| repository_name (string, 7–55 chars) | func_path_in_repository (string, 4–223 chars) | func_name (string, 1–134 chars) | whole_func_string (string, 75–104k chars) | language (string, 1 distinct value) | func_code_string (string, 75–104k chars) | func_code_tokens (sequence, 19–28.4k items) | func_documentation_string (string, 1–46.9k chars) | func_documentation_tokens (sequence, 1–1.97k items) | split_name (string, 1 distinct value) | func_code_url (string, 87–315 chars) |
|---|---|---|---|---|---|---|---|---|---|---|
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/checker.py | IsogeoChecker._check_filter_specific_tag | def _check_filter_specific_tag(self, specific_tag: list):
"""Check if specific_tag parameter is valid.
:param list specific_tag: list of specific tags to check
"""
if isinstance(specific_tag, list):
if len(specific_tag) > 0:
specific_tag = ",".join(specific_tag)
else:
specific_tag = ""
else:
raise TypeError("'specific_tag' expects a list")
return specific_tag | python | def _check_filter_specific_tag(self, specific_tag: list):
"""Check if specific_tag parameter is valid.
:param list specific_tag: list of specific tags to check
"""
if isinstance(specific_tag, list):
if len(specific_tag) > 0:
specific_tag = ",".join(specific_tag)
else:
specific_tag = ""
else:
raise TypeError("'specific_tag' expects a list")
return specific_tag | [
"def",
"_check_filter_specific_tag",
"(",
"self",
",",
"specific_tag",
":",
"list",
")",
":",
"if",
"isinstance",
"(",
"specific_tag",
",",
"list",
")",
":",
"if",
"len",
"(",
"specific_tag",
")",
">",
"0",
":",
"specific_tag",
"=",
"\",\"",
".",
"join",
"(",
"specific_tag",
")",
"else",
":",
"specific_tag",
"=",
"\"\"",
"else",
":",
"raise",
"TypeError",
"(",
"\"'specific_tag' expects a list\"",
")",
"return",
"specific_tag"
] | Check if specific_tag parameter is valid.
:param list specific_tag: list of specific tags to check | [
"Check",
"if",
"specific_tag",
"parameter",
"is",
"valid",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/checker.py#L372-L384 |
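A minimal behavior sketch of the validator above; the standalone function and sample tag values below are illustrative, not part of the SDK:

```python
def check_filter_specific_tag(specific_tag: list) -> str:
    # Same rules as IsogeoChecker._check_filter_specific_tag above.
    if not isinstance(specific_tag, list):
        raise TypeError("'specific_tag' expects a list")
    return ",".join(specific_tag) if specific_tag else ""

print(check_filter_specific_tag(["type:dataset", "owner:abc"]))  # -> type:dataset,owner:abc
print(check_filter_specific_tag([]))                             # -> "" (empty string)
```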
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/checker.py | IsogeoChecker._check_filter_includes | def _check_filter_includes(self, includes: list, resource: str = "metadata"):
"""Check if specific_resources parameter is valid.
:param list includes: sub resources to check
:param str resource: resource type to check sub resources.
Must be one of: metadata | keyword.
"""
# check resource parameter
if resource == "metadata":
ref_subresources = _SUBRESOURCES_MD
elif resource == "keyword":
ref_subresources = _SUBRESOURCES_KW
else:
raise ValueError("Must be one of: metadata | keyword.")
# sub resources manager
if isinstance(includes, str) and includes.lower() == "all":
includes = ",".join(ref_subresources)
elif isinstance(includes, list):
if len(includes) > 0:
includes = ",".join(includes)
else:
includes = ""
else:
raise TypeError("'includes' expect a list or a str='all'")
return includes | python | def _check_filter_includes(self, includes: list, resource: str = "metadata"):
"""Check if specific_resources parameter is valid.
:param list includes: sub resources to check
:param str resource: resource type to check sub resources.
Must be one of: metadata | keyword.
"""
# check resource parameter
if resource == "metadata":
ref_subresources = _SUBRESOURCES_MD
elif resource == "keyword":
ref_subresources = _SUBRESOURCES_KW
else:
raise ValueError("Must be one of: metadata | keyword.")
# sub resources manager
if isinstance(includes, str) and includes.lower() == "all":
includes = ",".join(ref_subresources)
elif isinstance(includes, list):
if len(includes) > 0:
includes = ",".join(includes)
else:
includes = ""
else:
raise TypeError("'includes' expect a list or a str='all'")
return includes | [
"def",
"_check_filter_includes",
"(",
"self",
",",
"includes",
":",
"list",
",",
"resource",
":",
"str",
"=",
"\"metadata\"",
")",
":",
"# check resource parameter",
"if",
"resource",
"==",
"\"metadata\"",
":",
"ref_subresources",
"=",
"_SUBRESOURCES_MD",
"elif",
"resource",
"==",
"\"keyword\"",
":",
"ref_subresources",
"=",
"_SUBRESOURCES_KW",
"else",
":",
"raise",
"ValueError",
"(",
"\"Must be one of: metadata | keyword.\"",
")",
"# sub resources manager",
"if",
"isinstance",
"(",
"includes",
",",
"str",
")",
"and",
"includes",
".",
"lower",
"(",
")",
"==",
"\"all\"",
":",
"includes",
"=",
"\",\"",
".",
"join",
"(",
"ref_subresources",
")",
"elif",
"isinstance",
"(",
"includes",
",",
"list",
")",
":",
"if",
"len",
"(",
"includes",
")",
">",
"0",
":",
"includes",
"=",
"\",\"",
".",
"join",
"(",
"includes",
")",
"else",
":",
"includes",
"=",
"\"\"",
"else",
":",
"raise",
"TypeError",
"(",
"\"'includes' expect a list or a str='all'\"",
")",
"return",
"includes"
] | Check if includes parameter is valid.
:param list includes: sub resources to check
:param str resource: resource type to check sub resources.
Must be one of: metadata | keyword. | [
"Check",
"if",
"specific_resources",
"parameter",
"is",
"valid",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/checker.py#L386-L411 |
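A hedged usage sketch for the includes normalizer; `checker` stands in for an `IsogeoChecker` instance, and the `"all"` result depends on the module's `_SUBRESOURCES_MD` constant:

```python
# Hypothetical calls on an IsogeoChecker instance named `checker`:
checker._check_filter_includes("all")                   # joins every entry of _SUBRESOURCES_MD
checker._check_filter_includes(["events", "contacts"])  # -> "events,contacts"
checker._check_filter_includes([])                      # -> ""
checker._check_filter_includes(42)                      # raises TypeError
checker._check_filter_includes(["events"], resource="other")  # raises ValueError
```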
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/checker.py | IsogeoChecker._check_subresource | def _check_subresource(self, subresource: str):
"""Check if specific_resources parameter is valid.
:param str resource: subresource to check.
"""
warnings.warn(
"subresource in URL is deprecated." " Use _include mecanism instead.",
DeprecationWarning,
)
l_subresources = (
"conditions",
"contacts",
"coordinate-system",
"events",
"feature-attributes",
"keywords",
"layers",
"limitations",
"links",
"operations",
"specifications",
)
if isinstance(subresource, str):
if subresource in l_subresources:
subresource = subresource
elif subresource == "tags":
subresource = "keywords"
logging.debug(
"'tags' is an include not a subresource."
" Don't worry, it has be automatically renamed "
"into 'keywords' which is the correct subresource."
)
elif subresource == "serviceLayers":
subresource = "layers"
logging.debug(
"'serviceLayers' is an include not a subresource."
" Don't worry, it has be automatically renamed "
"into 'layers' which is the correct subresource."
)
else:
raise ValueError(
"Invalid subresource. Must be one of: {}".format(
"|".join(l_subresources)
)
)
else:
raise TypeError("'subresource' expects a str")
return subresource | python | def _check_subresource(self, subresource: str):
"""Check if specific_resources parameter is valid.
:param str resource: subresource to check.
"""
warnings.warn(
"subresource in URL is deprecated." " Use _include mecanism instead.",
DeprecationWarning,
)
l_subresources = (
"conditions",
"contacts",
"coordinate-system",
"events",
"feature-attributes",
"keywords",
"layers",
"limitations",
"links",
"operations",
"specifications",
)
if isinstance(subresource, str):
if subresource in l_subresources:
subresource = subresource
elif subresource == "tags":
subresource = "keywords"
logging.debug(
"'tags' is an include not a subresource."
" Don't worry, it has be automatically renamed "
"into 'keywords' which is the correct subresource."
)
elif subresource == "serviceLayers":
subresource = "layers"
logging.debug(
"'serviceLayers' is an include not a subresource."
" Don't worry, it has be automatically renamed "
"into 'layers' which is the correct subresource."
)
else:
raise ValueError(
"Invalid subresource. Must be one of: {}".format(
"|".join(l_subresources)
)
)
else:
raise TypeError("'subresource' expects a str")
return subresource | [
"def",
"_check_subresource",
"(",
"self",
",",
"subresource",
":",
"str",
")",
":",
"warnings",
".",
"warn",
"(",
"\"subresource in URL is deprecated.\"",
"\" Use _include mecanism instead.\"",
",",
"DeprecationWarning",
",",
")",
"l_subresources",
"=",
"(",
"\"conditions\"",
",",
"\"contacts\"",
",",
"\"coordinate-system\"",
",",
"\"events\"",
",",
"\"feature-attributes\"",
",",
"\"keywords\"",
",",
"\"layers\"",
",",
"\"limitations\"",
",",
"\"links\"",
",",
"\"operations\"",
",",
"\"specifications\"",
",",
")",
"if",
"isinstance",
"(",
"subresource",
",",
"str",
")",
":",
"if",
"subresource",
"in",
"l_subresources",
":",
"subresource",
"=",
"subresource",
"elif",
"subresource",
"==",
"\"tags\"",
":",
"subresource",
"=",
"\"keywords\"",
"logging",
".",
"debug",
"(",
"\"'tags' is an include not a subresource.\"",
"\" Don't worry, it has be automatically renamed \"",
"\"into 'keywords' which is the correct subresource.\"",
")",
"elif",
"subresource",
"==",
"\"serviceLayers\"",
":",
"subresource",
"=",
"\"layers\"",
"logging",
".",
"debug",
"(",
"\"'serviceLayers' is an include not a subresource.\"",
"\" Don't worry, it has be automatically renamed \"",
"\"into 'layers' which is the correct subresource.\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid subresource. Must be one of: {}\"",
".",
"format",
"(",
"\"|\"",
".",
"join",
"(",
"l_subresources",
")",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"'subresource' expects a str\"",
")",
"return",
"subresource"
] | Check if subresource parameter is valid.
:param str subresource: subresource to check. | [
"Check",
"if",
"specific_resources",
"parameter",
"is",
"valid",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/checker.py#L413-L460 |
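The remapping rules above can be summarized with a short hypothetical session (`checker` is again an assumed `IsogeoChecker` instance; every call also emits the `DeprecationWarning`, silenced here):

```python
import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    checker._check_subresource("contacts")       # valid: returned unchanged
    checker._check_subresource("tags")           # remapped to "keywords"
    checker._check_subresource("serviceLayers")  # remapped to "layers"
    checker._check_subresource("nope")           # raises ValueError listing valid values
```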
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/checker.py | IsogeoChecker._convert_md_type | def _convert_md_type(self, type_to_convert: str):
"""Metadata types are not consistent in Isogeo API. A vector dataset is
defined as vector-dataset in query filter but as vectorDataset in
resource (metadata) details.
see: https://github.com/isogeo/isogeo-api-py-minsdk/issues/29
"""
if type_to_convert in FILTER_TYPES:
return FILTER_TYPES.get(type_to_convert)
elif type_to_convert in FILTER_TYPES.values():
return [k for k, v in FILTER_TYPES.items() if v == type_to_convert][0]
else:
raise ValueError(
"Incorrect metadata type to convert: {}".format(type_to_convert)
) | python | def _convert_md_type(self, type_to_convert: str):
"""Metadata types are not consistent in Isogeo API. A vector dataset is
defined as vector-dataset in query filter but as vectorDataset in
resource (metadata) details.
see: https://github.com/isogeo/isogeo-api-py-minsdk/issues/29
"""
if type_to_convert in FILTER_TYPES:
return FILTER_TYPES.get(type_to_convert)
elif type_to_convert in FILTER_TYPES.values():
return [k for k, v in FILTER_TYPES.items() if v == type_to_convert][0]
else:
raise ValueError(
"Incorrect metadata type to convert: {}".format(type_to_convert)
) | [
"def",
"_convert_md_type",
"(",
"self",
",",
"type_to_convert",
":",
"str",
")",
":",
"if",
"type_to_convert",
"in",
"FILTER_TYPES",
":",
"return",
"FILTER_TYPES",
".",
"get",
"(",
"type_to_convert",
")",
"elif",
"type_to_convert",
"in",
"FILTER_TYPES",
".",
"values",
"(",
")",
":",
"return",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"FILTER_TYPES",
".",
"items",
"(",
")",
"if",
"v",
"==",
"type_to_convert",
"]",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Incorrect metadata type to convert: {}\"",
".",
"format",
"(",
"type_to_convert",
")",
")"
] | Metadata types are not consistent in Isogeo API. A vector dataset is
defined as vector-dataset in query filter but as vectorDataset in
resource (metadata) details.
see: https://github.com/isogeo/isogeo-api-py-minsdk/issues/29 | [
"Metadata",
"types",
"are",
"not",
"consistent",
"in",
"Isogeo",
"API",
".",
"A",
"vector",
"dataset",
"is",
"defined",
"as",
"vector",
"-",
"dataset",
"in",
"query",
"filter",
"but",
"as",
"vectorDataset",
"in",
"resource",
"(",
"metadata",
")",
"details",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/checker.py#L462-L476 |
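Using the docstring's own vector-dataset example, the conversion works as a two-way lookup; the `checker` instance is hypothetical, and the assumption here is that `FILTER_TYPES` maps the query-filter spelling to the resource-detail spelling:

```python
checker._convert_md_type("vector-dataset")  # query-filter form -> "vectorDataset"
checker._convert_md_type("vectorDataset")   # resource-detail form -> "vector-dataset"
checker._convert_md_type("bogus")           # raises ValueError
```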
novopl/peltak | src/peltak/commands/version.py | version_cli | def version_cli(ctx, porcelain):
# type: (click.Context, bool) -> None
""" Show project version. Has sub commands.
For this command to work you must specify where the project version is
stored. You can do that with version_file conf variable. peltak supports
multiple ways to store the project version. Right now you can store it in a
python file using built-in __version__ variable. You can use node.js
package.json and keep the version there or you can just use a plain text
file that just holds the raw project version. The appropriate storage is
guessed based on the file type and name.
Example Configuration::
version_file: 'src/mypackage/__init__.py'
Examples:
\b
$ peltak version # Pretty print current version
$ peltak version --porcelain # Print version as raw string
$ peltak version bump patch # Bump patch version component
$ peltak version bump minor # Bump minor version component
$ peltak version bump major # Bump major version component
$ peltak version bump release # same as version bump patch
$ peltak version bump --exact=1.2.1 # Set project version to 1.2.1
"""
if ctx.invoked_subcommand:
return
from peltak.core import log
from peltak.core import versioning
current = versioning.current()
if porcelain:
print(current)
else:
log.info("Version: <35>{}".format(current)) | python | def version_cli(ctx, porcelain):
# type: (click.Context, bool) -> None
""" Show project version. Has sub commands.
For this command to work you must specify where the project version is
stored. You can do that with version_file conf variable. peltak supports
multiple ways to store the project version. Right now you can store it in a
python file using built-in __version__ variable. You can use node.js
package.json and keep the version there or you can just use a plain text
file that just holds the raw project version. The appropriate storage is
guessed based on the file type and name.
Example Configuration::
version_file: 'src/mypackage/__init__.py'
Examples:
\b
$ peltak version # Pretty print current version
$ peltak version --porcelain # Print version as raw string
$ peltak version bump patch # Bump patch version component
$ peltak version bump minor # Bump minor version component
$ peltak version bump major # Bump major version component
$ peltak version bump release # same as version bump patch
$ peltak version bump --exact=1.2.1 # Set project version to 1.2.1
"""
if ctx.invoked_subcommand:
return
from peltak.core import log
from peltak.core import versioning
current = versioning.current()
if porcelain:
print(current)
else:
log.info("Version: <35>{}".format(current)) | [
"def",
"version_cli",
"(",
"ctx",
",",
"porcelain",
")",
":",
"# type: (click.Context, bool) -> None",
"if",
"ctx",
".",
"invoked_subcommand",
":",
"return",
"from",
"peltak",
".",
"core",
"import",
"log",
"from",
"peltak",
".",
"core",
"import",
"versioning",
"current",
"=",
"versioning",
".",
"current",
"(",
")",
"if",
"porcelain",
":",
"print",
"(",
"current",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"Version: <35>{}\"",
".",
"format",
"(",
"current",
")",
")"
] | Show project version. Has sub commands.
For this command to work you must specify where the project version is
stored. You can do that with version_file conf variable. peltak supports
multiple ways to store the project version. Right now you can store it in a
python file using built-in __version__ variable. You can use node.js
package.json and keep the version there or you can just use a plain text
file that just holds the raw project version. The appropriate storage is
guessed based on the file type and name.
Example Configuration::
version_file: 'src/mypackage/__init__.py'
Examples:
\b
$ peltak version # Pretty print current version
$ peltak version --porcelain # Print version as raw string
$ peltak version bump patch # Bump patch version component
$ peltak version bump minor # Bump minor version component
$ peltak version bump major # Bump major version component
$ peltak version bump release # same as version bump patch
$ peltak version bump --exact=1.2.1 # Set project version to 1.2.1 | [
"Show",
"project",
"version",
".",
"Has",
"sub",
"commands",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/commands/version.py#L25-L64 |
novopl/peltak | src/peltak/commands/version.py | bump_version | def bump_version(component='patch', exact=None):
# type: (str, str) -> None
""" Bump current project version without committing anything.
No tags are created either.
Examples:
\b
$ peltak version bump patch # Bump patch version component
$ peltak version bump minor # Bump minor version component
$ peltak version bump major # Bump major version component
$ peltak version bump release # same as version bump patch
$ peltak version bump --exact=1.2.1 # Set project version to 1.2.1
"""
from peltak.core import log
from peltak.core import versioning
old_ver, new_ver = versioning.bump(component, exact)
log.info("Project version bumped")
log.info(" old version: <35>{}".format(old_ver))
log.info(" new version: <35>{}".format(new_ver)) | python | def bump_version(component='patch', exact=None):
# type: (str, str) -> None
""" Bump current project version without committing anything.
No tags are created either.
Examples:
\b
$ peltak version bump patch # Bump patch version component
$ peltak version bump minor # Bump minor version component
$ peltak version bump major # Bump major version component
$ peltak version bump release # same as version bump patch
$ peltak version bump --exact=1.2.1 # Set project version to 1.2.1
"""
from peltak.core import log
from peltak.core import versioning
old_ver, new_ver = versioning.bump(component, exact)
log.info("Project version bumped")
log.info(" old version: <35>{}".format(old_ver))
log.info(" new version: <35>{}".format(new_ver)) | [
"def",
"bump_version",
"(",
"component",
"=",
"'patch'",
",",
"exact",
"=",
"None",
")",
":",
"# type: (str, str) -> None",
"from",
"peltak",
".",
"core",
"import",
"log",
"from",
"peltak",
".",
"core",
"import",
"versioning",
"old_ver",
",",
"new_ver",
"=",
"versioning",
".",
"bump",
"(",
"component",
",",
"exact",
")",
"log",
".",
"info",
"(",
"\"Project version bumped\"",
")",
"log",
".",
"info",
"(",
"\" old version: <35>{}\"",
".",
"format",
"(",
"old_ver",
")",
")",
"log",
".",
"info",
"(",
"\" new version: <35>{}\"",
".",
"format",
"(",
"new_ver",
")",
")"
] | Bump current project version without committing anything.
No tags are created either.
Examples:
\b
$ peltak version bump patch # Bump patch version component
$ peltak version bump minor # Bump minor version component
$ peltak version bump major # Bump major version component
$ peltak version bump release # same as version bump patch
$ peltak version bump --exact=1.2.1 # Set project version to 1.2.1 | [
"Bump",
"current",
"project",
"version",
"without",
"committing",
"anything",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/commands/version.py#L76-L99 |
nathankw/pulsarpy | pulsarpy/models.py | Meta.get_logfile_name | def get_logfile_name(tag):
"""
Creates a name for a log file that is meant to be used in a call to
``logging.FileHandler``. The log file name will include the path to the log directory given
by the `p.LOG_DIR` constant. The format of the file name is: 'log_$HOST_$TAG.txt', where
$HOST is the hostname part of the URL given by ``URL``, and $TAG is the value of the
'tag' argument. The log directory will be created if need be.
Args:
tag: `str`. A tag name to add at the end of the log file name for clarity on the
log file's purpose.
"""
if not os.path.exists(p.LOG_DIR):
os.mkdir(p.LOG_DIR)
filename = "log_" + p.HOST + "_" + tag + ".txt"
filename = os.path.join(p.LOG_DIR, filename)
return filename | python | def get_logfile_name(tag):
"""
Creates a name for a log file that is meant to be used in a call to
``logging.FileHandler``. The log file name will include the path to the log directory given
by the `p.LOG_DIR` constant. The format of the file name is: 'log_$HOST_$TAG.txt', where
$HOST is the hostname part of the URL given by ``URL``, and $TAG is the value of the
'tag' argument. The log directory will be created if need be.
Args:
tag: `str`. A tag name to add at the end of the log file name for clarity on the
log file's purpose.
"""
if not os.path.exists(p.LOG_DIR):
os.mkdir(p.LOG_DIR)
filename = "log_" + p.HOST + "_" + tag + ".txt"
filename = os.path.join(p.LOG_DIR, filename)
return filename | [
"def",
"get_logfile_name",
"(",
"tag",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"p",
".",
"LOG_DIR",
")",
":",
"os",
".",
"mkdir",
"(",
"p",
".",
"LOG_DIR",
")",
"filename",
"=",
"\"log_\"",
"+",
"p",
".",
"HOST",
"+",
"\"_\"",
"+",
"tag",
"+",
"\".txt\"",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"p",
".",
"LOG_DIR",
",",
"filename",
")",
"return",
"filename"
] | Creates a name for a log file that is meant to be used in a call to
``logging.FileHandler``. The log file name will include the path to the log directory given
by the `p.LOG_DIR` constant. The format of the file name is: 'log_$HOST_$TAG.txt', where
$HOST is the hostname part of the URL given by ``URL``, and $TAG is the value of the
'tag' argument. The log directory will be created if need be.
Args:
tag: `str`. A tag name to add at the end of the log file name for clarity on the
log file's purpose. | [
"Creates",
"a",
"name",
"for",
"a",
"log",
"file",
"that",
"is",
"meant",
"to",
"be",
"used",
"in",
"a",
"call",
"to",
"logging",
".",
"FileHandler",
".",
"The",
"log",
"file",
"name",
"will",
"incldue",
"the",
"path",
"to",
"the",
"log",
"directory",
"given",
"by",
"the",
"p",
".",
"LOG_DIR",
"constant",
".",
"The",
"format",
"of",
"the",
"file",
"name",
"is",
":",
"log_$HOST_$TAG",
".",
"txt",
"where"
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L114-L131 |
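As the docstring notes, the returned path is meant for `logging.FileHandler`; a hedged wiring sketch (exactly how the helper is exposed on `Meta` is an assumption here):

```python
import logging

# e.g. "<LOG_DIR>/log_<host>_error.txt"; the log directory is created if missing.
handler = logging.FileHandler(get_logfile_name("error"))
handler.setLevel(logging.ERROR)
logging.getLogger("pulsarpy").addHandler(handler)
```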
nathankw/pulsarpy | pulsarpy/models.py | Model._get | def _get(self, rec_id=None, upstream=None):
"""
Fetches a record by the record's ID or upstream_identifier.
Raises:
`pulsarpy.models.RecordNotFound`: A record could not be found.
"""
if rec_id:
self.record_url = self.__class__.get_record_url(rec_id)
self.debug_logger.debug("GET {} record with ID {}: {}".format(self.__class__.__name__, rec_id, self.record_url))
response = requests.get(url=self.record_url, headers=HEADERS, verify=False)
if not response.ok and response.status_code == requests.codes.NOT_FOUND:
raise RecordNotFound("Search for {} record with ID '{}' returned no results.".format(self.__class__.__name__, rec_id))
self.write_response_html_to_file(response,"get_bob.html")
response.raise_for_status()
return response.json()
elif upstream:
rec_json = self.__class__.find_by({"upstream_identifier": upstream}, require=True)
self.record_url = self.__class__.get_record_url(rec_json["id"])
return rec_json | python | def _get(self, rec_id=None, upstream=None):
"""
Fetches a record by the record's ID or upstream_identifier.
Raises:
`pulsarpy.models.RecordNotFound`: A record could not be found.
"""
if rec_id:
self.record_url = self.__class__.get_record_url(rec_id)
self.debug_logger.debug("GET {} record with ID {}: {}".format(self.__class__.__name__, rec_id, self.record_url))
response = requests.get(url=self.record_url, headers=HEADERS, verify=False)
if not response.ok and response.status_code == requests.codes.NOT_FOUND:
raise RecordNotFound("Search for {} record with ID '{}' returned no results.".format(self.__class__.__name__, rec_id))
self.write_response_html_to_file(response,"get_bob.html")
response.raise_for_status()
return response.json()
elif upstream:
rec_json = self.__class__.find_by({"upstream_identifier": upstream}, require=True)
self.record_url = self.__class__.get_record_url(rec_json["id"])
return rec_json | [
"def",
"_get",
"(",
"self",
",",
"rec_id",
"=",
"None",
",",
"upstream",
"=",
"None",
")",
":",
"if",
"rec_id",
":",
"self",
".",
"record_url",
"=",
"self",
".",
"__class__",
".",
"get_record_url",
"(",
"rec_id",
")",
"self",
".",
"debug_logger",
".",
"debug",
"(",
"\"GET {} record with ID {}: {}\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"rec_id",
",",
"self",
".",
"record_url",
")",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
"=",
"self",
".",
"record_url",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"if",
"not",
"response",
".",
"ok",
"and",
"response",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"NOT_FOUND",
":",
"raise",
"RecordNotFound",
"(",
"\"Search for {} record with ID '{}' returned no results.\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"rec_id",
")",
")",
"self",
".",
"write_response_html_to_file",
"(",
"response",
",",
"\"get_bob.html\"",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response",
".",
"json",
"(",
")",
"elif",
"upstream",
":",
"rec_json",
"=",
"self",
".",
"__class__",
".",
"find_by",
"(",
"{",
"\"upstream_identifier\"",
":",
"upstream",
"}",
",",
"require",
"=",
"True",
")",
"self",
".",
"record_url",
"=",
"self",
".",
"__class__",
".",
"get_record_url",
"(",
"rec_json",
"[",
"\"id\"",
"]",
")",
"return",
"rec_json"
] | Fetches a record by the record's ID or upstream_identifier.
Raises:
`pulsarpy.models.RecordNotFound`: A record could not be found. | [
"Fetches",
"a",
"record",
"by",
"the",
"record",
"s",
"ID",
"or",
"upstream_identifier",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L296-L315 |
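A hedged sketch of the two lookup paths; `biosample` is an assumed instance of a `Model` subclass, and calling the private `_get` directly is for illustration only:

```python
from pulsarpy.models import RecordNotFound

try:
    rec_json = biosample._get(rec_id=42)               # GET by primary ID
    rec_json = biosample._get(upstream="ENCBS123ABC")  # or by upstream_identifier
except RecordNotFound:
    print("no matching record in Pulsar")
```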
nathankw/pulsarpy | pulsarpy/models.py | Model.replace_name_with_id | def replace_name_with_id(cls, name):
"""
Used to replace a foreign key reference using a name with an ID. Works by searching the
record in Pulsar and expects to find exactly one hit. First, will check if the foreign key
reference is an integer value and if so, returns that as it is presumed to be the foreign key.
Raises:
`pulsarpy.elasticsearch_utils.MultipleHitsException`: Multiple hits were returned from the name search.
`pulsarpy.models.RecordNotFound`: No results were produced from the name search.
"""
try:
int(name)
return name #Already a presumed ID.
except ValueError:
pass
#Not an int, so maybe a combination of MODEL_ABBR and Primary Key, i.e. B-8.
if name.split("-")[0] in Meta._MODEL_ABBREVS:
return int(name.split("-", 1)[1])
try:
result = cls.ES.get_record_by_name(cls.ES_INDEX_NAME, name)
if result:
return result["id"]
except pulsarpy.elasticsearch_utils.MultipleHitsException as e:
raise
raise RecordNotFound("Name '{}' for model '{}' not found.".format(name, cls.__name__)) | python | def replace_name_with_id(cls, name):
"""
Used to replace a foreign key reference using a name with an ID. Works by searching the
record in Pulsar and expects to find exactly one hit. First, will check if the foreign key
reference is an integer value and if so, returns that as it is presumed to be the foreign key.
Raises:
`pulsarpy.elasticsearch_utils.MultipleHitsException`: Multiple hits were returned from the name search.
`pulsarpy.models.RecordNotFound`: No results were produced from the name search.
"""
try:
int(name)
return name #Already a presumed ID.
except ValueError:
pass
#Not an int, so maybe a combination of MODEL_ABBR and Primary Key, i.e. B-8.
if name.split("-")[0] in Meta._MODEL_ABBREVS:
return int(name.split("-", 1)[1])
try:
result = cls.ES.get_record_by_name(cls.ES_INDEX_NAME, name)
if result:
return result["id"]
except pulsarpy.elasticsearch_utils.MultipleHitsException as e:
raise
raise RecordNotFound("Name '{}' for model '{}' not found.".format(name, cls.__name__)) | [
"def",
"replace_name_with_id",
"(",
"cls",
",",
"name",
")",
":",
"try",
":",
"int",
"(",
"name",
")",
"return",
"name",
"#Already a presumed ID.",
"except",
"ValueError",
":",
"pass",
"#Not an int, so maybe a combination of MODEL_ABBR and Primary Key, i.e. B-8.",
"if",
"name",
".",
"split",
"(",
"\"-\"",
")",
"[",
"0",
"]",
"in",
"Meta",
".",
"_MODEL_ABBREVS",
":",
"return",
"int",
"(",
"name",
".",
"split",
"(",
"\"-\"",
",",
"1",
")",
"[",
"1",
"]",
")",
"try",
":",
"result",
"=",
"cls",
".",
"ES",
".",
"get_record_by_name",
"(",
"cls",
".",
"ES_INDEX_NAME",
",",
"name",
")",
"if",
"result",
":",
"return",
"result",
"[",
"\"id\"",
"]",
"except",
"pulsarpy",
".",
"elasticsearch_utils",
".",
"MultipleHitsException",
"as",
"e",
":",
"raise",
"raise",
"RecordNotFound",
"(",
"\"Name '{}' for model '{}' not found.\"",
".",
"format",
"(",
"name",
",",
"cls",
".",
"__name__",
")",
")"
] | Used to replace a foreign key reference using a name with an ID. Works by searching the
record in Pulsar and expects to find exactly one hit. First, will check if the foreign key
reference is an integer value and if so, returns that as it is presumed to be the foreign key.
Raises:
`pulsarpy.elasticsearch_utils.MultipleHitsException`: Multiple hits were returned from the name search.
`pulsarpy.models.RecordNotFound`: No results were produced from the name search. | [
"Used",
"to",
"replace",
"a",
"foreign",
"key",
"reference",
"using",
"a",
"name",
"with",
"an",
"ID",
".",
"Works",
"by",
"searching",
"the",
"record",
"in",
"Pulsar",
"and",
"expects",
"to",
"find",
"exactly",
"one",
"hit",
".",
"First",
"will",
"check",
"if",
"the",
"foreign",
"key",
"reference",
"is",
"an",
"integer",
"value",
"and",
"if",
"so",
"returns",
"that",
"as",
"it",
"is",
"presumed",
"to",
"be",
"the",
"foreign",
"key",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L330-L354 |
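The three accepted input forms, sketched with `Biosample` as a hypothetical `Model` subclass:

```python
Biosample.replace_name_with_id(42)        # already an ID: returned unchanged
Biosample.replace_name_with_id("B-8")     # model-abbreviation form: returns 8
Biosample.replace_name_with_id("my rec")  # name: resolved via Elasticsearch to its ID
```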
nathankw/pulsarpy | pulsarpy/models.py | Model.add_model_name_to_payload | def add_model_name_to_payload(cls, payload):
"""
Checks whether the model name in question is in the payload. If not, the entire payload
is set as a value of a key by the name of the model. This method is useful when some
server-side Rails API calls expect the parameters to include the parameterized model name.
For example, server-side endpoints that handle the updating of a biosample record or the
creation of a new biosample record will expect the payload to be of the form::
{ "biosample": {
"name": "new biosample",
"donor": 3,
...
}
}
Args:
payload: `dict`. The data to send in an HTTP request.
Returns:
`dict`.
"""
if not cls.MODEL_NAME in payload:
payload = {cls.MODEL_NAME: payload}
return payload | python | def add_model_name_to_payload(cls, payload):
"""
Checks whether the model name in question is in the payload. If not, the entire payload
is set as a value of a key by the name of the model. This method is useful when some
server-side Rails API calls expect the parameters to include the parameterized model name.
For example, server-side endpoints that handle the updating of a biosample record or the
creation of a new biosample record will expect the payload to be of the form::
{ "biosample": {
"name": "new biosample",
"donor": 3,
...
}
}
Args:
payload: `dict`. The data to send in an HTTP request.
Returns:
`dict`.
"""
if not cls.MODEL_NAME in payload:
payload = {cls.MODEL_NAME: payload}
return payload | [
"def",
"add_model_name_to_payload",
"(",
"cls",
",",
"payload",
")",
":",
"if",
"not",
"cls",
".",
"MODEL_NAME",
"in",
"payload",
":",
"payload",
"=",
"{",
"cls",
".",
"MODEL_NAME",
":",
"payload",
"}",
"return",
"payload"
] | Checks whether the model name in question is in the payload. If not, the entire payload
is set as a value of a key by the name of the model. This method is useful when some
server-side Rails API calls expect the parameters to include the parameterized model name.
For example, server-side endpoints that handle the updating of a biosample record or the
creation of a new biosample record will expect the payload to be of the form::
{ "biosample": {
"name": "new biosample",
"donor": 3,
...
}
}
Args:
payload: `dict`. The data to send in an HTTP request.
Returns:
`dict`. | [
"Checks",
"whether",
"the",
"model",
"name",
"in",
"question",
"is",
"in",
"the",
"payload",
".",
"If",
"not",
"the",
"entire",
"payload",
"is",
"set",
"as",
"a",
"value",
"of",
"a",
"key",
"by",
"the",
"name",
"of",
"the",
"model",
".",
"This",
"method",
"is",
"useful",
"when",
"some",
"server",
"-",
"side",
"Rails",
"API",
"calls",
"expect",
"the",
"parameters",
"to",
"include",
"the",
"parameterized",
"model",
"name",
".",
"For",
"example",
"server",
"-",
"side",
"endpoints",
"that",
"handle",
"the",
"updating",
"of",
"a",
"biosample",
"record",
"or",
"the",
"creation",
"of",
"a",
"new",
"biosmample",
"record",
"will",
"expect",
"the",
"payload",
"to",
"be",
"of",
"the",
"form",
"::"
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L358-L381 |
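A behavior sketch, assuming a subclass whose `MODEL_NAME` is `"biosample"`:

```python
payload = {"name": "new biosample", "donor": 3}
Biosample.add_model_name_to_payload(payload)
# -> {"biosample": {"name": "new biosample", "donor": 3}}
# A payload already keyed by "biosample" passes through unchanged.
```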
nathankw/pulsarpy | pulsarpy/models.py | Model.delete | def delete(self):
"""Deletes the record.
"""
res = requests.delete(url=self.record_url, headers=HEADERS, verify=False)
#self.write_response_html_to_file(res,"bob_delete.html")
if res.status_code == 204:
#No content. Can't render json:
return {}
return res.json() | python | def delete(self):
"""Deletes the record.
"""
res = requests.delete(url=self.record_url, headers=HEADERS, verify=False)
#self.write_response_html_to_file(res,"bob_delete.html")
if res.status_code == 204:
#No content. Can't render json:
return {}
return res.json() | [
"def",
"delete",
"(",
"self",
")",
":",
"res",
"=",
"requests",
".",
"delete",
"(",
"url",
"=",
"self",
".",
"record_url",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"#self.write_response_html_to_file(res,\"bob_delete.html\")",
"if",
"res",
".",
"status_code",
"==",
"204",
":",
"#No content. Can't render json:",
"return",
"{",
"}",
"return",
"res",
".",
"json",
"(",
")"
] | Deletes the record. | [
"Deletes",
"the",
"record",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L413-L421 |
nathankw/pulsarpy | pulsarpy/models.py | Model.find_by | def find_by(cls, payload, require=False):
"""
Searches the model in question by AND joining the query parameters.
Implements a Railsy way of looking for a record using a method by the same name and passing
in the query as a dict as well. Only the first hit is returned, and there is no particular
ordering specified in the server-side API method.
Args:
payload: `dict`. The attributes of a record to restrict the search to.
require: `bool`. True means to raise a `pulsarpy.models.RecordNotFound` exception if no
record is found.
Returns:
`dict`: The JSON serialization of the record, if any, found by the API call.
`None`: If the API call didn't return any results.
Raises:
`pulsarpy.models.RecordNotFound`: No records were found, and the `require` parameter is
True.
"""
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
url = os.path.join(cls.URL, "find_by")
payload = {"find_by": payload}
cls.debug_logger.debug("Searching Pulsar {} for {}".format(cls.__name__, json.dumps(payload, indent=4)))
res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)
#cls.write_response_html_to_file(res,"bob.html")
res.raise_for_status()
res_json = res.json()
if res_json:
try:
res_json = res_json[cls.MODEL_NAME]
except KeyError:
# Key won't be present if there isn't a serializer for it on the server.
pass
else:
if require:
raise RecordNotFound("Can't find any {} records with search criteria: '{}'.".format(cls.__name__, payload))
return res_json | python | def find_by(cls, payload, require=False):
"""
Searches the model in question by AND joining the query parameters.
Implements a Railsy way of looking for a record using a method by the same name and passing
in the query as a dict as well. Only the first hit is returned, and there is no particular
ordering specified in the server-side API method.
Args:
payload: `dict`. The attributes of a record to restrict the search to.
require: `bool`. True means to raise a `pulsarpy.models.RecordNotFound` exception if no
record is found.
Returns:
`dict`: The JSON serialization of the record, if any, found by the API call.
`None`: If the API call didn't return any results.
Raises:
`pulsarpy.models.RecordNotFound`: No records were found, and the `require` parameter is
True.
"""
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
url = os.path.join(cls.URL, "find_by")
payload = {"find_by": payload}
cls.debug_logger.debug("Searching Pulsar {} for {}".format(cls.__name__, json.dumps(payload, indent=4)))
res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)
#cls.write_response_html_to_file(res,"bob.html")
res.raise_for_status()
res_json = res.json()
if res_json:
try:
res_json = res_json[cls.MODEL_NAME]
except KeyError:
# Key won't be present if there isn't a serializer for it on the server.
pass
else:
if require:
raise RecordNotFound("Can't find any {} records with search criteria: '{}'.".format(cls.__name__, payload))
return res_json | [
"def",
"find_by",
"(",
"cls",
",",
"payload",
",",
"require",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"\"The 'payload' parameter must be provided a dictionary object.\"",
")",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cls",
".",
"URL",
",",
"\"find_by\"",
")",
"payload",
"=",
"{",
"\"find_by\"",
":",
"payload",
"}",
"cls",
".",
"debug_logger",
".",
"debug",
"(",
"\"Searching Pulsar {} for {}\"",
".",
"format",
"(",
"cls",
".",
"__name__",
",",
"json",
".",
"dumps",
"(",
"payload",
",",
"indent",
"=",
"4",
")",
")",
")",
"res",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"url",
",",
"json",
"=",
"payload",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"#cls.write_response_html_to_file(res,\"bob.html\")",
"res",
".",
"raise_for_status",
"(",
")",
"res_json",
"=",
"res",
".",
"json",
"(",
")",
"if",
"res_json",
":",
"try",
":",
"res_json",
"=",
"res_json",
"[",
"cls",
".",
"MODEL_NAME",
"]",
"except",
"KeyError",
":",
"# Key won't be present if there isn't a serializer for it on the server.",
"pass",
"else",
":",
"if",
"require",
":",
"raise",
"RecordNotFound",
"(",
"\"Can't find any {} records with search criteria: '{}'.\"",
".",
"format",
"(",
"cls",
".",
"__name__",
",",
"payload",
")",
")",
"return",
"res_json"
] | Searches the model in question by AND joining the query parameters.
Implements a Railsy way of looking for a record using a method by the same name and passing
in the query as a dict as well. Only the first hit is returned, and there is no particular
ordering specified in the server-side API method.
Args:
payload: `dict`. The attributes of a record to restrict the search to.
require: `bool`. True means to raise a `pulsarpy.models.RecordNotFound` exception if no
record is found.
Returns:
`dict`: The JSON serialization of the record, if any, found by the API call.
`None`: If the API call didn't return any results.
Raises:
`pulsarpy.models.RecordNotFound`: No records were found, and the `require` parameter is
True. | [
"Searches",
"the",
"model",
"in",
"question",
"by",
"AND",
"joining",
"the",
"query",
"parameters",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L424-L463 |
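A hedged search sketch against a live Pulsar server; `Biosample` and the field names are assumptions for illustration:

```python
from pulsarpy.models import RecordNotFound

rec = Biosample.find_by({"name": "test clone", "vendor_id": 5})  # AND-joined query
if rec:
    print(rec["id"])

try:
    Biosample.find_by({"name": "no-such-record"}, require=True)
except RecordNotFound:
    print("nothing matched")
```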
nathankw/pulsarpy | pulsarpy/models.py | Model.find_by_or | def find_by_or(cls, payload):
"""
Searches the model in question by OR joining the query parameters.
Implements a Railsy way of looking for a record using a method by the same name and passing
in the query as a string (for the OR operator joining to be specified).
Only the first hit is returned, and there is no particular ordering specified in the server-side
API method.
Args:
payload: `dict`. The attributes of a record to search for by using OR operator joining
for each query parameter.
Returns:
`dict`: The JSON serialization of the record, if any, found by the API call.
`None`: If the API call didn't return any results.
"""
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
url = os.path.join(cls.URL, "find_by_or")
payload = {"find_by_or": payload}
cls.debug_logger.debug("Searching Pulsar {} for {}".format(cls.__name__, json.dumps(payload, indent=4)))
res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)
cls.write_response_html_to_file(res,"bob.html")
if res:
try:
res = res[cls.MODEL_NAME]
except KeyError:
# Key won't be present if there isn't a serializer for it on the server.
pass
return res | python | def find_by_or(cls, payload):
"""
Searches the model in question by OR joining the query parameters.
Implements a Railsy way of looking for a record using a method by the same name and passing
in the query as a string (for the OR operator joining to be specified).
Only the first hit is returned, and there is no particular ordering specified in the server-side
API method.
Args:
payload: `dict`. The attributes of a record to search for by using OR operator joining
for each query parameter.
Returns:
`dict`: The JSON serialization of the record, if any, found by the API call.
`None`: If the API call didn't return any results.
"""
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
url = os.path.join(cls.URL, "find_by_or")
payload = {"find_by_or": payload}
cls.debug_logger.debug("Searching Pulsar {} for {}".format(cls.__name__, json.dumps(payload, indent=4)))
res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)
cls.write_response_html_to_file(res,"bob.html")
if res:
try:
res = res[cls.MODEL_NAME]
except KeyError:
# Key won't be present if there isn't a serializer for it on the server.
pass
return res | [
"def",
"find_by_or",
"(",
"cls",
",",
"payload",
")",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"\"The 'payload' parameter must be provided a dictionary object.\"",
")",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cls",
".",
"URL",
",",
"\"find_by_or\"",
")",
"payload",
"=",
"{",
"\"find_by_or\"",
":",
"payload",
"}",
"cls",
".",
"debug_logger",
".",
"debug",
"(",
"\"Searching Pulsar {} for {}\"",
".",
"format",
"(",
"cls",
".",
"__name__",
",",
"json",
".",
"dumps",
"(",
"payload",
",",
"indent",
"=",
"4",
")",
")",
")",
"res",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"url",
",",
"json",
"=",
"payload",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"cls",
".",
"write_response_html_to_file",
"(",
"res",
",",
"\"bob.html\"",
")",
"if",
"res",
":",
"try",
":",
"res",
"=",
"res",
"[",
"cls",
".",
"MODEL_NAME",
"]",
"except",
"KeyError",
":",
"# Key won't be present if there isn't a serializer for it on the server.",
"pass",
"return",
"res"
] | Searches the model in question by OR joining the query parameters.
Implements a Railsy way of looking for a record using a method by the same name and passing
in the query as a string (for the OR operator joining to be specified).
Only the first hit is returned, and there is no particular ordering specified in the server-side
API method.
Args:
payload: `dict`. The attributes of a record to search for by using OR operator joining
for each query parameter.
Returns:
`dict`: The JSON serialization of the record, if any, found by the API call.
`None`: If the API call didn't return any results. | [
"Searches",
"the",
"model",
"in",
"question",
"by",
"OR",
"joining",
"the",
"query",
"parameters",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L466-L497 |
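The OR-joined variant, again with `Biosample` and the attribute values as illustrative assumptions:

```python
# Matches a record whose name OR upstream_identifier equals "ENCBS123ABC":
rec = Biosample.find_by_or({
    "name": "ENCBS123ABC",
    "upstream_identifier": "ENCBS123ABC",
})
```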
nathankw/pulsarpy | pulsarpy/models.py | Model.index | def index(cls):
"""Fetches all records.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok.
"""
res = requests.get(cls.URL, headers=HEADERS, verify=False)
res.raise_for_status()
return res.json() | python | def index(cls):
"""Fetches all records.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok.
"""
res = requests.get(cls.URL, headers=HEADERS, verify=False)
res.raise_for_status()
return res.json() | [
"def",
"index",
"(",
"cls",
")",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"cls",
".",
"URL",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"res",
".",
"raise_for_status",
"(",
")",
"return",
"res",
".",
"json",
"(",
")"
] | Fetches all records.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok. | [
"Fetches",
"all",
"records",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L500-L511 |
nathankw/pulsarpy | pulsarpy/models.py | Model.patch | def patch(self, payload, append_to_arrays=True):
"""
Patches current record and updates the current instance's 'attrs'
attribute to reflect the new changes.
Args:
payload: `dict`. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok.
"""
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
payload = self.__class__.set_id_in_fkeys(payload)
if append_to_arrays:
for key in payload:
val = payload[key]
if type(val) == list:
val.extend(getattr(self, key))
payload[key] = list(set(val))
payload = self.check_boolean_fields(payload)
payload = self.__class__.add_model_name_to_payload(payload)
self.debug_logger.debug("PATCHING payload {}".format(json.dumps(payload, indent=4)))
res = requests.patch(url=self.record_url, json=payload, headers=HEADERS, verify=False)
self.write_response_html_to_file(res,"bob.html")
res.raise_for_status()
json_res = res.json()
self.debug_logger.debug("Success")
self.attrs = json_res
return json_res | python | def patch(self, payload, append_to_arrays=True):
"""
Patches current record and updates the current instance's 'attrs'
attribute to reflect the new changes.
Args:
payload: `dict`. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok.
"""
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
payload = self.__class__.set_id_in_fkeys(payload)
if append_to_arrays:
for key in payload:
val = payload[key]
if type(val) == list:
val.extend(getattr(self, key))
payload[key] = list(set(val))
payload = self.check_boolean_fields(payload)
payload = self.__class__.add_model_name_to_payload(payload)
self.debug_logger.debug("PATCHING payload {}".format(json.dumps(payload, indent=4)))
res = requests.patch(url=self.record_url, json=payload, headers=HEADERS, verify=False)
self.write_response_html_to_file(res,"bob.html")
res.raise_for_status()
json_res = res.json()
self.debug_logger.debug("Success")
self.attrs = json_res
return json_res | [
"def",
"patch",
"(",
"self",
",",
"payload",
",",
"append_to_arrays",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"\"The 'payload' parameter must be provided a dictionary object.\"",
")",
"payload",
"=",
"self",
".",
"__class__",
".",
"set_id_in_fkeys",
"(",
"payload",
")",
"if",
"append_to_arrays",
":",
"for",
"key",
"in",
"payload",
":",
"val",
"=",
"payload",
"[",
"key",
"]",
"if",
"type",
"(",
"val",
")",
"==",
"list",
":",
"val",
".",
"extend",
"(",
"getattr",
"(",
"self",
",",
"key",
")",
")",
"payload",
"[",
"key",
"]",
"=",
"list",
"(",
"set",
"(",
"val",
")",
")",
"payload",
"=",
"self",
".",
"check_boolean_fields",
"(",
"payload",
")",
"payload",
"=",
"self",
".",
"__class__",
".",
"add_model_name_to_payload",
"(",
"payload",
")",
"self",
".",
"debug_logger",
".",
"debug",
"(",
"\"PATCHING payload {}\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"payload",
",",
"indent",
"=",
"4",
")",
")",
")",
"res",
"=",
"requests",
".",
"patch",
"(",
"url",
"=",
"self",
".",
"record_url",
",",
"json",
"=",
"payload",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"self",
".",
"write_response_html_to_file",
"(",
"res",
",",
"\"bob.html\"",
")",
"res",
".",
"raise_for_status",
"(",
")",
"json_res",
"=",
"res",
".",
"json",
"(",
")",
"self",
".",
"debug_logger",
".",
"debug",
"(",
"\"Success\"",
")",
"self",
".",
"attrs",
"=",
"json_res",
"return",
"json_res"
] | Patches current record and updates the current instance's 'attrs'
attribute to reflect the new changes.
Args:
payload: `dict`. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok. | [
"Patches",
"current",
"record",
"and",
"udpates",
"the",
"current",
"instance",
"s",
"attrs",
"attribute",
"to",
"reflect",
"the",
"new",
"changes",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L513-L545 |
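A hedged sketch of the array-merging behavior; `biosample` is an assumed record instance, and `notes`/`document_ids` are hypothetical fields:

```python
biosample.patch({"notes": "updated"})   # scalar fields are simply updated
biosample.patch({"document_ids": [7]})  # default: merged (set union) with current values
biosample.patch({"document_ids": [7]}, append_to_arrays=False)  # replace outright
```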
nathankw/pulsarpy | pulsarpy/models.py | Model.set_id_in_fkeys | def set_id_in_fkeys(cls, payload):
"""
Looks for any keys in the payload that end with either _id or _ids, signaling a foreign
key field. For each foreign key field, checks whether the value is using the name of the
record or the actual primary ID of the record (which may include the model abbreviation, i.e.
B-1). In the former case, the name is replaced with
the record's primary ID.
Args:
payload: `dict`. The payload to POST or PATCH.
Returns:
`dict`. The payload.
"""
for key in payload:
val = payload[key]
if not val:
continue
if key.endswith("_id"):
model = getattr(THIS_MODULE, cls.FKEY_MAP[key])
rec_id = model.replace_name_with_id(name=val)
payload[key] = rec_id
elif key.endswith("_ids"):
model = getattr(THIS_MODULE, cls.FKEY_MAP[key])
rec_ids = []
for v in val:
rec_id = model.replace_name_with_id(name=v)
rec_ids.append(rec_id)
payload[key] = rec_ids
return payload | python | def set_id_in_fkeys(cls, payload):
"""
Looks for any keys in the payload that end with either _id or _ids, signaling a foreign
key field. For each foreign key field, checks whether the value is using the name of the
record or the actual primary ID of the record (which may include the model abbreviation, i.e.
B-1). In the former case, the name is replaced with
the record's primary ID.
Args:
payload: `dict`. The payload to POST or PATCH.
Returns:
`dict`. The payload.
"""
for key in payload:
val = payload[key]
if not val:
continue
if key.endswith("_id"):
model = getattr(THIS_MODULE, cls.FKEY_MAP[key])
rec_id = model.replace_name_with_id(name=val)
payload[key] = rec_id
elif key.endswith("_ids"):
model = getattr(THIS_MODULE, cls.FKEY_MAP[key])
rec_ids = []
for v in val:
rec_id = model.replace_name_with_id(name=v)
rec_ids.append(rec_id)
payload[key] = rec_ids
return payload | [
"def",
"set_id_in_fkeys",
"(",
"cls",
",",
"payload",
")",
":",
"for",
"key",
"in",
"payload",
":",
"val",
"=",
"payload",
"[",
"key",
"]",
"if",
"not",
"val",
":",
"continue",
"if",
"key",
".",
"endswith",
"(",
"\"_id\"",
")",
":",
"model",
"=",
"getattr",
"(",
"THIS_MODULE",
",",
"cls",
".",
"FKEY_MAP",
"[",
"key",
"]",
")",
"rec_id",
"=",
"model",
".",
"replace_name_with_id",
"(",
"name",
"=",
"val",
")",
"payload",
"[",
"key",
"]",
"=",
"rec_id",
"elif",
"key",
".",
"endswith",
"(",
"\"_ids\"",
")",
":",
"model",
"=",
"getattr",
"(",
"THIS_MODULE",
",",
"cls",
".",
"FKEY_MAP",
"[",
"key",
"]",
")",
"rec_ids",
"=",
"[",
"]",
"for",
"v",
"in",
"val",
":",
"rec_id",
"=",
"model",
".",
"replace_name_with_id",
"(",
"name",
"=",
"v",
")",
"rec_ids",
".",
"append",
"(",
"rec_id",
")",
"payload",
"[",
"key",
"]",
"=",
"rec_ids",
"return",
"payload"
] | Looks for any keys in the payload that end with either _id or _ids, signaling a foreign
key field. For each foreign key field, checks whether the value is using the name of the
record or the actual primary ID of the record (which may include the model abbreviation, i.e.
B-1). In the former case, the name is replaced with
the record's primary ID.
Args:
payload: `dict`. The payload to POST or PATCH.
Returns:
`dict`. The payload. | [
"Looks",
"for",
"any",
"keys",
"in",
"the",
"payload",
"that",
"end",
"with",
"either",
"_id",
"or",
"_ids",
"signaling",
"a",
"foreign",
"key",
"field",
".",
"For",
"each",
"foreign",
"key",
"field",
"checks",
"whether",
"the",
"value",
"is",
"using",
"the",
"name",
"of",
"the",
"record",
"or",
"the",
"actual",
"primary",
"ID",
"of",
"the",
"record",
"(",
"which",
"may",
"include",
"the",
"model",
"abbreviation",
"i",
".",
"e",
".",
"B",
"-",
"1",
")",
".",
"If",
"the",
"former",
"case",
"the",
"name",
"is",
"replaced",
"with",
"the",
"record",
"s",
"primary",
"ID",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L548-L577 |
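A behavior sketch with hypothetical field names; the class-level `FKEY_MAP` is expected to map each `_id`/`_ids` key to its related model:

```python
payload = {"donor_id": "my donor", "document_ids": ["spec sheet", "D-3"]}
payload = Biosample.set_id_in_fkeys(payload)
# Names (and abbreviated IDs such as "D-3") resolve to primary IDs, e.g.
# {"donor_id": 12, "document_ids": [45, 3]}
```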
nathankw/pulsarpy | pulsarpy/models.py | Model.post | def post(cls, payload):
"""Posts the data to the specified record.
Args:
payload: `dict`. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`Requests.exceptions.HTTPError`: The status code is not ok.
`RecordNotUnique`: The Rails server returned the exception ActiveRecord::RecordNotUnique.
"""
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
payload = cls.set_id_in_fkeys(payload)
payload = cls.check_boolean_fields(payload)
payload = cls.add_model_name_to_payload(payload)
# Run any pre-post hooks:
payload = cls.prepost_hooks(payload)
cls.debug_logger.debug("POSTING payload {}".format(json.dumps(payload, indent=4)))
res = requests.post(url=cls.URL, json=(payload), headers=HEADERS, verify=False)
cls.write_response_html_to_file(res,"bob.html")
if not res.ok:
cls.log_error(res.text)
res_json = res.json()
if "exception" in res_json:
exc_type = res_json["exception"]
if exc_type == "ActiveRecord::RecordNotUnique":
raise RecordNotUnique()
res.raise_for_status()
res = res.json()
cls.log_post(res)
cls.debug_logger.debug("Success")
return res | python | def post(cls, payload):
"""Posts the data to the specified record.
Args:
payload: `dict`. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok.
`RecordNotUnique`: The Rails server returned the exception ActiveRecord::RecordNotUnique.
"""
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
payload = cls.set_id_in_fkeys(payload)
payload = cls.check_boolean_fields(payload)
payload = cls.add_model_name_to_payload(payload)
# Run any pre-post hooks:
payload = cls.prepost_hooks(payload)
cls.debug_logger.debug("POSTING payload {}".format(json.dumps(payload, indent=4)))
res = requests.post(url=cls.URL, json=(payload), headers=HEADERS, verify=False)
cls.write_response_html_to_file(res,"bob.html")
if not res.ok:
cls.log_error(res.text)
res_json = res.json()
if "exception" in res_json:
exc_type = res_json["exception"]
if exc_type == "ActiveRecord::RecordNotUnique":
raise RecordNotUnique()
res.raise_for_status()
res = res.json()
cls.log_post(res)
cls.debug_logger.debug("Success")
return res | [
"def",
"post",
"(",
"cls",
",",
"payload",
")",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"\"The 'payload' parameter must be provided a dictionary object.\"",
")",
"payload",
"=",
"cls",
".",
"set_id_in_fkeys",
"(",
"payload",
")",
"payload",
"=",
"cls",
".",
"check_boolean_fields",
"(",
"payload",
")",
"payload",
"=",
"cls",
".",
"add_model_name_to_payload",
"(",
"payload",
")",
"# Run any pre-post hooks:",
"payload",
"=",
"cls",
".",
"prepost_hooks",
"(",
"payload",
")",
"cls",
".",
"debug_logger",
".",
"debug",
"(",
"\"POSTING payload {}\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"payload",
",",
"indent",
"=",
"4",
")",
")",
")",
"res",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"cls",
".",
"URL",
",",
"json",
"=",
"(",
"payload",
")",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"cls",
".",
"write_response_html_to_file",
"(",
"res",
",",
"\"bob.html\"",
")",
"if",
"not",
"res",
".",
"ok",
":",
"cls",
".",
"log_error",
"(",
"res",
".",
"text",
")",
"res_json",
"=",
"res",
".",
"json",
"(",
")",
"if",
"\"exception\"",
"in",
"res_json",
":",
"exc_type",
"=",
"res_json",
"[",
"\"exception\"",
"]",
"if",
"exc_type",
"==",
"\"ActiveRecord::RecordNotUnique\"",
":",
"raise",
"RecordNotUnique",
"(",
")",
"res",
".",
"raise_for_status",
"(",
")",
"res",
"=",
"res",
".",
"json",
"(",
")",
"cls",
".",
"log_post",
"(",
"res",
")",
"cls",
".",
"debug_logger",
".",
"debug",
"(",
"\"Success\"",
")",
"return",
"res"
] | Posts the data to the specified record.
Args:
payload: `dict`. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`Requests.exceptions.HTTPError`: The status code is not ok.
`RecordNotUnique`: The Rails server returned the exception ActiveRecord::RecordNotUnique. | [
"Posts",
"the",
"data",
"to",
"the",
"specified",
"record",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L585-L619 |
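A minimal usage sketch for the post() classmethod above, assuming the pulsarpy.models import path shown in the record URL; the payload key is a hypothetical attribute for illustration, not taken from the source:

from pulsarpy.models import Biosample  # any Model subclass exposes post()

payload = {"name": "demo sample"}  # hypothetical attribute, for illustration only
try:
    rec = Biosample.post(payload)  # dict: the server's JSON response
    print("created record with id", rec["id"])
except Exception as err:  # e.g. RecordNotUnique or requests.exceptions.HTTPError
    print("POST failed:", err)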
nathankw/pulsarpy | pulsarpy/models.py | Model.log_error | def log_error(cls, msg):
"""
Logs the provided error message to both the error logger and the debug logger logging
instances.
Args:
msg: `str`. The error message to log.
"""
cls.error_logger.error(msg)
cls.debug_logger.debug(msg) | python | def log_error(cls, msg):
"""
Logs the provided error message to both the error logger and the debug logger logging
instances.
Args:
msg: `str`. The error message to log.
"""
cls.error_logger.error(msg)
cls.debug_logger.debug(msg) | [
"def",
"log_error",
"(",
"cls",
",",
"msg",
")",
":",
"cls",
".",
"error_logger",
".",
"error",
"(",
"msg",
")",
"cls",
".",
"debug_logger",
".",
"debug",
"(",
"msg",
")"
] | Logs the provided error message to both the error logger and the debug logger logging
instances.
Args:
msg: `str`. The error message to log. | [
"Logs",
"the",
"provided",
"error",
"message",
"to",
"both",
"the",
"error",
"logger",
"and",
"the",
"debug",
"logger",
"logging",
"instances",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L622-L631 |
nathankw/pulsarpy | pulsarpy/models.py | Model.write_response_html_to_file | def write_response_html_to_file(response,filename):
"""
An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly
beneficial when developing the server-side API. This method will write the response HTML
for viewing the error details in the browser.
Args:
response: `requests.models.Response` instance.
filename: `str`. The output file name.
"""
fout = open(filename,'w')
if not str(response.status_code).startswith("2"):
Model.debug_logger.debug(response.text)
fout.write(response.text)
fout.close() | python | def write_response_html_to_file(response,filename):
"""
An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly
beneficial when developing the server-side API. This method will write the response HTML
for viewing the error details in the browser.
Args:
response: `requests.models.Response` instance.
filename: `str`. The output file name.
"""
fout = open(filename,'w')
if not str(response.status_code).startswith("2"):
Model.debug_logger.debug(response.text)
fout.write(response.text)
fout.close() | [
"def",
"write_response_html_to_file",
"(",
"response",
",",
"filename",
")",
":",
"fout",
"=",
"open",
"(",
"filename",
",",
"'w'",
")",
"if",
"not",
"str",
"(",
"response",
".",
"status_code",
")",
".",
"startswith",
"(",
"\"2\"",
")",
":",
"Model",
".",
"debug_logger",
".",
"debug",
"(",
"response",
".",
"text",
")",
"fout",
".",
"write",
"(",
"response",
".",
"text",
")",
"fout",
".",
"close",
"(",
")"
] | An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly
beneficial when developing the server-side API. This method will write the response HTML
for viewing the error details in the browser.
Args:
response: `requests.models.Response` instance.
filename: `str`. The output file name. | [
"An",
"aid",
"in",
"troubleshooting",
"internal",
"application",
"errors",
"i",
".",
"e",
".",
"<Response",
"[",
"500",
"]",
">",
"to",
"be",
"mainly",
"beneficial",
"when",
"developing",
"the",
"server",
"-",
"side",
"API",
".",
"This",
"method",
"will",
"write",
"the",
"response",
"HTML",
"for",
"viewing",
"the",
"error",
"details",
"in",
"the",
"browesr",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L634-L648 |
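A quick sketch of using the helper above to capture a failing response body; the endpoint URL is a placeholder:

import requests
from pulsarpy.models import Model

res = requests.get("https://example.com/api/records")  # placeholder URL
Model.write_response_html_to_file(res, "debug_response.html")  # writes res.text to the file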
nathankw/pulsarpy | pulsarpy/models.py | Biosample.parent_ids | def parent_ids(self):
"""
Returns an array of parent Biosample IDs. If the current Biosample has a part_of relationship,
the Biosample referenced there will be returned. Otherwise, if the current Biosample was
generated from a pool of Biosamples (pooled_from_biosample_ids), then those will be returned.
Otherwise, the result will be an empty array.
"""
action = os.path.join(self.record_url, "parent_ids")
res = requests.get(url=action, headers=HEADERS, verify=False)
res.raise_for_status()
return res.json()["biosamples"] | python | def parent_ids(self):
"""
Returns an array of parent Biosample IDs. If the current Biosample has a part_of relationship,
the Biosample referenced there will be returned. Otherwise, if the current Biosample was
generated from a pool of Biosamples (pooled_from_biosample_ids), then those will be returned.
Otherwise, the result will be an empty array.
"""
action = os.path.join(self.record_url, "parent_ids")
res = requests.get(url=action, headers=HEADERS, verify=False)
res.raise_for_status()
return res.json()["biosamples"] | [
"def",
"parent_ids",
"(",
"self",
")",
":",
"action",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"record_url",
",",
"\"parent_ids\"",
")",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
"=",
"action",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"res",
".",
"raise_for_status",
"(",
")",
"return",
"res",
".",
"json",
"(",
")",
"[",
"\"biosamples\"",
"]"
] | Returns an array of parent Biosample IDs. If the current Biosample has a part_of relationship,
the Biosample referenced there will be returned. Otherwise, if the current Biosample was
generated from a pool of Biosamples (pooled_from_biosample_ids), then those will be returned.
Otherwise, the result will be an empty array. | [
"Returns",
"an",
"array",
"of",
"parent",
"Biosample",
"IDs",
".",
"If",
"the",
"current",
"Biosample",
"has",
"a",
"part_of",
"relationship",
"the",
"Biosampled",
"referenced",
"there",
"will",
"be",
"returned",
".",
"Otherwise",
"if",
"the",
"current",
"Biosample",
"was",
"generated",
"from",
"a",
"pool",
"of",
"Biosamples",
"(",
"pooled_from_biosample_ids",
")",
"then",
"those",
"will",
"be",
"returned",
".",
"Otherwise",
"the",
"result",
"will",
"be",
"an",
"empty",
"array",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L696-L706 |
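A sketch of walking a sample's parents with the endpoint above; the record ID is a placeholder:

from pulsarpy.models import Biosample

sample = Biosample(123)  # placeholder record ID
for parent_id in sample.parent_ids():  # empty list when there are no parents
    print("parent Biosample ID:", parent_id)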
nathankw/pulsarpy | pulsarpy/models.py | Biosample.find_first_wt_parent | def find_first_wt_parent(self, with_ip=False):
"""
Recursively looks at the part_of parent ancestry line (ignoring pooled_from parents) and returns
a parent Biosample ID if its wild_type attribute is True.
Args:
with_ip: `bool`. True means to restrict the search to the first parental Wild Type that
also has an Immunoblot linked to it, which may serve as a control against another
immunoblot. For example, it could be useful to compare the target protein bands in
Immunoblots between a Wild Type sample and a CRISPR eGFP-tagged gene in a
descendant sample.
Returns:
`False`: There isn't a WT parent, or there is but not one with an Immunoblot linked to
it (if the `with_ip` parameter is set to True).
`int`: The ID of the WT parent.
"""
parent_id = self.part_of_id
if not parent_id:
return False
parent = Biosample(parent_id)
if parent.wild_type:
if with_ip and parent.immunoblot_ids:
return parent.id
elif not with_ip:
return parent.id
return parent.find_first_wt_parent(with_ip=with_ip) | python | def find_first_wt_parent(self, with_ip=False):
"""
Recursively looks at the part_of parent ancestry line (ignoring pooled_from parents) and returns
a parent Biosample ID if its wild_type attribute is True.
Args:
with_ip: `bool`. True means to restrict the search to the first parental Wild Type that
also has an Immunoblot linked to it, which may serve as a control against another
immunoblot. For example, it could be useful to compare the target protein bands in
Immunoblots between a Wild Type sample and a CRISPR eGFP-tagged gene in a
descendant sample.
Returns:
`False`: There isn't a WT parent, or there is but not one with an Immunoblot linked to
it (if the `with_ip` parameter is set to True).
`int`: The ID of the WT parent.
"""
parent_id = self.part_of_id
if not parent_id:
return False
parent = Biosample(parent_id)
if parent.wild_type:
if with_ip and parent.immunoblot_ids:
return parent.id
elif not with_ip:
return parent.id
return parent.find_first_wt_parent(with_ip=with_ip) | [
"def",
"find_first_wt_parent",
"(",
"self",
",",
"with_ip",
"=",
"False",
")",
":",
"parent_id",
"=",
"self",
".",
"part_of_id",
"if",
"not",
"parent_id",
":",
"return",
"False",
"parent",
"=",
"Biosample",
"(",
"parent_id",
")",
"if",
"parent",
".",
"wild_type",
":",
"if",
"with_ip",
"and",
"parent",
".",
"immunoblot_ids",
":",
"return",
"parent",
".",
"id",
"elif",
"not",
"with_ip",
":",
"return",
"parent",
".",
"id",
"return",
"parent",
".",
"find_first_wt_parent",
"(",
"with_ip",
"=",
"with_ip",
")"
] | Recursively looks at the part_of parent ancestry line (ignoring pooled_from parents) and returns
a parent Biosample ID if its wild_type attribute is True.
Args:
with_ip: `bool`. True means to restrict the search to the first parental Wild Type that
also has an Immunoblot linked to it, which may serve as a control against another
immunoblot. For example, it could be useful to compare the target protein bands in
Immunoblots between a Wild Type sample and a CRISPR eGFP-tagged gene in a
descendant sample.
Returns:
`False`: There isn't a WT parent, or there is but not one with an Immunoblot linked to
it (if the `with_ip` parameter is set to True).
`int`: The ID of the WT parent. | [
"Recursively",
"looks",
"at",
"the",
"part_of",
"parent",
"ancestry",
"line",
"(",
"ignoring",
"pooled_from",
"parents",
")",
"and",
"returns",
"a",
"parent",
"Biosample",
"ID",
"if",
"its",
"wild_type",
"attribute",
"is",
"True",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L708-L734 |
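A sketch of the ancestry search above, used to pick an Immunoblot control; the record ID is a placeholder:

from pulsarpy.models import Biosample

sample = Biosample(123)  # placeholder record ID
wt_id = sample.find_first_wt_parent(with_ip=True)
if wt_id:
    print("wild-type control candidate:", wt_id)
else:
    print("no wild-type ancestor with an Immunoblot")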
nathankw/pulsarpy | pulsarpy/models.py | Document.upload | def upload(cls, path, document_type, is_protocol, description=""):
"""
Args:
path: `str`. The path to the document to upload.
document_type: `str`. DocumentType identified by the value of its name attribute.
is_protocol: `bool`.
description: `str`.
"""
file_name = os.path.basename(path)
mime_type = mimetypes.guess_type(file_name)[0]
data = base64.b64encode(open(path, 'rb').read())
temp_uri = str(data, "utf-8")
#href = "data:{mime_type};base64,{temp_uri}".format(mime_type=mime_type, temp_uri=temp_uri)
payload = {}
payload["content_type"] = mime_type
payload["data"] = temp_uri
payload["description"] = description
payload["document_type_id"] = DocumentType(document_type).id
payload["name"] = file_name
payload["is_protocol"] = is_protocol
cls.post(payload) | python | def upload(cls, path, document_type, is_protocol, description=""):
"""
Args:
path: `str`. The path to the document to upload.
document_type: `str`. DocumentType identified by the value of its name attribute.
is_protocol: `bool`.
description: `str`.
"""
file_name = os.path.basename(path)
mime_type = mimetypes.guess_type(file_name)[0]
data = base64.b64encode(open(path, 'rb').read())
temp_uri = str(data, "utf-8")
#href = "data:{mime_type};base64,{temp_uri}".format(mime_type=mime_type, temp_uri=temp_uri)
payload = {}
payload["content_type"] = mime_type
payload["data"] = temp_uri
payload["description"] = description
payload["document_type_id"] = DocumentType(document_type).id
payload["name"] = file_name
payload["is_protocol"] = is_protocol
cls.post(payload) | [
"def",
"upload",
"(",
"cls",
",",
"path",
",",
"document_type",
",",
"is_protocol",
",",
"description",
"=",
"\"\"",
")",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"mime_type",
"=",
"mimetypes",
".",
"guess_type",
"(",
"file_name",
")",
"[",
"0",
"]",
"data",
"=",
"base64",
".",
"b64encode",
"(",
"open",
"(",
"path",
",",
"'rb'",
")",
".",
"read",
"(",
")",
")",
"temp_uri",
"=",
"str",
"(",
"data",
",",
"\"utf-8\"",
")",
"#href = \"data:{mime_type};base64,{temp_uri}\".format(mime_type=mime_type, temp_uri=temp_uri) ",
"payload",
"=",
"{",
"}",
"payload",
"[",
"\"content_type\"",
"]",
"=",
"mime_type",
"payload",
"[",
"\"data\"",
"]",
"=",
"temp_uri",
"payload",
"[",
"\"description\"",
"]",
"=",
"description",
"payload",
"[",
"\"document_type_id\"",
"]",
"=",
"DocumentType",
"(",
"document_type",
")",
".",
"id",
"payload",
"[",
"\"name\"",
"]",
"=",
"file_name",
"payload",
"[",
"\"is_protocol\"",
"]",
"=",
"is_protocol",
"cls",
".",
"post",
"(",
"payload",
")"
] | Args:
path: `str`. The path to the document to upload.
document_type: `str`. DocumentType identified by the value of its name attribute.
is_protocol: `bool`.
description: `str`. | [
"Args",
":",
"path",
":",
"str",
".",
"The",
"path",
"to",
"the",
"document",
"to",
"upload",
".",
"document_type",
":",
"str",
".",
"DocumentType",
"identified",
"by",
"the",
"value",
"of",
"its",
"name",
"attribute",
".",
"is_protocol",
":",
"bool",
".",
"description",
":",
"str",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L838-L858 |
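A sketch of the uploader above; the file path and the DocumentType name are placeholders, not values from the source:

from pulsarpy.models import Document

Document.upload(
    path="protocols/sonication.pdf",  # placeholder path
    document_type="Protocol",         # hypothetical DocumentType name
    is_protocol=True,
    description="Sonication protocol used for sample prep",
)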
nathankw/pulsarpy | pulsarpy/models.py | Library.post | def post(cls, payload):
"""
A wrapper over Model.post() that handles the case where a Library has a PairedBarcode
and the user may have supplied the PairedBarcode in the form of index1-index2, i.e.
GATTTCCA-GGCGTCGA. This isn't the PairedBarcode's record name or a record ID, thus
Model.post() won't be able to figure out the PairedBarcode's ID to substitute in the payload
(via a call to cls.replace_name_with_id()). Thus, this wrapper will attempt to replace
a PairedBarcode sequence in the payload with a PairedBarcode ID, then pass the payload off
to Model.post().
"""
slpk_attr_name = "sequencing_library_prep_kit_id"
paired_bc_id_attr_name = "paired_barcode_id"
seq_reg = re.compile("^[ACGTN]+$")
if paired_bc_id_attr_name in payload:
try:
index1, index2 = payload[paired_bc_id_attr_name].upper().split("-")
except ValueError:
# Not in GATTTCCA-GGCGTCGA format so let it be.
return Model.post(cls=cls, payload=payload)
if not seq_reg.match(index1) or not seq_reg.match(index2):
# Not in GATTTCCA-GGCGTCGA format so let it be.
return Model.post(cls=cls, payload=payload)
if not slpk_attr_name in payload:
raise Exception("You need to include the " + slpk + " attribute name.")
slpk_id = SequencingLibraryPrepKit.replace_name_with_id(payload[slpk_attr_name])
payload[slpk_attr_name] = slpk_id
index1_id = Barcode.find_by(payload={slpk_attr_name: slpk_id, "index_number": 1, "sequence": index1}, require=True)["id"]
index2_id = Barcode.find_by(payload={slpk_attr_name: slpk_id, "index_number": 2, "sequence": index2}, require=True)["id"]
# Ensure that PairedBarcode for this index combo already exists:
pbc_payload = {"index1_id": index1_id, "index2_id": index2_id, slpk_attr_name: slpk_id}
pbc_exists = PairedBarcode.find_by(payload=pbc_payload)
if not pbc_exists:
pbc_exists = PairedBarcode.post(payload=pbc_payload)
pbc_id = pbc_exists["id"]
payload[paired_bc_id_attr_name] = pbc_id
return super().post(payload=payload) | python | def post(cls, payload):
"""
A wrapper over Model.post() that handles the case where a Library has a PairedBarcode
and the user may have supplied the PairedBarcode in the form of index1-index2, i.e.
GATTTCCA-GGCGTCGA. This isn't the PairedBarcode's record name or a record ID, thus
Model.post() won't be able to figure out the PairedBarcode's ID to substitute in the payload
(via a call to cls.replace_name_with_id()). Thus, this wrapper will attempt to replace
a PairedBarcode sequence in the payload with a PairedBarcode ID, then pass the payload off
to Model.post().
"""
slpk_attr_name = "sequencing_library_prep_kit_id"
paired_bc_id_attr_name = "paired_barcode_id"
seq_reg = re.compile("^[ACGTN]+$")
if paired_bc_id_attr_name in payload:
try:
index1, index2 = payload[paired_bc_id_attr_name].upper().split("-")
except ValueError:
# Not in GATTTCCA-GGCGTCGA format so let it be.
return Model.post(cls=cls, payload=payload)
if not seq_reg.match(index1) or not seq_reg.match(index2):
# Not in GATTTCCA-GGCGTCGA format so let it be.
return Model.post(cls=cls, payload=payload)
if not slpk_attr_name in payload:
raise Exception("You need to include the " + slpk + " attribute name.")
slpk_id = SequencingLibraryPrepKit.replace_name_with_id(payload[slpk_attr_name])
payload[slpk_attr_name] = slpk_id
index1_id = Barcode.find_by(payload={slpk_attr_name: slpk_id, "index_number": 1, "sequence": index1}, require=True)["id"]
index2_id = Barcode.find_by(payload={slpk_attr_name: slpk_id, "index_number": 2, "sequence": index2}, require=True)["id"]
# Ensure that PairedBarcode for this index combo already exists:
pbc_payload = {"index1_id": index1_id, "index2_id": index2_id, slpk_attr_name: slpk_id}
pbc_exists = PairedBarcode.find_by(payload=pbc_payload)
if not pbc_exists:
pbc_exists = PairedBarcode.post(payload=pbc_payload)
pbc_id = pbc_exists["id"]
payload[paired_bc_id_attr_name] = pbc_id
return super().post(payload=payload) | [
"def",
"post",
"(",
"cls",
",",
"payload",
")",
":",
"slpk_attr_name",
"=",
"\"sequencing_library_prep_kit_id\"",
"paired_bc_id_attr_name",
"=",
"\"paired_barcode_id\"",
"seq_reg",
"=",
"re",
".",
"compile",
"(",
"\"^[ACGTN]+$\"",
")",
"if",
"paired_bc_id_attr_name",
"in",
"payload",
":",
"try",
":",
"index1",
",",
"index2",
"=",
"payload",
"[",
"paired_bc_id_attr_name",
"]",
".",
"upper",
"(",
")",
".",
"split",
"(",
"\"-\"",
")",
"except",
"ValueError",
":",
"# Not in GATTTCCA-GGCGTCGA format so let it be. ",
"return",
"Model",
".",
"post",
"(",
"cls",
"=",
"cls",
",",
"payload",
"=",
"payload",
")",
"if",
"not",
"seq_reg",
".",
"match",
"(",
"index1",
")",
"or",
"not",
"seq_reg",
".",
"match",
"(",
"index2",
")",
":",
"# Not in GATTTCCA-GGCGTCGA format so let it be. ",
"return",
"Model",
".",
"post",
"(",
"cls",
"=",
"cls",
",",
"payload",
"=",
"payload",
")",
"if",
"not",
"slpk_attr_name",
"in",
"payload",
":",
"raise",
"Exception",
"(",
"\"You need to include the \"",
"+",
"slpk",
"+",
"\" attribute name.\"",
")",
"slpk_id",
"=",
"SequencingLibraryPrepKit",
".",
"replace_name_with_id",
"(",
"payload",
"[",
"slpk_attr_name",
"]",
")",
"payload",
"[",
"slpk_attr_name",
"]",
"=",
"slpk_id",
"index1_id",
"=",
"Barcode",
".",
"find_by",
"(",
"payload",
"=",
"{",
"slpk_attr_name",
":",
"slpk_id",
",",
"\"index_number\"",
":",
"1",
",",
"\"sequence\"",
":",
"index1",
"}",
",",
"require",
"=",
"True",
")",
"[",
"\"id\"",
"]",
"index2_id",
"=",
"Barcode",
".",
"find_by",
"(",
"payload",
"=",
"{",
"slpk_attr_name",
":",
"slpk_id",
",",
"\"index_number\"",
":",
"2",
",",
"\"sequence\"",
":",
"index2",
"}",
",",
"require",
"=",
"True",
")",
"[",
"\"id\"",
"]",
"# Ensure that PairedBarcode for this index combo already exists:",
"pbc_payload",
"=",
"{",
"\"index1_id\"",
":",
"index1_id",
",",
"\"index2_id\"",
":",
"index2_id",
",",
"slpk_attr_name",
":",
"slpk_id",
"}",
"pbc_exists",
"=",
"PairedBarcode",
".",
"find_by",
"(",
"payload",
"=",
"pbc_payload",
")",
"if",
"not",
"pbc_exists",
":",
"pbc_exists",
"=",
"PairedBarcode",
".",
"post",
"(",
"payload",
"=",
"pbc_payload",
")",
"pbc_id",
"=",
"pbc_exists",
"[",
"\"id\"",
"]",
"payload",
"[",
"paired_bc_id_attr_name",
"]",
"=",
"pbc_id",
"return",
"super",
"(",
")",
".",
"post",
"(",
"payload",
"=",
"payload",
")"
] | A wrapper over Model.post() that handles the case where a Library has a PairedBarcode
and the user may have supplied the PairedBarcode in the form of index1-index2, i.e.
GATTTCCA-GGCGTCGA. This isn't the PairedBarcode's record name or a record ID, thus
Model.post() won't be able to figure out the PairedBarcode's ID to substitute in the payload
(via a call to cls.replace_name_with_id()). Thus, this wrapper will attempt to replace
a PairedBarcode sequence in the payload with a PairedBarcode ID, then pass the payload off
to Model.post(). | [
"A",
"wrapper",
"over",
"Model",
".",
"post",
"()",
"that",
"handles",
"the",
"case",
"where",
"a",
"Library",
"has",
"a",
"PairedBarcode",
"and",
"the",
"user",
"may",
"have",
"supplied",
"the",
"PairedBarcode",
"in",
"the",
"form",
"of",
"index1",
"-",
"index2",
"i",
".",
"e",
".",
"GATTTCCA",
"-",
"GGCGTCGA",
".",
"This",
"isn",
"t",
"the",
"PairedBarcode",
"s",
"record",
"name",
"or",
"a",
"record",
"ID",
"thus",
"Model",
".",
"post",
"()",
"won",
"t",
"be",
"able",
"to",
"figure",
"out",
"the",
"PairedBarcode",
"s",
"ID",
"to",
"substitute",
"in",
"the",
"payload",
"(",
"via",
"a",
"call",
"to",
"cls",
".",
"replace_name_with_id",
"()",
")",
".",
"Thus",
"this",
"wrapper",
"will",
"attempt",
"to",
"replace",
"a",
"PairedBarcode",
"sequence",
"in",
"the",
"payload",
"with",
"a",
"PairedBarcode",
"ID",
"then",
"pass",
"the",
"payload",
"off",
"to",
"Model",
".",
"post",
"()",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L969-L1005 |
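A sketch of the index1-index2 shorthand handled above; the kit name, barcode sequences, and extra payload key are placeholders:

from pulsarpy.models import Library

payload = {
    "name": "demo library",                        # hypothetical attribute
    "sequencing_library_prep_kit_id": "Demo Kit",  # kit name, resolved to an ID
    "paired_barcode_id": "GATTTCCA-GGCGTCGA",      # shorthand swapped for a PairedBarcode ID
}
rec = Library.post(payload)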
nathankw/pulsarpy | pulsarpy/models.py | SequencingRequest.get_library_barcode_sequence_hash | def get_library_barcode_sequence_hash(self, inverse=False):
"""
Calls the SequencingRequest's get_library_barcode_sequence_hash server-side endpoint to
create a hash of the form {LibraryID -> barcode_sequence} for all Libraries on the
SequencingRequest.
Args:
inverse: `bool`. True means to invert the key and value pairs such that the barcode
sequence serves as the key.
Returns: `dict`.
"""
action = os.path.join(self.record_url, "get_library_barcode_sequence_hash")
res = requests.get(url=action, headers=HEADERS, verify=False)
res.raise_for_status()
res_json = res.json()
# Convert library ID from string to int
new_res = {}
for lib_id in res_json:
new_res[int(lib_id)] = res_json[lib_id]
res_json = new_res
if inverse:
rev = {}
for lib_id in res_json:
rev[res_json[lib_id]] = lib_id
res_json = rev
return res_json | python | def get_library_barcode_sequence_hash(self, inverse=False):
"""
Calls the SequencingRequest's get_library_barcode_sequence_hash server-side endpoint to
create a hash of the form {LibraryID -> barcode_sequence} for all Libraries on the
SequencingRequest.
Args:
inverse: `bool`. True means to invert the key and value pairs such that the barcode
sequence serves as the key.
Returns: `dict`.
"""
action = os.path.join(self.record_url, "get_library_barcode_sequence_hash")
res = requests.get(url=action, headers=HEADERS, verify=False)
res.raise_for_status()
res_json = res.json()
# Convert library ID from string to int
new_res = {}
for lib_id in res_json:
new_res[int(lib_id)] = res_json[lib_id]
res_json = new_res
if inverse:
rev = {}
for lib_id in res_json:
rev[res_json[lib_id]] = lib_id
res_json = rev
return res_json | [
"def",
"get_library_barcode_sequence_hash",
"(",
"self",
",",
"inverse",
"=",
"False",
")",
":",
"action",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"record_url",
",",
"\"get_library_barcode_sequence_hash\"",
")",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
"=",
"action",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"res",
".",
"raise_for_status",
"(",
")",
"res_json",
"=",
"res",
".",
"json",
"(",
")",
"# Convert library ID from string to int",
"new_res",
"=",
"{",
"}",
"for",
"lib_id",
"in",
"res_json",
":",
"new_res",
"[",
"int",
"(",
"lib_id",
")",
"]",
"=",
"res_json",
"[",
"lib_id",
"]",
"res_json",
"=",
"new_res",
"if",
"inverse",
":",
"rev",
"=",
"{",
"}",
"for",
"lib_id",
"in",
"res_json",
":",
"rev",
"[",
"res_json",
"[",
"lib_id",
"]",
"]",
"=",
"lib_id",
"res_json",
"=",
"rev",
"return",
"res_json"
] | Calls the SequencingRequest's get_library_barcode_sequence_hash server-side endpoint to
create a hash of the form {LibraryID -> barcode_sequence} for all Libraries on the
SequencingRequest.
Args:
inverse: `bool`. True means to invert the key and value pairs such that the barcode
sequence serves as the key.
Returns: `dict`. | [
"Calls",
"the",
"SequencingRequest",
"s",
"get_library_barcode_sequence_hash",
"server",
"-",
"side",
"endpoint",
"to",
"create",
"a",
"hash",
"of",
"the",
"form",
"{",
"LibraryID",
"-",
">",
"barcode_sequence",
"}",
"for",
"all",
"Libraries",
"on",
"the",
"SequencingRequest",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L1047-L1074 |
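A sketch of the inverted lookup above, e.g. for demultiplexing; the record ID and barcode sequence are placeholders:

from pulsarpy.models import SequencingRequest

sreq = SequencingRequest(55)  # placeholder record ID
by_barcode = sreq.get_library_barcode_sequence_hash(inverse=True)  # {sequence: library_id}
lib_id = by_barcode.get("GATTTCCA")  # placeholder barcode sequence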
nathankw/pulsarpy | pulsarpy/models.py | SequencingRun.library_sequencing_results | def library_sequencing_results(self):
"""
Generates a dict. where each key is a Library ID on the SequencingRequest and each value
is the associated SequencingResult. Libraries that don't yet have a SequencingResult are
not included in the dict.
"""
sres_ids = self.sequencing_result_ids
res = {}
for i in sres_ids:
sres = SequencingResult(i)
res[sres.library_id] = sres
return res | python | def library_sequencing_results(self):
"""
Generates a dict. where each key is a Library ID on the SequencingRequest and each value
is the associated SequencingResult. Libraries that don't yet have a SequencingResult are
not included in the dict.
"""
sres_ids = self.sequencing_result_ids
res = {}
for i in sres_ids:
sres = SequencingResult(i)
res[sres.library_id] = sres
return res | [
"def",
"library_sequencing_results",
"(",
"self",
")",
":",
"sres_ids",
"=",
"self",
".",
"sequencing_result_ids",
"res",
"=",
"{",
"}",
"for",
"i",
"in",
"sres_ids",
":",
"sres",
"=",
"SequencingResult",
"(",
"i",
")",
"res",
"[",
"sres",
".",
"library_id",
"]",
"=",
"sres",
"return",
"res"
] | Generates a dict. where each key is a Library ID on the SequencingRequest and each value
is the associated SequencingResult. Libraries that don't yet have a SequencingResult are
not included in the dict. | [
"Generates",
"a",
"dict",
".",
"where",
"each",
"key",
"is",
"a",
"Library",
"ID",
"on",
"the",
"SequencingRequest",
"and",
"each",
"value",
"is",
"the",
"associated",
"SequencingResult",
".",
"Libraries",
"that",
"aren",
"t",
"yet",
"with",
"a",
"SequencingResult",
"are",
"not",
"inlcuded",
"in",
"the",
"dict",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L1098-L1109 |
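A sketch pairing each Library with its result via the method above; the run ID is a placeholder and the .id attribute on the result instance is an assumption:

from pulsarpy.models import SequencingRun

run = SequencingRun(7)  # placeholder record ID
for lib_id, sres in run.library_sequencing_results().items():
    print("Library", lib_id, "-> SequencingResult", sres.id)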
nathankw/pulsarpy | pulsarpy/models.py | User.unarchive_user | def unarchive_user(self, user_id):
"""Unarchives the user with the specified user ID.
Args:
user_id: `int`. The ID of the user to unarchive.
Returns:
`NoneType`: None.
"""
url = self.record_url + "/unarchive"
res = requests.patch(url=url, json={"user_id": user_id}, headers=HEADERS, verify=False)
self.write_response_html_to_file(res,"bob.html")
res.raise_for_status() | python | def unarchive_user(self, user_id):
"""Unarchives the user with the specified user ID.
Args:
user_id: `int`. The ID of the user to unarchive.
Returns:
`NoneType`: None.
"""
url = self.record_url + "/unarchive"
res = requests.patch(url=url, json={"user_id": user_id}, headers=HEADERS, verify=False)
self.write_response_html_to_file(res,"bob.html")
res.raise_for_status() | [
"def",
"unarchive_user",
"(",
"self",
",",
"user_id",
")",
":",
"url",
"=",
"self",
".",
"record_url",
"+",
"\"/unarchive\"",
"res",
"=",
"requests",
".",
"patch",
"(",
"url",
"=",
"url",
",",
"json",
"=",
"{",
"\"user_id\"",
":",
"user_id",
"}",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"self",
".",
"write_response_html_to_file",
"(",
"res",
",",
"\"bob.html\"",
")",
"res",
".",
"raise_for_status",
"(",
")"
] | Unarchives the user with the specified user ID.
Args:
user_id: `int`. The ID of the user to unarchive.
Returns:
`NoneType`: None. | [
"Unarchives",
"the",
"user",
"with",
"the",
"specified",
"user",
"ID",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L1179-L1191 |
nathankw/pulsarpy | pulsarpy/models.py | User.remove_api_key | def remove_api_key(self):
"""
Removes the user's existing API key, if present, and sets the current instance's 'api_key'
attribute to the empty string.
Returns:
`NoneType`: None.
"""
url = self.record_url + "/remove_api_key"
res = requests.patch(url=url, headers=HEADERS, verify=False)
res.raise_for_status()
self.api_key = "" | python | def remove_api_key(self):
"""
Removes the user's existing API key, if present, and sets the current instance's 'api_key'
attribute to the empty string.
Returns:
`NoneType`: None.
"""
url = self.record_url + "/remove_api_key"
res = requests.patch(url=url, headers=HEADERS, verify=False)
res.raise_for_status()
self.api_key = "" | [
"def",
"remove_api_key",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"record_url",
"+",
"\"/remove_api_key\"",
"res",
"=",
"requests",
".",
"patch",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"HEADERS",
",",
"verify",
"=",
"False",
")",
"res",
".",
"raise_for_status",
"(",
")",
"self",
".",
"api_key",
"=",
"\"\""
] | Removes the user's existing API key, if present, and sets the current instance's 'api_key'
attribute to the empty string.
Returns:
`NoneType`: None. | [
"Removes",
"the",
"user",
"s",
"existing",
"API",
"key",
"if",
"present",
"and",
"sets",
"the",
"current",
"instance",
"s",
"api_key",
"attribute",
"to",
"the",
"empty",
"string",
"."
] | train | https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L1206-L1217 |
Vital-Fernandez/dazer | bin/lib/ssp_functions/ssp_synthesis_tools.py | ssp_fitter.generate_synthObs | def generate_synthObs(self, bases_wave, bases_flux, basesCoeff, Av_star, z_star, sigma_star, resample_range = None, resample_int = 1):
'''basesWave: Bases wavelength must be at rest'''
nbases = basesCoeff.shape[0]
bases_wave_resam = arange(int(resample_range[0]), int(resample_range[-1]), resample_int, dtype=float)
npix_resample = len(bases_wave_resam)
#Resampling the range
bases_flux_resam = empty((nbases, npix_resample))
for i in range(nbases):
# print bases_wave[i][0], bases_wave[i][-1]
# print bases_wave_resam[0], bases_wave_resam[-1]
bases_flux_resam[i,:] = interp1d(bases_wave[i], bases_flux[i], bounds_error=True)(bases_wave_resam)
#Compute physical parameters
synth_wave = bases_wave_resam * (1 + z_star)
Av_vector = Av_star * ones(nbases)
Xx_redd = CCM89_Bal07(3.4, bases_wave_resam)
r_sigma = sigma_star/(synth_wave[1] - synth_wave[0])
#Defining empty kernel
box = int(3 * r_sigma) if int(3 * r_sigma) < 3 else 3
kernel_len = 2 * box + 1
kernel = zeros((1, kernel_len))
kernel_range = arange(0, 2 * box + 1)
#Generating the kernel with sigma (the norm factor is the sum of the gaussian)
kernel[0,:] = exp(-0.5 * ((square(kernel_range-box)/r_sigma)))
norm = np_sum(kernel[0,:])
kernel = kernel / norm
#Convolve bases with respect to kernel for dispersion velocity calculation
bases_grid_convolve = convolve2d(bases_flux_resam, kernel, mode='same', boundary='symm')
#Interpolate bases to wavelength range
interBases_matrix = (interp1d(bases_wave_resam, bases_grid_convolve, axis=1, bounds_error=True)(bases_wave_resam)).T
#Generate final flux model including dust
dust_attenuation = power(10, -0.4 * outer(Xx_redd, Av_vector))
bases_grid_model = interBases_matrix * dust_attenuation
#Generate combined flux
synth_flux = np_sum(basesCoeff.T * bases_grid_model, axis=1)
return synth_wave, synth_flux | python | def generate_synthObs(self, bases_wave, bases_flux, basesCoeff, Av_star, z_star, sigma_star, resample_range = None, resample_int = 1):
'''basesWave: Bases wavelength must be at rest'''
nbases = basesCoeff.shape[0]
bases_wave_resam = arange(int(resample_range[0]), int(resample_range[-1]), resample_int, dtype=float)
npix_resample = len(bases_wave_resam)
#Resampling the range
bases_flux_resam = empty((nbases, npix_resample))
for i in range(nbases):
# print bases_wave[i][0], bases_wave[i][-1]
# print bases_wave_resam[0], bases_wave_resam[-1]
bases_flux_resam[i,:] = interp1d(bases_wave[i], bases_flux[i], bounds_error=True)(bases_wave_resam)
#Compute physical parameters
synth_wave = bases_wave_resam * (1 + z_star)
Av_vector = Av_star * ones(nbases)
Xx_redd = CCM89_Bal07(3.4, bases_wave_resam)
r_sigma = sigma_star/(synth_wave[1] - synth_wave[0])
#Defining empty kernel
box = int(3 * r_sigma) if int(3 * r_sigma) < 3 else 3
kernel_len = 2 * box + 1
kernel = zeros((1, kernel_len))
kernel_range = arange(0, 2 * box + 1)
#Generating the kernel with sigma (the norm factor is the sum of the gaussian)
kernel[0,:] = exp(-0.5 * ((square(kernel_range-box)/r_sigma)))
norm = np_sum(kernel[0,:])
kernel = kernel / norm
#Convolve bases with respect to kernel for dispersion velocity calculation
bases_grid_convolve = convolve2d(bases_flux_resam, kernel, mode='same', boundary='symm')
#Interpolate bases to wavelength range
interBases_matrix = (interp1d(bases_wave_resam, bases_grid_convolve, axis=1, bounds_error=True)(bases_wave_resam)).T
#Generate final flux model including dust
dust_attenuation = power(10, -0.4 * outer(Xx_redd, Av_vector))
bases_grid_model = interBases_matrix * dust_attenuation
#Generate combined flux
synth_flux = np_sum(basesCoeff.T * bases_grid_model, axis=1)
return synth_wave, synth_flux | [
"def",
"generate_synthObs",
"(",
"self",
",",
"bases_wave",
",",
"bases_flux",
",",
"basesCoeff",
",",
"Av_star",
",",
"z_star",
",",
"sigma_star",
",",
"resample_range",
"=",
"None",
",",
"resample_int",
"=",
"1",
")",
":",
"nbases",
"=",
"basesCoeff",
".",
"shape",
"[",
"0",
"]",
"bases_wave_resam",
"=",
"arange",
"(",
"int",
"(",
"resample_range",
"[",
"0",
"]",
")",
",",
"int",
"(",
"resample_range",
"[",
"-",
"1",
"]",
")",
",",
"resample_int",
",",
"dtype",
"=",
"float",
")",
"npix_resample",
"=",
"len",
"(",
"bases_wave_resam",
")",
"#Resampling the range",
"bases_flux_resam",
"=",
"empty",
"(",
"(",
"nbases",
",",
"npix_resample",
")",
")",
"for",
"i",
"in",
"range",
"(",
"nbases",
")",
":",
"# print bases_wave[i][0], bases_wave[i][-1]",
"# print bases_wave_resam[0], bases_wave_resam[-1]",
"bases_flux_resam",
"[",
"i",
",",
":",
"]",
"=",
"interp1d",
"(",
"bases_wave",
"[",
"i",
"]",
",",
"bases_flux",
"[",
"i",
"]",
",",
"bounds_error",
"=",
"True",
")",
"(",
"bases_wave_resam",
")",
"#Display physical parameters",
"synth_wave",
"=",
"bases_wave_resam",
"*",
"(",
"1",
"+",
"z_star",
")",
"Av_vector",
"=",
"Av_star",
"*",
"ones",
"(",
"nbases",
")",
"Xx_redd",
"=",
"CCM89_Bal07",
"(",
"3.4",
",",
"bases_wave_resam",
")",
"r_sigma",
"=",
"sigma_star",
"/",
"(",
"synth_wave",
"[",
"1",
"]",
"-",
"synth_wave",
"[",
"0",
"]",
")",
"#Defining empty kernel",
"box",
"=",
"int",
"(",
"3",
"*",
"r_sigma",
")",
"if",
"int",
"(",
"3",
"*",
"r_sigma",
")",
"<",
"3",
"else",
"3",
"kernel_len",
"=",
"2",
"*",
"box",
"+",
"1",
"kernel",
"=",
"zeros",
"(",
"(",
"1",
",",
"kernel_len",
")",
")",
"kernel_range",
"=",
"arange",
"(",
"0",
",",
"2",
"*",
"box",
"+",
"1",
")",
"#Generating the kernel with sigma (the norm factor is the sum of the gaussian)",
"kernel",
"[",
"0",
",",
":",
"]",
"=",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"(",
"square",
"(",
"kernel_range",
"-",
"box",
")",
"/",
"r_sigma",
")",
")",
")",
"norm",
"=",
"np_sum",
"(",
"kernel",
"[",
"0",
",",
":",
"]",
")",
"kernel",
"=",
"kernel",
"/",
"norm",
"#Convove bases with respect to kernel for dispersion velocity calculation",
"bases_grid_convolve",
"=",
"convolve2d",
"(",
"bases_flux_resam",
",",
"kernel",
",",
"mode",
"=",
"'same'",
",",
"boundary",
"=",
"'symm'",
")",
"#Interpolate bases to wavelength range",
"interBases_matrix",
"=",
"(",
"interp1d",
"(",
"bases_wave_resam",
",",
"bases_grid_convolve",
",",
"axis",
"=",
"1",
",",
"bounds_error",
"=",
"True",
")",
"(",
"bases_wave_resam",
")",
")",
".",
"T",
"#Generate final flux model including dust ",
"dust_attenuation",
"=",
"power",
"(",
"10",
",",
"-",
"0.4",
"*",
"outer",
"(",
"Xx_redd",
",",
"Av_vector",
")",
")",
"bases_grid_model",
"=",
"interBases_matrix",
"*",
"dust_attenuation",
"#Generate combined flux",
"synth_flux",
"=",
"np_sum",
"(",
"basesCoeff",
".",
"T",
"*",
"bases_grid_model",
",",
"axis",
"=",
"1",
")",
"return",
"synth_wave",
",",
"synth_flux"
] | basesWave: Bases wavelength must be at rest | [
"basesWave",
":",
"Bases",
"wavelength",
"must",
"be",
"at",
"rest"
] | train | https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/ssp_functions/ssp_synthesis_tools.py#L683-L728 |
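A sketch driving the generator above with dummy bases. The import path follows the record's file path, and the no-argument construction of ssp_fitter plus all physical values below are assumptions for illustration:

import numpy as np
from lib.ssp_functions.ssp_synthesis_tools import ssp_fitter  # module path per the record; importability is assumed

fitter = ssp_fitter()  # assumed no-argument construction
bases_wave = [np.linspace(3500.0, 7500.0, 4000)] * 3  # three rest-frame dummy bases
bases_flux = [np.ones(4000)] * 3                      # flat dummy spectra
coeffs = np.array([0.5, 0.3, 0.2])                    # made-up population weights
synth_wave, synth_flux = fitter.generate_synthObs(
    bases_wave, bases_flux, coeffs,
    Av_star=0.3, z_star=0.01, sigma_star=1.5,
    resample_range=(4000, 7000), resample_int=1,
)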
paydunya/paydunya-python | paydunya/__init__.py | Payment._process | def _process(self, resource=None, data={}):
"""Processes the current transaction
Sends an HTTP request to the PAYDUNYA API server
"""
# use object's data if no data is passed
_data = data or self._data
rsc_url = self.get_rsc_endpoint(resource)
if _data:
req = requests.post(rsc_url, data=json.dumps(_data),
headers=self.headers)
else:
req = requests.get(rsc_url, params=_data,
headers=self.headers)
if req.status_code == 200:
self._response = json.loads(req.text)
if int(self._response['response_code']) == 00:
return (True, self._response)
else:
return (False, self._response['response_text'])
else:
return (500, "Request Failed") | python | def _process(self, resource=None, data={}):
"""Processes the current transaction
Sends an HTTP request to the PAYDUNYA API server
"""
# use object's data if no data is passed
_data = data or self._data
rsc_url = self.get_rsc_endpoint(resource)
if _data:
req = requests.post(rsc_url, data=json.dumps(_data),
headers=self.headers)
else:
req = requests.get(rsc_url, params=_data,
headers=self.headers)
if req.status_code == 200:
self._response = json.loads(req.text)
if int(self._response['response_code']) == 00:
return (True, self._response)
else:
return (False, self._response['response_text'])
else:
return (500, "Request Failed") | [
"def",
"_process",
"(",
"self",
",",
"resource",
"=",
"None",
",",
"data",
"=",
"{",
"}",
")",
":",
"# use object's data if no data is passed",
"_data",
"=",
"data",
"or",
"self",
".",
"_data",
"rsc_url",
"=",
"self",
".",
"get_rsc_endpoint",
"(",
"resource",
")",
"if",
"_data",
":",
"req",
"=",
"requests",
".",
"post",
"(",
"rsc_url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"_data",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"else",
":",
"req",
"=",
"requests",
".",
"get",
"(",
"rsc_url",
",",
"params",
"=",
"_data",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"if",
"req",
".",
"status_code",
"==",
"200",
":",
"self",
".",
"_response",
"=",
"json",
".",
"loads",
"(",
"req",
".",
"text",
")",
"if",
"int",
"(",
"self",
".",
"_response",
"[",
"'response_code'",
"]",
")",
"==",
"00",
":",
"return",
"(",
"True",
",",
"self",
".",
"_response",
")",
"else",
":",
"return",
"(",
"False",
",",
"self",
".",
"_response",
"[",
"'response_text'",
"]",
")",
"else",
":",
"return",
"(",
"500",
",",
"\"Request Failed\"",
")"
] | Processes the current transaction
Sends an HTTP request to the PAYDUNYA API server | [
"Processes",
"the",
"current",
"transaction"
] | train | https://github.com/paydunya/paydunya-python/blob/bb55791e2814788aec74162d9d78970815f37c30/paydunya/__init__.py#L86-L107 |
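_process() is an internal helper; a hypothetical subclass call site might look like the sketch below. The resource path and payload are placeholders, and instantiating Payment without arguments is an assumption:

import paydunya

class DemoResource(paydunya.Payment):
    def create(self, data):
        return self._process("demo-resource/create", data)  # placeholder resource path

ok, response = DemoResource().create({"amount": 5000})  # hypothetical payload
if ok is True:
    print(response)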
paydunya/paydunya-python | paydunya/__init__.py | Payment.add_header | def add_header(self, header):
"""Add a custom HTTP header to the client's request headers"""
if type(header) is dict:
self._headers.update(header)
else:
raise ValueError(
"Dictionary expected, got '%s' instead" % type(header)
) | python | def add_header(self, header):
"""Add a custom HTTP header to the client's request headers"""
if type(header) is dict:
self._headers.update(header)
else:
raise ValueError(
"Dictionary expected, got '%s' instead" % type(header)
) | [
"def",
"add_header",
"(",
"self",
",",
"header",
")",
":",
"if",
"type",
"(",
"header",
")",
"is",
"dict",
":",
"self",
".",
"_headers",
".",
"update",
"(",
"header",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Dictionary expected, got '%s' instead\"",
"%",
"type",
"(",
"header",
")",
")"
] | Add a custom HTTP header to the client's request headers | [
"Add",
"a",
"custom",
"HTTP",
"header",
"to",
"the",
"client",
"s",
"request",
"headers"
] | train | https://github.com/paydunya/paydunya-python/blob/bb55791e2814788aec74162d9d78970815f37c30/paydunya/__init__.py#L114-L121 |
novopl/peltak | src/peltak/extra/changelog/commands.py | changelog_cli | def changelog_cli(ctx):
# type: () -> None
""" Generate changelog from commit messages. """
if ctx.invoked_subcommand:
return
from peltak.core import shell
from . import logic
shell.cprint(logic.changelog()) | python | def changelog_cli(ctx):
# type: () -> None
""" Generate changelog from commit messages. """
if ctx.invoked_subcommand:
return
from peltak.core import shell
from . import logic
shell.cprint(logic.changelog()) | [
"def",
"changelog_cli",
"(",
"ctx",
")",
":",
"# type: () -> None",
"if",
"ctx",
".",
"invoked_subcommand",
":",
"return",
"from",
"peltak",
".",
"core",
"import",
"shell",
"from",
".",
"import",
"logic",
"shell",
".",
"cprint",
"(",
"logic",
".",
"changelog",
"(",
")",
")"
] | Generate changelog from commit messages. | [
"Generate",
"changelog",
"from",
"commit",
"messages",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/changelog/commands.py#L24-L32 |
Varkal/chuda | chuda/arguments.py | Argument.convert_to_argument | def convert_to_argument(self):
'''
Convert the Argument object to a tuple used in :meth:`~argparse.ArgumentParser.add_argument` calls on the parser
'''
field_list = [
"action", "nargs", "const", "default", "type",
"choices", "required", "help", "metavar", "dest"
]
return (
self.name,
{
field: getattr(self, field) for field in field_list if getattr(self, field) is not None
}
) | python | def convert_to_argument(self):
'''
Convert the Argument object to a tuple used in :meth:`~argparse.ArgumentParser.add_argument` calls on the parser
'''
field_list = [
"action", "nargs", "const", "default", "type",
"choices", "required", "help", "metavar", "dest"
]
return (
self.name,
{
field: getattr(self, field) for field in field_list if getattr(self, field) is not None
}
) | [
"def",
"convert_to_argument",
"(",
"self",
")",
":",
"field_list",
"=",
"[",
"\"action\"",
",",
"\"nargs\"",
",",
"\"const\"",
",",
"\"default\"",
",",
"\"type\"",
",",
"\"choices\"",
",",
"\"required\"",
",",
"\"help\"",
",",
"\"metavar\"",
",",
"\"dest\"",
"]",
"return",
"(",
"self",
".",
"name",
",",
"{",
"field",
":",
"getattr",
"(",
"self",
",",
"field",
")",
"for",
"field",
"in",
"field_list",
"if",
"getattr",
"(",
"self",
",",
"field",
")",
"is",
"not",
"None",
"}",
")"
] | Convert the Argument object to a tuple used in :meth:`~argparse.ArgumentParser.add_argument` calls on the parser | [
"Convert",
"the",
"Argument",
"object",
"to",
"a",
"tuple",
"use",
"in",
":",
"meth",
":",
"~argparse",
".",
"ArgumentParser",
".",
"add_argument",
"calls",
"on",
"the",
"parser"
] | train | https://github.com/Varkal/chuda/blob/0d93b716dede35231c21be97bcc19a656655983f/chuda/arguments.py#L52-L67 |
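A sketch of feeding the converted tuple into argparse; the keyword shape of the Argument constructor is an assumption:

import argparse
from chuda.arguments import Argument

arg = Argument(name="path", help="input file")  # keyword construction is assumed
parser = argparse.ArgumentParser()
name, options = arg.convert_to_argument()
parser.add_argument(name, **options)  # equivalent to parser.add_argument("path", help="input file")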
Varkal/chuda | chuda/arguments.py | Option.get_default_name | def get_default_name(self):
'''
Return the default generated name to store value on the parser for this option.
e.g. An option *['-s', '--use-ssl']* will generate the *use_ssl* name
Returns:
str: the default name of the option
'''
long_names = [name for name in self.name if name.startswith("--")]
short_names = [name for name in self.name if not name.startswith("--")]
if long_names:
return to_snake_case(long_names[0].lstrip("-"))
return to_snake_case(short_names[0].lstrip("-")) | python | def get_default_name(self):
'''
Return the default generated name to store value on the parser for this option.
e.g. An option *['-s', '--use-ssl']* will generate the *use_ssl* name
Returns:
str: the default name of the option
'''
long_names = [name for name in self.name if name.startswith("--")]
short_names = [name for name in self.name if not name.startswith("--")]
if long_names:
return to_snake_case(long_names[0].lstrip("-"))
return to_snake_case(short_names[0].lstrip("-")) | [
"def",
"get_default_name",
"(",
"self",
")",
":",
"long_names",
"=",
"[",
"name",
"for",
"name",
"in",
"self",
".",
"name",
"if",
"name",
".",
"startswith",
"(",
"\"--\"",
")",
"]",
"short_names",
"=",
"[",
"name",
"for",
"name",
"in",
"self",
".",
"name",
"if",
"not",
"name",
".",
"startswith",
"(",
"\"--\"",
")",
"]",
"if",
"long_names",
":",
"return",
"to_snake_case",
"(",
"long_names",
"[",
"0",
"]",
".",
"lstrip",
"(",
"\"-\"",
")",
")",
"return",
"to_snake_case",
"(",
"short_names",
"[",
"0",
"]",
".",
"lstrip",
"(",
"\"-\"",
")",
")"
] | Return the default generated name to store value on the parser for this option.
e.g. An option *['-s', '--use-ssl']* will generate the *use_ssl* name
Returns:
str: the default name of the option | [
"Return",
"the",
"default",
"generated",
"name",
"to",
"store",
"value",
"on",
"the",
"parser",
"for",
"this",
"option",
"."
] | train | https://github.com/Varkal/chuda/blob/0d93b716dede35231c21be97bcc19a656655983f/chuda/arguments.py#L84-L99 |
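A sketch of the naming rule above (long flags take precedence and are snake_cased, as the docstring's own example states); the keyword shape of the Option constructor is an assumption:

from chuda.arguments import Option

opt = Option(name=["-s", "--use-ssl"])  # keyword construction is assumed
assert opt.get_default_name() == "use_ssl"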
gpiantoni/bidso | bidso/files.py | file_Core.get_filename | def get_filename(self, base_dir=None, modality=None):
"""Construct filename based on the attributes.
Parameters
----------
base_dir : Path
path of the root directory. If specified, the return value is a Path,
with base_dir / sub-XXX / (ses-XXX /) modality / filename
otherwise the return value is a string.
modality : str
overwrite value for modality (i.e. the directory inside subject/session).
This is necessary because sometimes the modality attribute is ambiguous.
Returns
-------
str or Path
str of the filename if base_dir is not specified, otherwise the full
Path
"""
filename = 'sub-' + self.subject
if self.session is not None:
filename += '_ses-' + self.session
if self.task is not None:
filename += '_task-' + self.task
if self.run is not None and self.direction is None:
filename += '_run-' + self.run
if self.acquisition is not None:
filename += '_acq-' + self.acquisition
if self.direction is not None:
filename += '_dir-' + self.direction
if self.run is not None and self.direction is not None:
filename += '_run-' + self.run
if self.modality is not None:
filename += '_' + self.modality
if self.extension is not None:
filename += self.extension
if base_dir is None:
return filename
else:
dir_name = base_dir / ('sub-' + self.subject)
if self.session is not None:
dir_name /= 'ses-' + self.session
if modality is not None:
dir_name /= modality
else:
dir_name = add_modality(dir_name, self.modality)
return dir_name / filename | python | def get_filename(self, base_dir=None, modality=None):
"""Construct filename based on the attributes.
Parameters
----------
base_dir : Path
path of the root directory. If specified, the return value is a Path,
with base_dir / sub-XXX / (ses-XXX /) modality / filename
otherwise the return value is a string.
modality : str
overwrite value for modality (i.e. the directory inside subject/session).
This is necessary because sometimes the modality attribute is ambiguous.
Returns
-------
str or Path
str of the filename if base_dir is not specified, otherwise the full
Path
"""
filename = 'sub-' + self.subject
if self.session is not None:
filename += '_ses-' + self.session
if self.task is not None:
filename += '_task-' + self.task
if self.run is not None and self.direction is None:
filename += '_run-' + self.run
if self.acquisition is not None:
filename += '_acq-' + self.acquisition
if self.direction is not None:
filename += '_dir-' + self.direction
if self.run is not None and self.direction is not None:
filename += '_run-' + self.run
if self.modality is not None:
filename += '_' + self.modality
if self.extension is not None:
filename += self.extension
if base_dir is None:
return filename
else:
dir_name = base_dir / ('sub-' + self.subject)
if self.session is not None:
dir_name /= 'ses-' + self.session
if modality is not None:
dir_name /= modality
else:
dir_name = add_modality(dir_name, self.modality)
return dir_name / filename | [
"def",
"get_filename",
"(",
"self",
",",
"base_dir",
"=",
"None",
",",
"modality",
"=",
"None",
")",
":",
"filename",
"=",
"'sub-'",
"+",
"self",
".",
"subject",
"if",
"self",
".",
"session",
"is",
"not",
"None",
":",
"filename",
"+=",
"'_ses-'",
"+",
"self",
".",
"session",
"if",
"self",
".",
"task",
"is",
"not",
"None",
":",
"filename",
"+=",
"'_task-'",
"+",
"self",
".",
"task",
"if",
"self",
".",
"run",
"is",
"not",
"None",
"and",
"self",
".",
"direction",
"is",
"None",
":",
"filename",
"+=",
"'_run-'",
"+",
"self",
".",
"run",
"if",
"self",
".",
"acquisition",
"is",
"not",
"None",
":",
"filename",
"+=",
"'_acq-'",
"+",
"self",
".",
"acquisition",
"if",
"self",
".",
"direction",
"is",
"not",
"None",
":",
"filename",
"+=",
"'_dir-'",
"+",
"self",
".",
"direction",
"if",
"self",
".",
"run",
"is",
"not",
"None",
"and",
"self",
".",
"direction",
"is",
"not",
"None",
":",
"filename",
"+=",
"'_run-'",
"+",
"self",
".",
"run",
"if",
"self",
".",
"modality",
"is",
"not",
"None",
":",
"filename",
"+=",
"'_'",
"+",
"self",
".",
"modality",
"if",
"self",
".",
"extension",
"is",
"not",
"None",
":",
"filename",
"+=",
"self",
".",
"extension",
"if",
"base_dir",
"is",
"None",
":",
"return",
"filename",
"else",
":",
"dir_name",
"=",
"base_dir",
"/",
"(",
"'sub-'",
"+",
"self",
".",
"subject",
")",
"if",
"self",
".",
"session",
"is",
"not",
"None",
":",
"dir_name",
"/=",
"'ses-'",
"+",
"self",
".",
"session",
"if",
"modality",
"is",
"not",
"None",
":",
"dir_name",
"/=",
"modality",
"else",
":",
"dir_name",
"=",
"add_modality",
"(",
"dir_name",
",",
"self",
".",
"modality",
")",
"return",
"dir_name",
"/",
"filename"
] | Construct filename based on the attributes.
Parameters
----------
base_dir : Path
path of the root directory. If specified, the return value is a Path,
with base_dir / sub-XXX / (ses-XXX /) modality / filename
otherwise the return value is a string.
modality : str
overwrite value for modality (i.e. the directory inside subject/session).
This is necessary because sometimes the modality attribute is ambiguous.
Returns
-------
str or Path
str of the filename if base_dir is not specified, otherwise the full
Path | [
"Construct",
"filename",
"based",
"on",
"the",
"attributes",
"."
] | train | https://github.com/gpiantoni/bidso/blob/af163b921ec4e3d70802de07f174de184491cfce/bidso/files.py#L37-L87 |
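A sketch of rebuilding a full path with the method above, assuming file_Core can be constructed by parsing a BIDS-style filename (an assumption; the filename and base directory are placeholders):

from pathlib import Path
from bidso.files import file_Core

f = file_Core("sub-01_ses-day1_task-motor_run-1_bold.nii.gz")  # assumed filename-parsing constructor
print(f.get_filename())                        # bare filename string
print(f.get_filename(base_dir=Path("/data")))  # /data/sub-01/ses-day1/<modality dir>/<filename>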
gpiantoni/bidso | bidso/files.py | file_Tsv.get | def get(self, filter_lambda=None, map_lambda=None):
"""Select elements of the TSV, using python filter and map.
Parameters
----------
filter_lambda : function
function to filter the tsv rows (the function needs to return True/False)
map_lambda : function
function to select the tsv columns
Returns
-------
list
list (not a generator, because that's the most common case)
Examples
--------
To select all the channels in one list, called "good_labels"::
>>> file_Tsv.get(lambda x: x['name'] in good_labels)
To select all the names of the channels:
>>> file_Tsv.get(map_lambda=lambda x: x['name'])
"""
if filter_lambda is None:
filter_lambda = lambda x: True
if map_lambda is None:
map_lambda = lambda x: x
return list(map(map_lambda, filter(filter_lambda, self.tsv))) | python | def get(self, filter_lambda=None, map_lambda=None):
"""Select elements of the TSV, using python filter and map.
Parameters
----------
filter_lambda : function
function to filter the tsv rows (the function needs to return True/False)
map_lambda : function
function to select the tsv columns
Returns
-------
list
list (not a generator, because that's the most common case)
Examples
--------
To select all the channels in one list, called "good_labels"::
>>> file_Tsv.get(lambda x: x['name'] in good_labels)
To select all the names of the channels:
>>> file_Tsv.get(map_lambda=lambda x: x['name'])
"""
if filter_lambda is None:
filter_lambda = lambda x: True
if map_lambda is None:
map_lambda = lambda x: x
return list(map(map_lambda, filter(filter_lambda, self.tsv))) | [
"def",
"get",
"(",
"self",
",",
"filter_lambda",
"=",
"None",
",",
"map_lambda",
"=",
"None",
")",
":",
"if",
"filter_lambda",
"is",
"None",
":",
"filter_lambda",
"=",
"lambda",
"x",
":",
"True",
"if",
"map_lambda",
"is",
"None",
":",
"map_lambda",
"=",
"lambda",
"x",
":",
"x",
"return",
"list",
"(",
"map",
"(",
"map_lambda",
",",
"filter",
"(",
"filter_lambda",
",",
"self",
".",
"tsv",
")",
")",
")"
] | Select elements of the TSV, using python filter and map.
Parameters
----------
filter_lambda : function
function to filter the tsv rows (the function needs to return True/False)
map_lambda : function
function to select the tsv columns
Returns
-------
list
list (not a generator, because that's the most common case)
Examples
--------
To select all the channels in one list, called "good_labels"::
>>> file_Tsv.get(lambda x: x['name'] in good_labels)
To select all the names of the channels:
>>> file_Tsv.get(map_lambda=lambda x: x['name']) | [
"Select",
"elements",
"of",
"the",
"TSV",
"using",
"python",
"filter",
"and",
"map",
"."
] | train | https://github.com/gpiantoni/bidso/blob/af163b921ec4e3d70802de07f174de184491cfce/bidso/files.py#L95-L125 |
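A sketch combining both lambdas above; the file name and the column values are placeholders, and the filename-parsing constructor is an assumption:

from bidso.files import file_Tsv

chan = file_Tsv("sub-01_task-motor_channels.tsv")  # assumed filename-parsing constructor
good_names = chan.get(
    filter_lambda=lambda row: row["status"] == "good",  # hypothetical column value
    map_lambda=lambda row: row["name"],
)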
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.connect | def connect(self, client_id: str = None, client_secret: str = None) -> dict:
"""Authenticate application and get token bearer.
Isogeo API uses oAuth 2.0 protocol (https://tools.ietf.org/html/rfc6749)
see: http://help.isogeo.com/api/fr/authentication/groupsapps.html
:param str client_id: application oAuth2 identifier
:param str client_secret: application oAuth2 secret
"""
# instanciated or direct call
if not client_id and not client_secret:
client_id = self.client_id
client_secret = self.client_secret
else:
pass
# Basic Authentication header in Base64 (https://en.wikipedia.org/wiki/Base64)
# see: http://tools.ietf.org/html/rfc2617#section-2
# using Client Credentials Grant method
# see: http://tools.ietf.org/html/rfc6749#section-4.4
payload = {"grant_type": "client_credentials"}
head = {"user-agent": self.app_name}
# passing request to get a 24h bearer
# see: http://tools.ietf.org/html/rfc6750#section-2
id_url = "https://id.{}.isogeo.com/oauth/token".format(self.api_url)
try:
conn = self.post(
id_url,
auth=(client_id, client_secret),
headers=head,
data=payload,
proxies=self.proxies,
verify=self.ssl,
)
except ConnectionError as e:
raise ConnectionError("Connection to Isogeo ID" "failed: {}".format(e))
# just a fast check
check_params = checker.check_api_response(conn)
if check_params == 1:
pass
elif isinstance(check_params, tuple) and len(check_params) == 2:
raise ValueError(2, check_params)
# getting access
self.token = conn.json()
# add expiration date - calculating with a prevention of 10%
expiration_delay = self.token.get("expires_in", 3600) - (
self.token.get("expires_in", 3600) / 10
)
self.token["expires_at"] = datetime.utcnow() + timedelta(
seconds=expiration_delay
)
# end of method
return self.token | python | def connect(self, client_id: str = None, client_secret: str = None) -> dict:
"""Authenticate application and get token bearer.
Isogeo API uses oAuth 2.0 protocol (https://tools.ietf.org/html/rfc6749)
see: http://help.isogeo.com/api/fr/authentication/groupsapps.html
:param str client_id: application oAuth2 identifier
:param str client_secret: application oAuth2 secret
"""
# instanciated or direct call
if not client_id and not client_secret:
client_id = self.client_id
client_secret = self.client_secret
else:
pass
# Basic Authentication header in Base64 (https://en.wikipedia.org/wiki/Base64)
# see: http://tools.ietf.org/html/rfc2617#section-2
# using Client Credentials Grant method
# see: http://tools.ietf.org/html/rfc6749#section-4.4
payload = {"grant_type": "client_credentials"}
head = {"user-agent": self.app_name}
# passing request to get a 24h bearer
# see: http://tools.ietf.org/html/rfc6750#section-2
id_url = "https://id.{}.isogeo.com/oauth/token".format(self.api_url)
try:
conn = self.post(
id_url,
auth=(client_id, client_secret),
headers=head,
data=payload,
proxies=self.proxies,
verify=self.ssl,
)
except ConnectionError as e:
raise ConnectionError("Connection to Isogeo ID" "failed: {}".format(e))
# just a fast check
check_params = checker.check_api_response(conn)
if check_params == 1:
pass
elif isinstance(check_params, tuple) and len(check_params) == 2:
raise ValueError(2, check_params)
# getting access
self.token = conn.json()
# add expiration date - calculating with a prevention of 10%
expiration_delay = self.token.get("expires_in", 3600) - (
self.token.get("expires_in", 3600) / 10
)
self.token["expires_at"] = datetime.utcnow() + timedelta(
seconds=expiration_delay
)
# end of method
return self.token | [
"def",
"connect",
"(",
"self",
",",
"client_id",
":",
"str",
"=",
"None",
",",
"client_secret",
":",
"str",
"=",
"None",
")",
"->",
"dict",
":",
"# instanciated or direct call",
"if",
"not",
"client_id",
"and",
"not",
"client_secret",
":",
"client_id",
"=",
"self",
".",
"client_id",
"client_secret",
"=",
"self",
".",
"client_secret",
"else",
":",
"pass",
"# Basic Authentication header in Base64 (https://en.wikipedia.org/wiki/Base64)",
"# see: http://tools.ietf.org/html/rfc2617#section-2",
"# using Client Credentials Grant method",
"# see: http://tools.ietf.org/html/rfc6749#section-4.4",
"payload",
"=",
"{",
"\"grant_type\"",
":",
"\"client_credentials\"",
"}",
"head",
"=",
"{",
"\"user-agent\"",
":",
"self",
".",
"app_name",
"}",
"# passing request to get a 24h bearer",
"# see: http://tools.ietf.org/html/rfc6750#section-2",
"id_url",
"=",
"\"https://id.{}.isogeo.com/oauth/token\"",
".",
"format",
"(",
"self",
".",
"api_url",
")",
"try",
":",
"conn",
"=",
"self",
".",
"post",
"(",
"id_url",
",",
"auth",
"=",
"(",
"client_id",
",",
"client_secret",
")",
",",
"headers",
"=",
"head",
",",
"data",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"except",
"ConnectionError",
"as",
"e",
":",
"raise",
"ConnectionError",
"(",
"\"Connection to Isogeo ID\"",
"\"failed: {}\"",
".",
"format",
"(",
"e",
")",
")",
"# just a fast check",
"check_params",
"=",
"checker",
".",
"check_api_response",
"(",
"conn",
")",
"if",
"check_params",
"==",
"1",
":",
"pass",
"elif",
"isinstance",
"(",
"check_params",
",",
"tuple",
")",
"and",
"len",
"(",
"check_params",
")",
"==",
"2",
":",
"raise",
"ValueError",
"(",
"2",
",",
"check_params",
")",
"# getting access",
"self",
".",
"token",
"=",
"conn",
".",
"json",
"(",
")",
"# add expiration date - calculating with a prevention of 10%",
"expiration_delay",
"=",
"self",
".",
"token",
".",
"get",
"(",
"\"expires_in\"",
",",
"3600",
")",
"-",
"(",
"self",
".",
"token",
".",
"get",
"(",
"\"expires_in\"",
",",
"3600",
")",
"/",
"10",
")",
"self",
".",
"token",
"[",
"\"expires_at\"",
"]",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"+",
"timedelta",
"(",
"seconds",
"=",
"expiration_delay",
")",
"# end of method",
"return",
"self",
".",
"token"
] | Authenticate application and get token bearer.
Isogeo API uses oAuth 2.0 protocol (https://tools.ietf.org/html/rfc6749)
see: http://help.isogeo.com/api/fr/authentication/groupsapps.html
:param str client_id: application oAuth2 identifier
:param str client_secret: application oAuth2 secret | [
"Authenticate",
"application",
"and",
"get",
"token",
"bearer",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L177-L234 |
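A minimal usage sketch for the record above (not part of the source; the `from isogeo_pysdk import Isogeo` import path is assumed and `APP_ID`/`APP_SECRET` are placeholder credentials):

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    # placeholder credentials — replace with a registered application's ID/secret
    isogeo = Isogeo(client_id="APP_ID", client_secret="APP_SECRET")
    token = isogeo.connect()  # bearer dict, with the computed "expires_at" added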
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo._check_bearer_validity | def _check_bearer_validity(decorated_func):
"""Check API Bearer token validity.
Isogeo ID delivers authentication bearers which are valid during
a certain time. So this decorator checks the validity of the token
comparing with actual datetime (UTC) and renews it if necessary.
See: http://tools.ietf.org/html/rfc6750#section-2
:param decorated_func token: original function to execute after check
"""
@wraps(decorated_func)
def wrapper(self, *args, **kwargs):
# compare token expiration date and ask for a new one if it's expired
            if datetime.utcnow() > self.token.get("expires_at"):
self.connect()
logging.debug("Token was about to expire, so has been renewed.")
else:
logging.debug("Token is still valid.")
pass
# let continue running the original function
return decorated_func(self, *args, **kwargs)
return wrapper | python | def _check_bearer_validity(decorated_func):
"""Check API Bearer token validity.
Isogeo ID delivers authentication bearers which are valid during
a certain time. So this decorator checks the validity of the token
comparing with actual datetime (UTC) and renews it if necessary.
See: http://tools.ietf.org/html/rfc6750#section-2
:param decorated_func token: original function to execute after check
"""
@wraps(decorated_func)
def wrapper(self, *args, **kwargs):
# compare token expiration date and ask for a new one if it's expired
            if datetime.utcnow() > self.token.get("expires_at"):
self.connect()
logging.debug("Token was about to expire, so has been renewed.")
else:
logging.debug("Token is still valid.")
pass
# let continue running the original function
return decorated_func(self, *args, **kwargs)
return wrapper | [
"def",
"_check_bearer_validity",
"(",
"decorated_func",
")",
":",
"@",
"wraps",
"(",
"decorated_func",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# compare token expiration date and ask for a new one if it's expired",
"if",
"datetime",
".",
"now",
"(",
")",
"<",
"self",
".",
"token",
".",
"get",
"(",
"\"expires_at\"",
")",
":",
"self",
".",
"connect",
"(",
")",
"logging",
".",
"debug",
"(",
"\"Token was about to expire, so has been renewed.\"",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"\"Token is still valid.\"",
")",
"pass",
"# let continue running the original function",
"return",
"decorated_func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | Check API Bearer token validity.
Isogeo ID delivers authentication bearers which are valid during
a certain time. So this decorator checks the validity of the token
comparing with actual datetime (UTC) and renews it if necessary.
See: http://tools.ietf.org/html/rfc6750#section-2
:param decorated_func token: original function to execute after check | [
"Check",
"API",
"Bearer",
"token",
"validity",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L246-L270 |
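An illustrative sketch of how this decorator is meant to wrap the SDK's API methods (not from the source record; `MyClient` and `whoami` are made-up names):

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    class MyClient(Isogeo):
        @Isogeo._check_bearer_validity  # renews the bearer first if it has expired
        def whoami(self):
            return self.token  # runs only after the validity check above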
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.search | def search(
self,
token: dict = None,
query: str = "",
bbox: list = None,
poly: str = None,
georel: str = None,
order_by: str = "_created",
order_dir: str = "desc",
page_size: int = 100,
offset: int = 0,
share: str = None,
specific_md: list = [],
include: list = [],
whole_share: bool = True,
check: bool = True,
augment: bool = False,
tags_as_dicts: bool = False,
prot: str = "https",
) -> dict:
"""Search within the resources shared to the application.
It's the main method to use.
:param str token: API auth token - DEPRECATED: token is now automatically included
:param str query: search terms and semantic filters. Equivalent of
**q** parameter in Isogeo API. It could be a simple
string like *oil* or a tag like *keyword:isogeo:formations*
or *keyword:inspire-theme:landcover*. The *AND* operator
is applied when various tags are passed.
:param list bbox: Bounding box to limit the search.
        Must be a list of 4 coordinates in WGS84 (EPSG 4326).
Could be associated with *georel*.
:param str poly: Geographic criteria for the search, in WKT format.
Could be associated with *georel*.
:param str georel: geometric operator to apply to the bbox or poly
parameters.
Available values (see: *isogeo.GEORELATIONS*):
* 'contains',
* 'disjoint',
* 'equals',
* 'intersects' - [APPLIED BY API if NOT SPECIFIED]
* 'overlaps',
* 'within'.
:param str order_by: sorting results.
Available values:
* '_created': metadata creation date [DEFAULT if relevance is null]
* '_modified': metadata last update
* 'title': metadata title
* 'created': data creation date (possibly None)
* 'modified': data last update date
* 'relevance': relevance score calculated by API [DEFAULT].
:param str order_dir: sorting direction.
Available values:
* 'desc': descending
* 'asc': ascending
:param int page_size: limits the number of results.
Useful to paginate results display. Default value: 100.
:param int offset: offset to start page size
from a specific results index
:param str share: share UUID to filter on
:param list specific_md: list of metadata UUIDs to filter on
:param list include: subresources that should be returned.
Must be a list of strings. Available values: *isogeo.SUBRESOURCES*
:param bool whole_share: option to return all results or only the
page size. *True* by DEFAULT.
        :param bool check: option to check query parameters and avoid errors.
*True* by DEFAULT.
:param bool augment: option to improve API response by adding
some tags on the fly (like shares_id)
:param bool tags_as_dicts: option to store tags as key/values by filter.
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# specific resources specific parsing
specific_md = checker._check_filter_specific_md(specific_md)
# sub resources specific parsing
include = checker._check_filter_includes(include)
# handling request parameters
payload = {
"_id": specific_md,
"_include": include,
"_lang": self.lang,
"_limit": page_size,
"_offset": offset,
"box": bbox,
"geo": poly,
"rel": georel,
"ob": order_by,
"od": order_dir,
"q": query,
"s": share,
}
if check:
checker.check_request_parameters(payload)
else:
pass
# search request
search_url = "{}://v1.{}.isogeo.com/resources/search".format(prot, self.api_url)
try:
search_req = self.get(
search_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
except Exception as e:
logging.error(e)
            raise
# fast response check
checker.check_api_response(search_req)
# serializing result into dict and storing resources in variables
search_rez = search_req.json()
resources_count = search_rez.get("total") # total of metadatas shared
# handling Isogeo API pagination
# see: http://help.isogeo.com/api/fr/methods/pagination.html
if resources_count > page_size and whole_share:
# if API returned more than one page of results, let's get the rest!
metadatas = [] # a recipient list
payload["_limit"] = 100 # now it'll get pages of 100 resources
# let's parse pages
            for idx in range(0, int(ceil(resources_count / 100))):
payload["_offset"] = idx * 100
search_req = self.get(
search_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# storing results by addition
metadatas.extend(search_req.json().get("results"))
search_rez["results"] = metadatas
else:
pass
# add shares to tags and query
if augment:
self.add_tags_shares(search_rez.get("tags"))
if share:
search_rez.get("query")["_shares"] = [share]
else:
search_rez.get("query")["_shares"] = []
else:
pass
# store tags in dicts
if tags_as_dicts:
new_tags = utils.tags_to_dict(
tags=search_rez.get("tags"), prev_query=search_rez.get("query")
)
# clear
search_rez.get("tags").clear()
search_rez.get("query").clear()
# update
search_rez.get("tags").update(new_tags[0])
search_rez.get("query").update(new_tags[1])
else:
pass
# end of method
return search_rez | python | def search(
self,
token: dict = None,
query: str = "",
bbox: list = None,
poly: str = None,
georel: str = None,
order_by: str = "_created",
order_dir: str = "desc",
page_size: int = 100,
offset: int = 0,
share: str = None,
specific_md: list = [],
include: list = [],
whole_share: bool = True,
check: bool = True,
augment: bool = False,
tags_as_dicts: bool = False,
prot: str = "https",
) -> dict:
"""Search within the resources shared to the application.
It's the main method to use.
:param str token: API auth token - DEPRECATED: token is now automatically included
:param str query: search terms and semantic filters. Equivalent of
**q** parameter in Isogeo API. It could be a simple
string like *oil* or a tag like *keyword:isogeo:formations*
or *keyword:inspire-theme:landcover*. The *AND* operator
is applied when various tags are passed.
:param list bbox: Bounding box to limit the search.
        Must be a list of 4 coordinates in WGS84 (EPSG 4326).
Could be associated with *georel*.
:param str poly: Geographic criteria for the search, in WKT format.
Could be associated with *georel*.
:param str georel: geometric operator to apply to the bbox or poly
parameters.
Available values (see: *isogeo.GEORELATIONS*):
* 'contains',
* 'disjoint',
* 'equals',
* 'intersects' - [APPLIED BY API if NOT SPECIFIED]
* 'overlaps',
* 'within'.
:param str order_by: sorting results.
Available values:
* '_created': metadata creation date [DEFAULT if relevance is null]
* '_modified': metadata last update
* 'title': metadata title
* 'created': data creation date (possibly None)
* 'modified': data last update date
* 'relevance': relevance score calculated by API [DEFAULT].
:param str order_dir: sorting direction.
Available values:
* 'desc': descending
* 'asc': ascending
:param int page_size: limits the number of results.
Useful to paginate results display. Default value: 100.
:param int offset: offset to start page size
from a specific results index
:param str share: share UUID to filter on
:param list specific_md: list of metadata UUIDs to filter on
:param list include: subresources that should be returned.
Must be a list of strings. Available values: *isogeo.SUBRESOURCES*
:param bool whole_share: option to return all results or only the
page size. *True* by DEFAULT.
        :param bool check: option to check query parameters and avoid errors.
*True* by DEFAULT.
:param bool augment: option to improve API response by adding
some tags on the fly (like shares_id)
:param bool tags_as_dicts: option to store tags as key/values by filter.
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# specific resources specific parsing
specific_md = checker._check_filter_specific_md(specific_md)
# sub resources specific parsing
include = checker._check_filter_includes(include)
# handling request parameters
payload = {
"_id": specific_md,
"_include": include,
"_lang": self.lang,
"_limit": page_size,
"_offset": offset,
"box": bbox,
"geo": poly,
"rel": georel,
"ob": order_by,
"od": order_dir,
"q": query,
"s": share,
}
if check:
checker.check_request_parameters(payload)
else:
pass
# search request
search_url = "{}://v1.{}.isogeo.com/resources/search".format(prot, self.api_url)
try:
search_req = self.get(
search_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
except Exception as e:
logging.error(e)
            raise
# fast response check
checker.check_api_response(search_req)
# serializing result into dict and storing resources in variables
search_rez = search_req.json()
resources_count = search_rez.get("total") # total of metadatas shared
# handling Isogeo API pagination
# see: http://help.isogeo.com/api/fr/methods/pagination.html
if resources_count > page_size and whole_share:
# if API returned more than one page of results, let's get the rest!
metadatas = [] # a recipient list
payload["_limit"] = 100 # now it'll get pages of 100 resources
# let's parse pages
            for idx in range(0, int(ceil(resources_count / 100))):
payload["_offset"] = idx * 100
search_req = self.get(
search_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# storing results by addition
metadatas.extend(search_req.json().get("results"))
search_rez["results"] = metadatas
else:
pass
# add shares to tags and query
if augment:
self.add_tags_shares(search_rez.get("tags"))
if share:
search_rez.get("query")["_shares"] = [share]
else:
search_rez.get("query")["_shares"] = []
else:
pass
# store tags in dicts
if tags_as_dicts:
new_tags = utils.tags_to_dict(
tags=search_rez.get("tags"), prev_query=search_rez.get("query")
)
# clear
search_rez.get("tags").clear()
search_rez.get("query").clear()
# update
search_rez.get("tags").update(new_tags[0])
search_rez.get("query").update(new_tags[1])
else:
pass
# end of method
return search_rez | [
"def",
"search",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"query",
":",
"str",
"=",
"\"\"",
",",
"bbox",
":",
"list",
"=",
"None",
",",
"poly",
":",
"str",
"=",
"None",
",",
"georel",
":",
"str",
"=",
"None",
",",
"order_by",
":",
"str",
"=",
"\"_created\"",
",",
"order_dir",
":",
"str",
"=",
"\"desc\"",
",",
"page_size",
":",
"int",
"=",
"100",
",",
"offset",
":",
"int",
"=",
"0",
",",
"share",
":",
"str",
"=",
"None",
",",
"specific_md",
":",
"list",
"=",
"[",
"]",
",",
"include",
":",
"list",
"=",
"[",
"]",
",",
"whole_share",
":",
"bool",
"=",
"True",
",",
"check",
":",
"bool",
"=",
"True",
",",
"augment",
":",
"bool",
"=",
"False",
",",
"tags_as_dicts",
":",
"bool",
"=",
"False",
",",
"prot",
":",
"str",
"=",
"\"https\"",
",",
")",
"->",
"dict",
":",
"# specific resources specific parsing",
"specific_md",
"=",
"checker",
".",
"_check_filter_specific_md",
"(",
"specific_md",
")",
"# sub resources specific parsing",
"include",
"=",
"checker",
".",
"_check_filter_includes",
"(",
"include",
")",
"# handling request parameters",
"payload",
"=",
"{",
"\"_id\"",
":",
"specific_md",
",",
"\"_include\"",
":",
"include",
",",
"\"_lang\"",
":",
"self",
".",
"lang",
",",
"\"_limit\"",
":",
"page_size",
",",
"\"_offset\"",
":",
"offset",
",",
"\"box\"",
":",
"bbox",
",",
"\"geo\"",
":",
"poly",
",",
"\"rel\"",
":",
"georel",
",",
"\"ob\"",
":",
"order_by",
",",
"\"od\"",
":",
"order_dir",
",",
"\"q\"",
":",
"query",
",",
"\"s\"",
":",
"share",
",",
"}",
"if",
"check",
":",
"checker",
".",
"check_request_parameters",
"(",
"payload",
")",
"else",
":",
"pass",
"# search request",
"search_url",
"=",
"\"{}://v1.{}.isogeo.com/resources/search\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
")",
"try",
":",
"search_req",
"=",
"self",
".",
"get",
"(",
"search_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"params",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"e",
")",
"raise",
"Exception",
"# fast response check",
"checker",
".",
"check_api_response",
"(",
"search_req",
")",
"# serializing result into dict and storing resources in variables",
"search_rez",
"=",
"search_req",
".",
"json",
"(",
")",
"resources_count",
"=",
"search_rez",
".",
"get",
"(",
"\"total\"",
")",
"# total of metadatas shared",
"# handling Isogeo API pagination",
"# see: http://help.isogeo.com/api/fr/methods/pagination.html",
"if",
"resources_count",
">",
"page_size",
"and",
"whole_share",
":",
"# if API returned more than one page of results, let's get the rest!",
"metadatas",
"=",
"[",
"]",
"# a recipient list",
"payload",
"[",
"\"_limit\"",
"]",
"=",
"100",
"# now it'll get pages of 100 resources",
"# let's parse pages",
"for",
"idx",
"in",
"range",
"(",
"0",
",",
"int",
"(",
"ceil",
"(",
"resources_count",
"/",
"100",
")",
")",
"+",
"1",
")",
":",
"payload",
"[",
"\"_offset\"",
"]",
"=",
"idx",
"*",
"100",
"search_req",
"=",
"self",
".",
"get",
"(",
"search_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"params",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"# storing results by addition",
"metadatas",
".",
"extend",
"(",
"search_req",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"results\"",
")",
")",
"search_rez",
"[",
"\"results\"",
"]",
"=",
"metadatas",
"else",
":",
"pass",
"# add shares to tags and query",
"if",
"augment",
":",
"self",
".",
"add_tags_shares",
"(",
"search_rez",
".",
"get",
"(",
"\"tags\"",
")",
")",
"if",
"share",
":",
"search_rez",
".",
"get",
"(",
"\"query\"",
")",
"[",
"\"_shares\"",
"]",
"=",
"[",
"share",
"]",
"else",
":",
"search_rez",
".",
"get",
"(",
"\"query\"",
")",
"[",
"\"_shares\"",
"]",
"=",
"[",
"]",
"else",
":",
"pass",
"# store tags in dicts",
"if",
"tags_as_dicts",
":",
"new_tags",
"=",
"utils",
".",
"tags_to_dict",
"(",
"tags",
"=",
"search_rez",
".",
"get",
"(",
"\"tags\"",
")",
",",
"prev_query",
"=",
"search_rez",
".",
"get",
"(",
"\"query\"",
")",
")",
"# clear",
"search_rez",
".",
"get",
"(",
"\"tags\"",
")",
".",
"clear",
"(",
")",
"search_rez",
".",
"get",
"(",
"\"query\"",
")",
".",
"clear",
"(",
")",
"# update",
"search_rez",
".",
"get",
"(",
"\"tags\"",
")",
".",
"update",
"(",
"new_tags",
"[",
"0",
"]",
")",
"search_rez",
".",
"get",
"(",
"\"query\"",
")",
".",
"update",
"(",
"new_tags",
"[",
"1",
"]",
")",
"else",
":",
"pass",
"# end of method",
"return",
"search_rez"
] | Search within the resources shared to the application.
It's the main method to use.
:param str token: API auth token - DEPRECATED: token is now automatically included
:param str query: search terms and semantic filters. Equivalent of
**q** parameter in Isogeo API. It could be a simple
string like *oil* or a tag like *keyword:isogeo:formations*
or *keyword:inspire-theme:landcover*. The *AND* operator
is applied when various tags are passed.
:param list bbox: Bounding box to limit the search.
Must be a list of 4 coordinates in WGS84 (EPSG 4326).
Could be associated with *georel*.
:param str poly: Geographic criteria for the search, in WKT format.
Could be associated with *georel*.
:param str georel: geometric operator to apply to the bbox or poly
parameters.
Available values (see: *isogeo.GEORELATIONS*):
* 'contains',
* 'disjoint',
* 'equals',
* 'intersects' - [APPLIED BY API if NOT SPECIFIED]
* 'overlaps',
* 'within'.
:param str order_by: sorting results.
Available values:
* '_created': metadata creation date [DEFAULT if relevance is null]
* '_modified': metadata last update
* 'title': metadata title
* 'created': data creation date (possibly None)
* 'modified': data last update date
* 'relevance': relevance score calculated by API [DEFAULT].
:param str order_dir: sorting direction.
Available values:
* 'desc': descending
* 'asc': ascending
:param int page_size: limits the number of results.
Useful to paginate results display. Default value: 100.
:param int offset: offset to start page size
from a specific results index
:param str share: share UUID to filter on
:param list specific_md: list of metadata UUIDs to filter on
:param list include: subresources that should be returned.
Must be a list of strings. Available values: *isogeo.SUBRESOURCES*
:param bool whole_share: option to return all results or only the
page size. *True* by DEFAULT.
:param bool check: option to check query parameters and avoid errors.
*True* by DEFAULT.
:param bool augment: option to improve API response by adding
some tags on the fly (like shares_id)
:param bool tags_as_dicts: option to store tags as key/values by filter.
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Search",
"within",
"the",
"resources",
"shared",
"to",
"the",
"application",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L274-L450 |
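A minimal usage sketch (not part of the source record; the credentials are placeholders and the `type:dataset` tag filter is an assumed example of a semantic filter):

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    isogeo = Isogeo(client_id="APP_ID", client_secret="APP_SECRET")  # placeholders
    isogeo.connect()
    # one page of 10 results, links subresource included, no pagination walk
    rez = isogeo.search(query="type:dataset", page_size=10, whole_share=False, include=["links"])
    print(rez.get("total"), "metadata shared to the application")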
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.resource | def resource(
self,
token: dict = None,
id_resource: str = None,
subresource=None,
include: list = [],
prot: str = "https",
) -> dict:
"""Get complete or partial metadata about one specific resource.
:param str token: API auth token
:param str id_resource: metadata UUID to get
:param list include: subresources that should be included.
Must be a list of strings. Available values: 'isogeo.SUBRESOURCES'
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# if subresource route
if isinstance(subresource, str):
subresource = "/{}".format(checker._check_subresource(subresource))
else:
subresource = ""
# _includes specific parsing
include = checker._check_filter_includes(include)
# handling request parameters
payload = {"id": id_resource, "_include": include}
# resource search
md_url = "{}://v1.{}.isogeo.com/resources/{}{}".format(
prot, self.api_url, id_resource, subresource
)
resource_req = self.get(
md_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
checker.check_api_response(resource_req)
# end of method
return resource_req.json() | python | def resource(
self,
token: dict = None,
id_resource: str = None,
subresource=None,
include: list = [],
prot: str = "https",
) -> dict:
"""Get complete or partial metadata about one specific resource.
:param str token: API auth token
:param str id_resource: metadata UUID to get
:param list include: subresources that should be included.
Must be a list of strings. Available values: 'isogeo.SUBRESOURCES'
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# if subresource route
if isinstance(subresource, str):
subresource = "/{}".format(checker._check_subresource(subresource))
else:
subresource = ""
# _includes specific parsing
include = checker._check_filter_includes(include)
# handling request parameters
payload = {"id": id_resource, "_include": include}
# resource search
md_url = "{}://v1.{}.isogeo.com/resources/{}{}".format(
prot, self.api_url, id_resource, subresource
)
resource_req = self.get(
md_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
checker.check_api_response(resource_req)
# end of method
return resource_req.json() | [
"def",
"resource",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"id_resource",
":",
"str",
"=",
"None",
",",
"subresource",
"=",
"None",
",",
"include",
":",
"list",
"=",
"[",
"]",
",",
"prot",
":",
"str",
"=",
"\"https\"",
",",
")",
"->",
"dict",
":",
"# if subresource route",
"if",
"isinstance",
"(",
"subresource",
",",
"str",
")",
":",
"subresource",
"=",
"\"/{}\"",
".",
"format",
"(",
"checker",
".",
"_check_subresource",
"(",
"subresource",
")",
")",
"else",
":",
"subresource",
"=",
"\"\"",
"# _includes specific parsing",
"include",
"=",
"checker",
".",
"_check_filter_includes",
"(",
"include",
")",
"# handling request parameters",
"payload",
"=",
"{",
"\"id\"",
":",
"id_resource",
",",
"\"_include\"",
":",
"include",
"}",
"# resource search",
"md_url",
"=",
"\"{}://v1.{}.isogeo.com/resources/{}{}\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"id_resource",
",",
"subresource",
")",
"resource_req",
"=",
"self",
".",
"get",
"(",
"md_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"params",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"checker",
".",
"check_api_response",
"(",
"resource_req",
")",
"# end of method",
"return",
"resource_req",
".",
"json",
"(",
")"
] | Get complete or partial metadata about one specific resource.
:param str token: API auth token
:param str id_resource: metadata UUID to get
:param list include: subresources that should be included.
Must be a list of strings. Available values: 'isogeo.SUBRESOURCES'
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"complete",
"or",
"partial",
"metadata",
"about",
"one",
"specific",
"resource",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L453-L494 |
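A minimal usage sketch (not part of the source record; the UUID reuses the placeholder from the dl_hosted docstring further down and the credentials are fake):

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    isogeo = Isogeo(client_id="APP_ID", client_secret="APP_SECRET")  # placeholders
    isogeo.connect()
    md = isogeo.resource(id_resource="1a2b3c4d5e6f7g8h9i0j11k12l13m14n", include=["links"])
    print(md.get("title"))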
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.shares | def shares(self, token: dict = None, prot: str = "https") -> dict:
"""Get information about shares which feed the application.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# passing auth parameter
shares_url = "{}://v1.{}.isogeo.com/shares/".format(prot, self.api_url)
shares_req = self.get(
shares_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(shares_req)
# end of method
return shares_req.json() | python | def shares(self, token: dict = None, prot: str = "https") -> dict:
"""Get information about shares which feed the application.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# passing auth parameter
shares_url = "{}://v1.{}.isogeo.com/shares/".format(prot, self.api_url)
shares_req = self.get(
shares_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(shares_req)
# end of method
return shares_req.json() | [
"def",
"shares",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
")",
"->",
"dict",
":",
"# passing auth parameter",
"shares_url",
"=",
"\"{}://v1.{}.isogeo.com/shares/\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
")",
"shares_req",
"=",
"self",
".",
"get",
"(",
"shares_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
")",
"# checking response",
"checker",
".",
"check_api_response",
"(",
"shares_req",
")",
"# end of method",
"return",
"shares_req",
".",
"json",
"(",
")"
] | Get information about shares which feed the application.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"information",
"about",
"shares",
"which",
"feed",
"the",
"application",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L498-L515 |
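A minimal usage sketch (not part of the source record; credentials are placeholders). The endpoint returns a list of share dicts, which is what the `share:{}` tag builder in add_tags_shares below relies on:

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    isogeo = Isogeo(client_id="APP_ID", client_secret="APP_SECRET")  # placeholders
    isogeo.connect()
    for share in isogeo.shares():  # one dict per share feeding the application
        print(share.get("_id"), share.get("name"))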
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.share | def share(
self,
share_id: str,
token: dict = None,
augment: bool = False,
prot: str = "https",
) -> dict:
"""Get information about a specific share and its applications.
:param str token: API auth token
:param str share_id: share UUID
:param bool augment: option to improve API response by adding
some tags on the fly.
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# passing auth parameter
share_url = "{}://v1.{}.isogeo.com/shares/{}".format(
prot, self.api_url, share_id
)
share_req = self.get(
share_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(share_req)
# enhance share model
share = share_req.json()
if augment:
share = utils.share_extender(
                share, self.search(whole_share=True, share=share_id).get("results")
)
else:
pass
# end of method
return share | python | def share(
self,
share_id: str,
token: dict = None,
augment: bool = False,
prot: str = "https",
) -> dict:
"""Get information about a specific share and its applications.
:param str token: API auth token
:param str share_id: share UUID
:param bool augment: option to improve API response by adding
some tags on the fly.
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# passing auth parameter
share_url = "{}://v1.{}.isogeo.com/shares/{}".format(
prot, self.api_url, share_id
)
share_req = self.get(
share_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(share_req)
# enhance share model
share = share_req.json()
if augment:
share = utils.share_extender(
                share, self.search(whole_share=True, share=share_id).get("results")
)
else:
pass
# end of method
return share | [
"def",
"share",
"(",
"self",
",",
"share_id",
":",
"str",
",",
"token",
":",
"dict",
"=",
"None",
",",
"augment",
":",
"bool",
"=",
"False",
",",
"prot",
":",
"str",
"=",
"\"https\"",
",",
")",
"->",
"dict",
":",
"# passing auth parameter",
"share_url",
"=",
"\"{}://v1.{}.isogeo.com/shares/{}\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"share_id",
")",
"share_req",
"=",
"self",
".",
"get",
"(",
"share_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
")",
"# checking response",
"checker",
".",
"check_api_response",
"(",
"share_req",
")",
"# enhance share model",
"share",
"=",
"share_req",
".",
"json",
"(",
")",
"if",
"augment",
":",
"share",
"=",
"utils",
".",
"share_extender",
"(",
"share",
",",
"self",
".",
"search",
"(",
"whole_share",
"=",
"1",
",",
"share",
"=",
"share_id",
")",
".",
"get",
"(",
"\"results\"",
")",
")",
"else",
":",
"pass",
"# end of method",
"return",
"share"
] | Get information about a specific share and its applications.
:param str token: API auth token
:param str share_id: share UUID
:param bool augment: option to improve API response by adding
some tags on the fly.
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"information",
"about",
"a",
"specific",
"share",
"and",
"its",
"applications",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L518-L555 |
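A minimal usage sketch (not part of the source record; the share UUID is a placeholder):

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    isogeo = Isogeo(client_id="APP_ID", client_secret="APP_SECRET")  # placeholders
    isogeo.connect()
    # augment=True triggers the extra search() + share_extender enrichment
    share = isogeo.share(share_id="1a2b3c4d5e6f7g8h9i0j11k12l13m14n", augment=True)
    print(share.get("name"))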
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.licenses | def licenses(
self, token: dict = None, owner_id: str = None, prot: str = "https"
) -> dict:
"""Get information about licenses owned by a specific workgroup.
:param str token: API auth token
:param str owner_id: workgroup UUID
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# handling request parameters
payload = {"gid": owner_id}
# search request
licenses_url = "{}://v1.{}.isogeo.com/groups/{}/licenses".format(
prot, self.api_url, owner_id
)
licenses_req = self.get(
licenses_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# checking response
req_check = checker.check_api_response(licenses_req)
if isinstance(req_check, tuple):
return req_check
# end of method
return licenses_req.json() | python | def licenses(
self, token: dict = None, owner_id: str = None, prot: str = "https"
) -> dict:
"""Get information about licenses owned by a specific workgroup.
:param str token: API auth token
:param str owner_id: workgroup UUID
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# handling request parameters
payload = {"gid": owner_id}
# search request
licenses_url = "{}://v1.{}.isogeo.com/groups/{}/licenses".format(
prot, self.api_url, owner_id
)
licenses_req = self.get(
licenses_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# checking response
req_check = checker.check_api_response(licenses_req)
if isinstance(req_check, tuple):
return req_check
# end of method
return licenses_req.json() | [
"def",
"licenses",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"owner_id",
":",
"str",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
")",
"->",
"dict",
":",
"# handling request parameters",
"payload",
"=",
"{",
"\"gid\"",
":",
"owner_id",
"}",
"# search request",
"licenses_url",
"=",
"\"{}://v1.{}.isogeo.com/groups/{}/licenses\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"owner_id",
")",
"licenses_req",
"=",
"self",
".",
"get",
"(",
"licenses_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"params",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"# checking response",
"req_check",
"=",
"checker",
".",
"check_api_response",
"(",
"licenses_req",
")",
"if",
"isinstance",
"(",
"req_check",
",",
"tuple",
")",
":",
"return",
"req_check",
"# end of method",
"return",
"licenses_req",
".",
"json",
"(",
")"
] | Get information about licenses owned by a specific workgroup.
:param str token: API auth token
:param str owner_id: workgroup UUID
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"information",
"about",
"licenses",
"owned",
"by",
"a",
"specific",
"workgroup",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L559-L590 |
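A minimal usage sketch (not part of the source record; the workgroup UUID is a placeholder):

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    isogeo = Isogeo(client_id="APP_ID", client_secret="APP_SECRET")  # placeholders
    isogeo.connect()
    lics = isogeo.licenses(owner_id="1a2b3c4d5e6f7g8h9i0j11k12l13m14n")
    # on API errors this method returns the (code, message) tuple instead of raising
    print(lics)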
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.license | def license(self, license_id: str, token: dict = None, prot: str = "https") -> dict:
"""Get details about a specific license.
:param str token: API auth token
:param str license_id: license UUID
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# handling request parameters
payload = {"lid": license_id}
# search request
license_url = "{}://v1.{}.isogeo.com/licenses/{}".format(
prot, self.api_url, license_id
)
license_req = self.get(
license_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# checking response
checker.check_api_response(license_req)
# end of method
return license_req.json() | python | def license(self, license_id: str, token: dict = None, prot: str = "https") -> dict:
"""Get details about a specific license.
:param str token: API auth token
:param str license_id: license UUID
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# handling request parameters
payload = {"lid": license_id}
# search request
license_url = "{}://v1.{}.isogeo.com/licenses/{}".format(
prot, self.api_url, license_id
)
license_req = self.get(
license_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# checking response
checker.check_api_response(license_req)
# end of method
return license_req.json() | [
"def",
"license",
"(",
"self",
",",
"license_id",
":",
"str",
",",
"token",
":",
"dict",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
")",
"->",
"dict",
":",
"# handling request parameters",
"payload",
"=",
"{",
"\"lid\"",
":",
"license_id",
"}",
"# search request",
"license_url",
"=",
"\"{}://v1.{}.isogeo.com/licenses/{}\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"license_id",
")",
"license_req",
"=",
"self",
".",
"get",
"(",
"license_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"params",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"# checking response",
"checker",
".",
"check_api_response",
"(",
"license_req",
")",
"# end of method",
"return",
"license_req",
".",
"json",
"(",
")"
] | Get details about a specific license.
:param str token: API auth token
:param str license_id: license UUID
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"details",
"about",
"a",
"specific",
"license",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L593-L620 |
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.thesauri | def thesauri(self, token: dict = None, prot: str = "https") -> dict:
"""Get list of available thesauri.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# passing auth parameter
thez_url = "{}://v1.{}.isogeo.com/thesauri".format(prot, self.api_url)
thez_req = self.get(
thez_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(thez_req)
# end of method
return thez_req.json() | python | def thesauri(self, token: dict = None, prot: str = "https") -> dict:
"""Get list of available thesauri.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# passing auth parameter
thez_url = "{}://v1.{}.isogeo.com/thesauri".format(prot, self.api_url)
thez_req = self.get(
thez_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(thez_req)
# end of method
return thez_req.json() | [
"def",
"thesauri",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
")",
"->",
"dict",
":",
"# passing auth parameter",
"thez_url",
"=",
"\"{}://v1.{}.isogeo.com/thesauri\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
")",
"thez_req",
"=",
"self",
".",
"get",
"(",
"thez_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
")",
"# checking response",
"checker",
".",
"check_api_response",
"(",
"thez_req",
")",
"# end of method",
"return",
"thez_req",
".",
"json",
"(",
")"
] | Get list of available thesauri.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"list",
"of",
"available",
"thesauri",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L625-L642 |
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.thesaurus | def thesaurus(
self,
token: dict = None,
thez_id: str = "1616597fbc4348c8b11ef9d59cf594c8",
prot: str = "https",
) -> dict:
"""Get a thesaurus.
:param str token: API auth token
:param str thez_id: thesaurus UUID
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# handling request parameters
payload = {"tid": thez_id}
# passing auth parameter
thez_url = "{}://v1.{}.isogeo.com/thesauri/{}".format(
prot, self.api_url, thez_id
)
thez_req = self.get(
thez_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# checking response
checker.check_api_response(thez_req)
# end of method
return thez_req.json() | python | def thesaurus(
self,
token: dict = None,
thez_id: str = "1616597fbc4348c8b11ef9d59cf594c8",
prot: str = "https",
) -> dict:
"""Get a thesaurus.
:param str token: API auth token
:param str thez_id: thesaurus UUID
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# handling request parameters
payload = {"tid": thez_id}
# passing auth parameter
thez_url = "{}://v1.{}.isogeo.com/thesauri/{}".format(
prot, self.api_url, thez_id
)
thez_req = self.get(
thez_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# checking response
checker.check_api_response(thez_req)
# end of method
return thez_req.json() | [
"def",
"thesaurus",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"thez_id",
":",
"str",
"=",
"\"1616597fbc4348c8b11ef9d59cf594c8\"",
",",
"prot",
":",
"str",
"=",
"\"https\"",
",",
")",
"->",
"dict",
":",
"# handling request parameters",
"payload",
"=",
"{",
"\"tid\"",
":",
"thez_id",
"}",
"# passing auth parameter",
"thez_url",
"=",
"\"{}://v1.{}.isogeo.com/thesauri/{}\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"thez_id",
")",
"thez_req",
"=",
"self",
".",
"get",
"(",
"thez_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"params",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"# checking response",
"checker",
".",
"check_api_response",
"(",
"thez_req",
")",
"# end of method",
"return",
"thez_req",
".",
"json",
"(",
")"
] | Get a thesaurus.
:param str token: API auth token
:param str thez_id: thesaurus UUID
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"a",
"thesaurus",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L645-L677 |
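A minimal usage sketch (not part of the source record; credentials are placeholders):

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    isogeo = Isogeo(client_id="APP_ID", client_secret="APP_SECRET")  # placeholders
    isogeo.connect()
    thez = isogeo.thesaurus()  # falls back to the default thesaurus UUID in the signature
    print(thez)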
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.keywords | def keywords(
self,
token: dict = None,
thez_id: str = "1616597fbc4348c8b11ef9d59cf594c8",
query: str = "",
offset: int = 0,
order_by: str = "text",
order_dir: str = "desc",
page_size: int = 20,
specific_md: list = [],
specific_tag: list = [],
include: list = [],
prot: str = "https",
) -> dict:
"""Search for keywords within a specific thesaurus.
:param str token: API auth token
:param str thez_id: thesaurus UUID
:param str query: search terms
:param int offset: pagination start
        :param str order_by: sort criteria. Available values:
- count.group,
- count.isogeo,
- text
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# specific resources specific parsing
specific_md = checker._check_filter_specific_md(specific_md)
# sub resources specific parsing
include = checker._check_filter_includes(include, "keyword")
# specific tag specific parsing
specific_tag = checker._check_filter_specific_tag(specific_tag)
# handling request parameters
payload = {
"_id": specific_md,
"_include": include,
"_limit": page_size,
"_offset": offset,
"_tag": specific_tag,
"tid": thez_id,
"ob": order_by,
"od": order_dir,
"q": query,
}
# search request
keywords_url = "{}://v1.{}.isogeo.com/thesauri/{}/keywords/search".format(
prot, self.api_url, thez_id
)
kwds_req = self.get(
keywords_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# checking response
checker.check_api_response(kwds_req)
# end of method
return kwds_req.json() | python | def keywords(
self,
token: dict = None,
thez_id: str = "1616597fbc4348c8b11ef9d59cf594c8",
query: str = "",
offset: int = 0,
order_by: str = "text",
order_dir: str = "desc",
page_size: int = 20,
specific_md: list = [],
specific_tag: list = [],
include: list = [],
prot: str = "https",
) -> dict:
"""Search for keywords within a specific thesaurus.
:param str token: API auth token
:param str thez_id: thesaurus UUID
:param str query: search terms
:param int offset: pagination start
        :param str order_by: sort criteria. Available values:
- count.group,
- count.isogeo,
- text
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# specific resources specific parsing
specific_md = checker._check_filter_specific_md(specific_md)
# sub resources specific parsing
include = checker._check_filter_includes(include, "keyword")
# specific tag specific parsing
specific_tag = checker._check_filter_specific_tag(specific_tag)
# handling request parameters
payload = {
"_id": specific_md,
"_include": include,
"_limit": page_size,
"_offset": offset,
"_tag": specific_tag,
"tid": thez_id,
"ob": order_by,
"od": order_dir,
"q": query,
}
# search request
keywords_url = "{}://v1.{}.isogeo.com/thesauri/{}/keywords/search".format(
prot, self.api_url, thez_id
)
kwds_req = self.get(
keywords_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# checking response
checker.check_api_response(kwds_req)
# end of method
return kwds_req.json() | [
"def",
"keywords",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"thez_id",
":",
"str",
"=",
"\"1616597fbc4348c8b11ef9d59cf594c8\"",
",",
"query",
":",
"str",
"=",
"\"\"",
",",
"offset",
":",
"int",
"=",
"0",
",",
"order_by",
":",
"str",
"=",
"\"text\"",
",",
"order_dir",
":",
"str",
"=",
"\"desc\"",
",",
"page_size",
":",
"int",
"=",
"20",
",",
"specific_md",
":",
"list",
"=",
"[",
"]",
",",
"specific_tag",
":",
"list",
"=",
"[",
"]",
",",
"include",
":",
"list",
"=",
"[",
"]",
",",
"prot",
":",
"str",
"=",
"\"https\"",
",",
")",
"->",
"dict",
":",
"# specific resources specific parsing",
"specific_md",
"=",
"checker",
".",
"_check_filter_specific_md",
"(",
"specific_md",
")",
"# sub resources specific parsing",
"include",
"=",
"checker",
".",
"_check_filter_includes",
"(",
"include",
",",
"\"keyword\"",
")",
"# specific tag specific parsing",
"specific_tag",
"=",
"checker",
".",
"_check_filter_specific_tag",
"(",
"specific_tag",
")",
"# handling request parameters",
"payload",
"=",
"{",
"\"_id\"",
":",
"specific_md",
",",
"\"_include\"",
":",
"include",
",",
"\"_limit\"",
":",
"page_size",
",",
"\"_offset\"",
":",
"offset",
",",
"\"_tag\"",
":",
"specific_tag",
",",
"\"tid\"",
":",
"thez_id",
",",
"\"ob\"",
":",
"order_by",
",",
"\"od\"",
":",
"order_dir",
",",
"\"q\"",
":",
"query",
",",
"}",
"# search request",
"keywords_url",
"=",
"\"{}://v1.{}.isogeo.com/thesauri/{}/keywords/search\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"thez_id",
")",
"kwds_req",
"=",
"self",
".",
"get",
"(",
"keywords_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"params",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"# checking response",
"checker",
".",
"check_api_response",
"(",
"kwds_req",
")",
"# end of method",
"return",
"kwds_req",
".",
"json",
"(",
")"
] | Search for keywords within a specific thesaurus.
:param str token: API auth token
:param str thez_id: thesaurus UUID
:param str query: search terms
:param int offset: pagination start
:param str order_by: sort criteria. Available values:
- count.group,
- count.isogeo,
- text
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Search",
"for",
"keywords",
"within",
"a",
"specific",
"thesaurus",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L680-L746 |
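A minimal usage sketch (not part of the source record; credentials and the search term are placeholders):

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    isogeo = Isogeo(client_id="APP_ID", client_secret="APP_SECRET")  # placeholders
    isogeo.connect()
    kws = isogeo.keywords(query="water", page_size=50)  # default thesaurus UUID applies
    for kw in kws.get("results", []):
        print(kw.get("text"))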
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.dl_hosted | def dl_hosted(
self,
token: dict = None,
resource_link: dict = None,
        encode_clean: bool = True,
proxy_url: str = None,
prot: str = "https",
) -> tuple:
"""Download hosted resource.
:param str token: API auth token
:param dict resource_link: link dictionary
:param bool encode_clean: option to ensure a clean filename and avoid OS errors
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
Example of resource_link dict:
.. code-block:: json
{
"_id": "g8h9i0j11k12l13m14n15o16p17Q18rS",
"type": "hosted",
"title": "label_of_hosted_file.zip",
"url": "/resources/1a2b3c4d5e6f7g8h9i0j11k12l13m14n/links/g8h9i0j11k12l13m14n15o16p17Q18rS.bin",
"kind": "data",
"actions": ["download", ],
"size": "2253029",
}
"""
# check resource link parameter type
if not isinstance(resource_link, dict):
raise TypeError("Resource link expects a dictionary.")
else:
pass
# check resource link type
if not resource_link.get("type") == "hosted":
raise ValueError(
"Resource link passed is not a hosted one: {}".format(
resource_link.get("type")
)
)
else:
pass
# handling request parameters
payload = {"proxyUrl": proxy_url}
# prepare URL request
hosted_url = "{}://v1.{}.isogeo.com/{}".format(
prot, self.api_url, resource_link.get("url")
)
# send stream request
hosted_req = self.get(
hosted_url,
headers=self.header,
stream=True,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# quick check
req_check = checker.check_api_response(hosted_req)
        if isinstance(req_check, tuple):
raise ConnectionError(req_check[1])
else:
pass
# get filename from header
content_disposition = hosted_req.headers.get("Content-Disposition")
if content_disposition:
filename = re.findall("filename=(.+)", content_disposition)[0]
else:
filename = resource_link.get("title")
# remove special characters
if encode_clean:
filename = utils.encoded_words_to_text(filename)
filename = re.sub(r"[^\w\-_\. ]", "", filename)
        # well-formed size (the API may return size as a string, so cast it first)
        in_size = float(resource_link.get("size"))
        for size_cat in ("octets", "Ko", "Mo", "Go"):
            if in_size < 1024.0:
                out_size = "%3.1f %s" % (in_size, size_cat)
                break
            in_size /= 1024.0
        else:
            out_size = "%3.1f %s" % (in_size, "To")
# end of method
return (hosted_req, filename, out_size) | python | def dl_hosted(
self,
token: dict = None,
resource_link: dict = None,
        encode_clean: bool = True,
proxy_url: str = None,
prot: str = "https",
) -> tuple:
"""Download hosted resource.
:param str token: API auth token
:param dict resource_link: link dictionary
:param bool encode_clean: option to ensure a clean filename and avoid OS errors
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
Example of resource_link dict:
.. code-block:: json
{
"_id": "g8h9i0j11k12l13m14n15o16p17Q18rS",
"type": "hosted",
"title": "label_of_hosted_file.zip",
"url": "/resources/1a2b3c4d5e6f7g8h9i0j11k12l13m14n/links/g8h9i0j11k12l13m14n15o16p17Q18rS.bin",
"kind": "data",
"actions": ["download", ],
"size": "2253029",
}
"""
# check resource link parameter type
if not isinstance(resource_link, dict):
raise TypeError("Resource link expects a dictionary.")
else:
pass
# check resource link type
if not resource_link.get("type") == "hosted":
raise ValueError(
"Resource link passed is not a hosted one: {}".format(
resource_link.get("type")
)
)
else:
pass
# handling request parameters
payload = {"proxyUrl": proxy_url}
# prepare URL request
hosted_url = "{}://v1.{}.isogeo.com/{}".format(
prot, self.api_url, resource_link.get("url")
)
# send stream request
hosted_req = self.get(
hosted_url,
headers=self.header,
stream=True,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# quick check
req_check = checker.check_api_response(hosted_req)
        if isinstance(req_check, tuple):
raise ConnectionError(req_check[1])
else:
pass
# get filename from header
content_disposition = hosted_req.headers.get("Content-Disposition")
if content_disposition:
filename = re.findall("filename=(.+)", content_disposition)[0]
else:
filename = resource_link.get("title")
# remove special characters
if encode_clean:
filename = utils.encoded_words_to_text(filename)
filename = re.sub(r"[^\w\-_\. ]", "", filename)
        # well-formed size (the API may return size as a string, so cast it first)
        in_size = float(resource_link.get("size"))
        for size_cat in ("octets", "Ko", "Mo", "Go"):
            if in_size < 1024.0:
                out_size = "%3.1f %s" % (in_size, size_cat)
                break
            in_size /= 1024.0
        else:
            out_size = "%3.1f %s" % (in_size, "To")
# end of method
return (hosted_req, filename, out_size) | [
"def",
"dl_hosted",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"resource_link",
":",
"dict",
"=",
"None",
",",
"encode_clean",
":",
"bool",
"=",
"1",
",",
"proxy_url",
":",
"str",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
",",
")",
"->",
"tuple",
":",
"# check resource link parameter type",
"if",
"not",
"isinstance",
"(",
"resource_link",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"Resource link expects a dictionary.\"",
")",
"else",
":",
"pass",
"# check resource link type",
"if",
"not",
"resource_link",
".",
"get",
"(",
"\"type\"",
")",
"==",
"\"hosted\"",
":",
"raise",
"ValueError",
"(",
"\"Resource link passed is not a hosted one: {}\"",
".",
"format",
"(",
"resource_link",
".",
"get",
"(",
"\"type\"",
")",
")",
")",
"else",
":",
"pass",
"# handling request parameters",
"payload",
"=",
"{",
"\"proxyUrl\"",
":",
"proxy_url",
"}",
"# prepare URL request",
"hosted_url",
"=",
"\"{}://v1.{}.isogeo.com/{}\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"resource_link",
".",
"get",
"(",
"\"url\"",
")",
")",
"# send stream request",
"hosted_req",
"=",
"self",
".",
"get",
"(",
"hosted_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"stream",
"=",
"True",
",",
"params",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"# quick check",
"req_check",
"=",
"checker",
".",
"check_api_response",
"(",
"hosted_req",
")",
"if",
"not",
"req_check",
":",
"raise",
"ConnectionError",
"(",
"req_check",
"[",
"1",
"]",
")",
"else",
":",
"pass",
"# get filename from header",
"content_disposition",
"=",
"hosted_req",
".",
"headers",
".",
"get",
"(",
"\"Content-Disposition\"",
")",
"if",
"content_disposition",
":",
"filename",
"=",
"re",
".",
"findall",
"(",
"\"filename=(.+)\"",
",",
"content_disposition",
")",
"[",
"0",
"]",
"else",
":",
"filename",
"=",
"resource_link",
".",
"get",
"(",
"\"title\"",
")",
"# remove special characters",
"if",
"encode_clean",
":",
"filename",
"=",
"utils",
".",
"encoded_words_to_text",
"(",
"filename",
")",
"filename",
"=",
"re",
".",
"sub",
"(",
"r\"[^\\w\\-_\\. ]\"",
",",
"\"\"",
",",
"filename",
")",
"# well-formed size",
"in_size",
"=",
"resource_link",
".",
"get",
"(",
"\"size\"",
")",
"for",
"size_cat",
"in",
"(",
"\"octets\"",
",",
"\"Ko\"",
",",
"\"Mo\"",
",",
"\"Go\"",
")",
":",
"if",
"in_size",
"<",
"1024.0",
":",
"out_size",
"=",
"\"%3.1f %s\"",
"%",
"(",
"in_size",
",",
"size_cat",
")",
"in_size",
"/=",
"1024.0",
"out_size",
"=",
"\"%3.1f %s\"",
"%",
"(",
"in_size",
",",
"\" To\"",
")",
"# end of method",
"return",
"(",
"hosted_req",
",",
"filename",
",",
"out_size",
")"
] | Download hosted resource.
:param str token: API auth token
:param dict resource_link: link dictionary
:param bool encode_clean: option to ensure a clean filename and avoid OS errors
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
Example of resource_link dict:
.. code-block:: json
{
"_id": "g8h9i0j11k12l13m14n15o16p17Q18rS",
"type": "hosted",
"title": "label_of_hosted_file.zip",
"url": "/resources/1a2b3c4d5e6f7g8h9i0j11k12l13m14n/links/g8h9i0j11k12l13m14n15o16p17Q18rS.bin",
"kind": "data",
"actions": ["download", ],
"size": "2253029",
} | [
"Download",
"hosted",
"resource",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L750-L843 |
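A minimal usage sketch chaining resource() and dl_hosted() (not part of the source record; the UUID reuses the docstring placeholder, and the chunked write is one common way to consume the streamed response):

.. code-block:: python

    from isogeo_pysdk import Isogeo  # assumed package entry point

    isogeo = Isogeo(client_id="APP_ID", client_secret="APP_SECRET")  # placeholders
    isogeo.connect()
    md = isogeo.resource(id_resource="1a2b3c4d5e6f7g8h9i0j11k12l13m14n", include=["links"])
    link = next(l for l in md.get("links", []) if l.get("type") == "hosted")
    req, filename, size = isogeo.dl_hosted(resource_link=link)
    with open(filename, "wb") as fh:  # stream the payload to disk
        for chunk in req.iter_content(chunk_size=8192):
            fh.write(chunk)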
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.xml19139 | def xml19139(
self,
token: dict = None,
id_resource: str = None,
proxy_url=None,
prot: str = "https",
):
"""Get resource exported into XML ISO 19139.
:param str token: API auth token
:param str id_resource: metadata UUID to export
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# check metadata UUID
if not checker.check_is_uuid(id_resource):
raise ValueError("Metadata ID is not a correct UUID.")
else:
pass
# handling request parameters
payload = {"proxyUrl": proxy_url, "id": id_resource}
# resource search
md_url = "{}://v1.{}.isogeo.com/resources/{}.xml".format(
prot, self.api_url, id_resource
)
xml_req = self.get(
md_url,
headers=self.header,
stream=True,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# end of method
return xml_req | python | def xml19139(
self,
token: dict = None,
id_resource: str = None,
proxy_url=None,
prot: str = "https",
):
"""Get resource exported into XML ISO 19139.
:param str token: API auth token
:param str id_resource: metadata UUID to export
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# check metadata UUID
if not checker.check_is_uuid(id_resource):
raise ValueError("Metadata ID is not a correct UUID.")
else:
pass
# handling request parameters
payload = {"proxyUrl": proxy_url, "id": id_resource}
# resource search
md_url = "{}://v1.{}.isogeo.com/resources/{}.xml".format(
prot, self.api_url, id_resource
)
xml_req = self.get(
md_url,
headers=self.header,
stream=True,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# end of method
return xml_req | [
"def",
"xml19139",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"id_resource",
":",
"str",
"=",
"None",
",",
"proxy_url",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
",",
")",
":",
"# check metadata UUID",
"if",
"not",
"checker",
".",
"check_is_uuid",
"(",
"id_resource",
")",
":",
"raise",
"ValueError",
"(",
"\"Metadata ID is not a correct UUID.\"",
")",
"else",
":",
"pass",
"# handling request parameters",
"payload",
"=",
"{",
"\"proxyUrl\"",
":",
"proxy_url",
",",
"\"id\"",
":",
"id_resource",
"}",
"# resource search",
"md_url",
"=",
"\"{}://v1.{}.isogeo.com/resources/{}.xml\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"id_resource",
")",
"xml_req",
"=",
"self",
".",
"get",
"(",
"md_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"stream",
"=",
"True",
",",
"params",
"=",
"payload",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
",",
")",
"# end of method",
"return",
"xml_req"
] | Get resource exported into XML ISO 19139.
:param str token: API auth token
:param str id_resource: metadata UUID to export
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"resource",
"exported",
"into",
"XML",
"ISO",
"19139",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L846-L884 |
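A minimal usage sketch for the XML export above. The constructor arguments and the `connect()` authentication step follow the SDK's usual pattern but are not shown in this record, so treat them (and the UUID) as placeholders:

```python
from isogeo_pysdk import Isogeo

# Placeholder credentials -- replace with a registered application's.
isogeo = Isogeo(client_id="my-client-id", client_secret="my-client-secret")
isogeo.connect()  # authentication flow assumed

md_uuid = "0f1e2d3c4b5a69788796a5b4c3d2e1f0"  # hypothetical metadata UUID
xml_req = isogeo.xml19139(id_resource=md_uuid)

# The request is streamed, so persist the export chunk by chunk.
with open(md_uuid + ".xml", "wb") as fd:
    for chunk in xml_req.iter_content(chunk_size=1024):
        fd.write(chunk)
```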
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.add_tags_shares | def add_tags_shares(self, tags: dict = dict()):
"""Add shares list to the tags attributes in search results.
:param dict tags: tags dictionary from a search request
"""
# check if shares_id has already been retrieved or not
if not hasattr(self, "shares_id"):
shares = self.shares()
self.shares_id = {
"share:{}".format(i.get("_id")): i.get("name") for i in shares
}
else:
pass
# update query tags
tags.update(self.shares_id) | python | def add_tags_shares(self, tags: dict = dict()):
"""Add shares list to the tags attributes in search results.
:param dict tags: tags dictionary from a search request
"""
# check if shares_id has already been retrieved or not
if not hasattr(self, "shares_id"):
shares = self.shares()
self.shares_id = {
"share:{}".format(i.get("_id")): i.get("name") for i in shares
}
else:
pass
# update query tags
tags.update(self.shares_id) | [
"def",
"add_tags_shares",
"(",
"self",
",",
"tags",
":",
"dict",
"=",
"dict",
"(",
")",
")",
":",
"# check if shares_id have already been retrieved or not",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"shares_id\"",
")",
":",
"shares",
"=",
"self",
".",
"shares",
"(",
")",
"self",
".",
"shares_id",
"=",
"{",
"\"share:{}\"",
".",
"format",
"(",
"i",
".",
"get",
"(",
"\"_id\"",
")",
")",
":",
"i",
".",
"get",
"(",
"\"name\"",
")",
"for",
"i",
"in",
"shares",
"}",
"else",
":",
"pass",
"# update query tags",
"tags",
".",
"update",
"(",
"self",
".",
"shares_id",
")"
] | Add shares list to the tags attributes in search results.
:param dict tags: tags dictionary from a search request | [
"Add",
"shares",
"list",
"to",
"the",
"tags",
"attributes",
"in",
"search",
"results",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L887-L901 |
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.get_app_properties | def get_app_properties(self, token: dict = None, prot: str = "https"):
"""Get information about the application declared on Isogeo.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# check if app properties have already been retrieved or not
if not hasattr(self, "app_properties"):
first_app = self.shares()[0].get("applications")[0]
app = {
"admin_url": "{}/applications/{}".format(
self.mng_url, first_app.get("_id")
),
"creation_date": first_app.get("_created"),
"last_update": first_app.get("_modified"),
"name": first_app.get("name"),
"type": first_app.get("type"),
"kind": first_app.get("kind"),
"url": first_app.get("url"),
}
self.app_properties = app
else:
pass | python | def get_app_properties(self, token: dict = None, prot: str = "https"):
"""Get information about the application declared on Isogeo.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# check if app properties have already been retrieved or not
if not hasattr(self, "app_properties"):
first_app = self.shares()[0].get("applications")[0]
app = {
"admin_url": "{}/applications/{}".format(
self.mng_url, first_app.get("_id")
),
"creation_date": first_app.get("_created"),
"last_update": first_app.get("_modified"),
"name": first_app.get("name"),
"type": first_app.get("type"),
"kind": first_app.get("kind"),
"url": first_app.get("url"),
}
self.app_properties = app
else:
pass | [
"def",
"get_app_properties",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
")",
":",
"# check if app properties have already been retrieved or not",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"app_properties\"",
")",
":",
"first_app",
"=",
"self",
".",
"shares",
"(",
")",
"[",
"0",
"]",
".",
"get",
"(",
"\"applications\"",
")",
"[",
"0",
"]",
"app",
"=",
"{",
"\"admin_url\"",
":",
"\"{}/applications/{}\"",
".",
"format",
"(",
"self",
".",
"mng_url",
",",
"first_app",
".",
"get",
"(",
"\"_id\"",
")",
")",
",",
"\"creation_date\"",
":",
"first_app",
".",
"get",
"(",
"\"_created\"",
")",
",",
"\"last_update\"",
":",
"first_app",
".",
"get",
"(",
"\"_modified\"",
")",
",",
"\"name\"",
":",
"first_app",
".",
"get",
"(",
"\"name\"",
")",
",",
"\"type\"",
":",
"first_app",
".",
"get",
"(",
"\"type\"",
")",
",",
"\"kind\"",
":",
"first_app",
".",
"get",
"(",
"\"kind\"",
")",
",",
"\"url\"",
":",
"first_app",
".",
"get",
"(",
"\"url\"",
")",
",",
"}",
"self",
".",
"app_properties",
"=",
"app",
"else",
":",
"pass"
] | Get information about the application declared on Isogeo.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"information",
"about",
"the",
"application",
"declared",
"on",
"Isogeo",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L904-L927 |
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.get_directives | def get_directives(self, token: dict = None, prot: str = "https") -> dict:
"""Get environment directives which represent INSPIRE limitations.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# search request
req_url = "{}://v1.{}.isogeo.com/directives".format(prot, self.api_url)
req = self.get(
req_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(req)
# end of method
return req.json() | python | def get_directives(self, token: dict = None, prot: str = "https") -> dict:
"""Get environment directives which represent INSPIRE limitations.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# search request
req_url = "{}://v1.{}.isogeo.com/directives".format(prot, self.api_url)
req = self.get(
req_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(req)
# end of method
return req.json() | [
"def",
"get_directives",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
")",
"->",
"dict",
":",
"# search request",
"req_url",
"=",
"\"{}://v1.{}.isogeo.com/directives\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
")",
"req",
"=",
"self",
".",
"get",
"(",
"req_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
")",
"# checking response",
"checker",
".",
"check_api_response",
"(",
"req",
")",
"# end of method",
"return",
"req",
".",
"json",
"(",
")"
] | Get environment directives which represent INSPIRE limitations.
:param str token: API auth token
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"environment",
"directives",
"which",
"represent",
"INSPIRE",
"limitations",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L951-L969 |
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.get_coordinate_systems | def get_coordinate_systems(
self, token: dict = None, srs_code: str = None, prot: str = "https"
) -> dict:
"""Get available coordinate systems in Isogeo API.
:param str token: API auth token
:param str srs_code: code of a specific coordinate system
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# if specific format
if isinstance(srs_code, str):
specific_srs = "/{}".format(srs_code)
else:
specific_srs = ""
# search request
req_url = "{}://v1.{}.isogeo.com/coordinate-systems{}".format(
prot, self.api_url, specific_srs
)
req = self.get(
req_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(req)
# end of method
return req.json() | python | def get_coordinate_systems(
self, token: dict = None, srs_code: str = None, prot: str = "https"
) -> dict:
"""Get available coordinate systems in Isogeo API.
:param str token: API auth token
:param str srs_code: code of a specific coordinate system
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# if specific format
if isinstance(srs_code, str):
specific_srs = "/{}".format(srs_code)
else:
specific_srs = ""
# search request
req_url = "{}://v1.{}.isogeo.com/coordinate-systems{}".format(
prot, self.api_url, specific_srs
)
req = self.get(
req_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(req)
# end of method
return req.json() | [
"def",
"get_coordinate_systems",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"srs_code",
":",
"str",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
")",
"->",
"dict",
":",
"# if specific format",
"if",
"isinstance",
"(",
"srs_code",
",",
"str",
")",
":",
"specific_srs",
"=",
"\"/{}\"",
".",
"format",
"(",
"srs_code",
")",
"else",
":",
"specific_srs",
"=",
"\"\"",
"# search request",
"req_url",
"=",
"\"{}://v1.{}.isogeo.com/coordinate-systems{}\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"specific_srs",
")",
"req",
"=",
"self",
".",
"get",
"(",
"req_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
")",
"# checking response",
"checker",
".",
"check_api_response",
"(",
"req",
")",
"# end of method",
"return",
"req",
".",
"json",
"(",
")"
] | Get available coordinate systems in Isogeo API.
:param str token: API auth token
:param str srs_code: code of a specific coordinate system
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"available",
"coordinate",
"systems",
"in",
"Isogeo",
"API",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L972-L1001 |
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/isogeo_sdk.py | Isogeo.get_formats | def get_formats(
self, token: dict = None, format_code: str = None, prot: str = "https"
) -> dict:
"""Get formats.
:param str token: API auth token
:param str format_code: code of a specific format
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# if specific format
if isinstance(format_code, str):
specific_format = "/{}".format(format_code)
else:
specific_format = ""
# search request
req_url = "{}://v1.{}.isogeo.com/formats{}".format(
prot, self.api_url, specific_format
)
req = self.get(
req_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(req)
# end of method
return req.json() | python | def get_formats(
self, token: dict = None, format_code: str = None, prot: str = "https"
) -> dict:
"""Get formats.
:param str token: API auth token
:param str format_code: code of a specific format
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# if specific format
if isinstance(format_code, str):
specific_format = "/{}".format(format_code)
else:
specific_format = ""
# search request
req_url = "{}://v1.{}.isogeo.com/formats{}".format(
prot, self.api_url, specific_format
)
req = self.get(
req_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(req)
# end of method
return req.json() | [
"def",
"get_formats",
"(",
"self",
",",
"token",
":",
"dict",
"=",
"None",
",",
"format_code",
":",
"str",
"=",
"None",
",",
"prot",
":",
"str",
"=",
"\"https\"",
")",
"->",
"dict",
":",
"# if specific format",
"if",
"isinstance",
"(",
"format_code",
",",
"str",
")",
":",
"specific_format",
"=",
"\"/{}\"",
".",
"format",
"(",
"format_code",
")",
"else",
":",
"specific_format",
"=",
"\"\"",
"# search request",
"req_url",
"=",
"\"{}://v1.{}.isogeo.com/formats{}\"",
".",
"format",
"(",
"prot",
",",
"self",
".",
"api_url",
",",
"specific_format",
")",
"req",
"=",
"self",
".",
"get",
"(",
"req_url",
",",
"headers",
"=",
"self",
".",
"header",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"ssl",
")",
"# checking response",
"checker",
".",
"check_api_response",
"(",
"req",
")",
"# end of method",
"return",
"req",
".",
"json",
"(",
")"
] | Get formats.
:param str token: API auth token
:param str format_code: code of a specific format
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs). | [
"Get",
"formats",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/isogeo_sdk.py#L1004-L1033 |
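`get_coordinate_systems` and `get_formats` share the same shape: no argument lists everything, a specific code narrows the result to one entry. A usage sketch for the formats variant (client setup as in the earlier sketch; `"shapefile"` being a valid format code is an assumption):

```python
from isogeo_pysdk import Isogeo

isogeo = Isogeo(client_id="my-client-id", client_secret="my-client-secret")
isogeo.connect()  # authentication flow assumed, as in the earlier sketch

all_formats = isogeo.get_formats()                 # every format known to the API
shp = isogeo.get_formats(format_code="shapefile")  # format code is an assumption
print(shp)
```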
kajala/django-jutil | jutil/parse.py | parse_bool | def parse_bool(v, default=None, exceptions: bool=True) -> bool:
"""
Parses boolean value
:param v: Input string
:param default: Default value if exceptions=False
:param exceptions: Raise exception on error or not
:return: bool
"""
if isinstance(v, bool):
return v
s = str(v).lower()
if s in TRUE_VALUES:
return True
elif s in FALSE_VALUES:
return False
else:
if exceptions:
raise ValidationError('Failed to parse boolean from "{}"'.format(v))
return default | python | def parse_bool(v, default=None, exceptions: bool=True) -> bool:
"""
Parses boolean value
:param v: Input string
:param default: Default value if exceptions=False
:param exceptions: Raise exception on error or not
:return: bool
"""
if isinstance(v, bool):
return v
s = str(v).lower()
if s in TRUE_VALUES:
return True
elif s in FALSE_VALUES:
return False
else:
if exceptions:
raise ValidationError('Failed to parse boolean from "{}"'.format(v))
return default | [
"def",
"parse_bool",
"(",
"v",
",",
"default",
"=",
"None",
",",
"exceptions",
":",
"bool",
"=",
"True",
")",
"->",
"bool",
":",
"if",
"isinstance",
"(",
"v",
",",
"bool",
")",
":",
"return",
"v",
"s",
"=",
"str",
"(",
"v",
")",
".",
"lower",
"(",
")",
"if",
"s",
"in",
"TRUE_VALUES",
":",
"return",
"True",
"elif",
"s",
"in",
"FALSE_VALUES",
":",
"return",
"False",
"else",
":",
"if",
"exceptions",
":",
"raise",
"ValidationError",
"(",
"'Failed to parse boolean from \"{}\"'",
".",
"format",
"(",
"v",
")",
")",
"return",
"default"
] | Parses boolean value
:param v: Input string
:param default: Default value if exceptions=False
:param exceptions: Raise exception on error or not
:return: bool | [
"Parses",
"boolean",
"value",
":",
"param",
"v",
":",
"Input",
"string",
":",
"param",
"default",
":",
"Default",
"value",
"if",
"exceptions",
"=",
"False",
":",
"param",
"exceptions",
":",
"Raise",
"exception",
"on",
"error",
"or",
"not",
":",
"return",
":",
"bool"
] | train | https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/parse.py#L22-L40 |
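Quick usage sketch. The `TRUE_VALUES`/`FALSE_VALUES` constants are not shown in this record; `"true"`/`"false"` are safe bets, while richer sets (`"yes"`, `"on"`, ...) are assumptions:

```python
from jutil.parse import parse_bool

assert parse_bool("TRUE") is True      # matching is lowercased first
assert parse_bool(False) is False      # real bools pass through unchanged

# With exceptions=False a failure returns the default instead of raising
# ValidationError:
assert parse_bool("maybe", default=False, exceptions=False) is False
```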
kajala/django-jutil | jutil/parse.py | parse_datetime | def parse_datetime(v, default=None, tz=None, exceptions: bool=True) -> datetime:
"""
Parses datetime
:param v: Input string
:param default: Default value if exceptions=False
:param tz: Default pytz timezone or None if utc
:param exceptions: Raise exception on error or not
:return: datetime
"""
try:
t = dateutil_parse(v, default=datetime(2000, 1, 1))
if tz is None:
tz = pytz.utc
return t if t.tzinfo else tz.localize(t)
except Exception:
if exceptions:
raise ValidationError('Failed to parse datetime from "{}"'.format(v))
return default | python | def parse_datetime(v, default=None, tz=None, exceptions: bool=True) -> datetime:
"""
Parses datetime
:param v: Input string
:param default: Default value if exceptions=False
:param tz: Default pytz timezone or None if utc
:param exceptions: Raise exception on error or not
:return: datetime
"""
try:
t = dateutil_parse(v, default=datetime(2000, 1, 1))
if tz is None:
tz = pytz.utc
return t if t.tzinfo else tz.localize(t)
except Exception:
if exceptions:
raise ValidationError('Failed to parse datetime from "{}"'.format(v))
return default | [
"def",
"parse_datetime",
"(",
"v",
",",
"default",
"=",
"None",
",",
"tz",
"=",
"None",
",",
"exceptions",
":",
"bool",
"=",
"True",
")",
"->",
"datetime",
":",
"try",
":",
"t",
"=",
"dateutil_parse",
"(",
"v",
",",
"default",
"=",
"datetime",
"(",
"2000",
",",
"1",
",",
"1",
")",
")",
"if",
"tz",
"is",
"None",
":",
"tz",
"=",
"pytz",
".",
"utc",
"return",
"t",
"if",
"t",
".",
"tzinfo",
"else",
"tz",
".",
"localize",
"(",
"t",
")",
"except",
"Exception",
":",
"if",
"exceptions",
":",
"raise",
"ValidationError",
"(",
"'Failed to parse datetime from \"{}\"'",
".",
"format",
"(",
"v",
")",
")",
"return",
"default"
] | Parses datetime
:param v: Input string
:param default: Default value if exceptions=False
:param tz: Default pytz timezone or None if utc
:param exceptions: Raise exception on error or not
:return: datetime | [
"Parses",
"datetime",
":",
"param",
"v",
":",
"Input",
"string",
":",
"param",
"default",
":",
"Default",
"value",
"if",
"exceptions",
"=",
"False",
":",
"param",
"tz",
":",
"Default",
"pytz",
"timezone",
"or",
"None",
"if",
"utc",
":",
"param",
"exceptions",
":",
"Raise",
"exception",
"on",
"error",
"or",
"not",
":",
"return",
":",
"datetime"
] | train | https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/parse.py#L43-L60 |
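Usage sketch for the datetime parser -- naive inputs are localized to UTC unless an explicit pytz timezone is supplied:

```python
import pytz
from jutil.parse import parse_datetime

dt = parse_datetime("2019-03-01 12:30")
assert dt.tzinfo is not None           # localized to UTC by default

helsinki = pytz.timezone("Europe/Helsinki")
dt_local = parse_datetime("2019-03-01 12:30", tz=helsinki)
print(dt_local.isoformat())

# Invalid input returns the default when exceptions=False.
assert parse_datetime("not a date", default=None, exceptions=False) is None
```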
InformaticsMatters/pipelines-utils | src/python/pipelines_utils/TypedColumnReader.py | convert_boolean | def convert_boolean(string_value):
"""Converts a string to a boolean (see CONVERTERS).
There is a converter function for each column type.
Boolean strings are independent of case. Values interpreted as True
are: "yes", "true", "on", "1". values interpreted as False are
"no", "false", "off", "0". Any other value will result in a ValueError.
:param string_value: The string to convert
:raises: ValueError if the string cannot be represented by a boolean
"""
lean_string_value = string_value.strip().lower()
if lean_string_value in ['yes', 'true', 'on', '1']:
return True
elif lean_string_value in ['no', 'false', 'off', '0']:
return False
# Not recognised boolean if we get here
raise ValueError('Unrecognised boolean ({})'.format(lean_string_value)) | python | def convert_boolean(string_value):
"""Converts a string to a boolean (see CONVERTERS).
There is a converter function for each column type.
Boolean strings are independent of case. Values interpreted as True
are: "yes", "true", "on", "1". values interpreted as False are
"no", "false", "off", "0". Any other value will result in a ValueError.
:param string_value: The string to convert
:raises: ValueError if the string cannot be represented by a boolean
"""
lean_string_value = string_value.strip().lower()
if lean_string_value in ['yes', 'true', 'on', '1']:
return True
elif lean_string_value in ['no', 'false', 'off', '0']:
return False
# Not recognised boolean if we get here
raise ValueError('Unrecognised boolean ({})'.format(lean_string_value)) | [
"def",
"convert_boolean",
"(",
"string_value",
")",
":",
"lean_string_value",
"=",
"string_value",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"lean_string_value",
"in",
"[",
"'yes'",
",",
"'true'",
",",
"'on'",
",",
"'1'",
"]",
":",
"return",
"True",
"elif",
"lean_string_value",
"in",
"[",
"'no'",
",",
"'false'",
",",
"'off'",
",",
"'0'",
"]",
":",
"return",
"False",
"# Not recognised boolean if we get here",
"raise",
"ValueError",
"(",
"'Unrecognised boolean ({})'",
".",
"format",
"(",
"lean_string_value",
")",
")"
] | Converts a string to a boolean (see CONVERTERS).
There is a converter function for each column type.
Boolean strings are independent of case. Values interpreted as True
are: "yes", "true", "on", "1". values interpreted as False are
"no", "false", "off", "0". Any other value will result in a ValueError.
:param string_value: The string to convert
:raises: ValueError if the string cannot be represented by a boolean | [
"Converts",
"a",
"string",
"to",
"a",
"boolean",
"(",
"see",
"CONVERTERS",
")",
".",
"There",
"is",
"a",
"converter",
"function",
"for",
"each",
"column",
"type",
"."
] | train | https://github.com/InformaticsMatters/pipelines-utils/blob/058aa6eceeff28c4ae402f6f58c58720bff0298e/src/python/pipelines_utils/TypedColumnReader.py#L68-L88 |
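Usage sketch -- input is stripped and lowercased before matching, and anything outside the two value sets raises:

```python
from pipelines_utils.TypedColumnReader import convert_boolean

assert convert_boolean(" YES ") is True   # case- and whitespace-insensitive
assert convert_boolean("0") is False

try:
    convert_boolean("maybe")
except ValueError as err:
    print(err)  # Unrecognised boolean (maybe)
```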
InformaticsMatters/pipelines-utils | src/python/pipelines_utils/TypedColumnReader.py | TypedColumnReader._handle_hdr | def _handle_hdr(self, hdr):
"""Given the file header line (or one provided when the object
is instantiated) this method populates the ``self._converters`` array,
a list of type converters indexed by the column name.
:param hdr: The header line.
:raises: ContentError for any formatting problems
:raises: UnknownTypeError if the type is not known
"""
column_number = 1
for cell in hdr:
cell_parts = cell.split(self._type_sep)
if len(cell_parts) not in [1, 2]:
raise ContentError(column_number, self._c_reader.line_num,
cell, 'Expected name and type (up to 2 items)')
name = cell_parts[0].strip()
if len(name) == 0:
raise ContentError(column_number, self._c_reader.line_num,
cell, 'Column name is empty')
if name in self._column_names:
raise ContentError(column_number, self._c_reader.line_num,
name, 'Duplicate column name')
if len(cell_parts) == 2:
column_type = cell_parts[1].strip().lower()
if column_type not in CONVERTERS:
raise UnknownTypeError(column_number, column_type)
else:
# Unspecified - assume built-in 'string'
column_type = 'string'
self._converters.append([name, CONVERTERS[column_type]])
self._column_names.append(name)
column_number += 1 | python | def _handle_hdr(self, hdr):
"""Given the file header line (or one provided when the object
is instantiated) this method populates the ``self._converters`` array,
a list of type converters indexed by the column name.
:param hdr: The header line.
:raises: ContentError for any formatting problems
:raises: UnknownTypeError if the type is not known
"""
column_number = 1
for cell in hdr:
cell_parts = cell.split(self._type_sep)
if len(cell_parts) not in [1, 2]:
raise ContentError(column_number, self._c_reader.line_num,
cell, 'Expected name and type (up to 2 items)')
name = cell_parts[0].strip()
if len(name) == 0:
raise ContentError(column_number, self._c_reader.line_num,
cell, 'Column name is empty')
if name in self._column_names:
raise ContentError(column_number, self._c_reader.line_num,
name, 'Duplicate column name')
if len(cell_parts) == 2:
column_type = cell_parts[1].strip().lower()
if column_type not in CONVERTERS:
raise UnknownTypeError(column_number, column_type)
else:
# Unspecified - assume built-in 'string'
column_type = 'string'
self._converters.append([name, CONVERTERS[column_type]])
self._column_names.append(name)
column_number += 1 | [
"def",
"_handle_hdr",
"(",
"self",
",",
"hdr",
")",
":",
"column_number",
"=",
"1",
"for",
"cell",
"in",
"hdr",
":",
"cell_parts",
"=",
"cell",
".",
"split",
"(",
"self",
".",
"_type_sep",
")",
"if",
"len",
"(",
"cell_parts",
")",
"not",
"in",
"[",
"1",
",",
"2",
"]",
":",
"raise",
"ContentError",
"(",
"column_number",
",",
"self",
".",
"_c_reader",
".",
"line_num",
",",
"cell",
",",
"'Expected name and type (up to 2 items)'",
")",
"name",
"=",
"cell_parts",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"name",
")",
"==",
"0",
":",
"raise",
"ContentError",
"(",
"column_number",
",",
"self",
".",
"_c_reader",
".",
"line_num",
",",
"cell",
",",
"'Column name is empty'",
")",
"if",
"name",
"in",
"self",
".",
"_column_names",
":",
"raise",
"ContentError",
"(",
"column_number",
",",
"self",
".",
"_c_reader",
".",
"line_num",
",",
"name",
",",
"'Duplicate column name'",
")",
"if",
"len",
"(",
"cell_parts",
")",
"==",
"2",
":",
"column_type",
"=",
"cell_parts",
"[",
"1",
"]",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"column_type",
"not",
"in",
"CONVERTERS",
":",
"raise",
"UnknownTypeError",
"(",
"column_number",
",",
"column_type",
")",
"else",
":",
"# Unspecified - assume built-in 'string'",
"column_type",
"=",
"'string'",
"self",
".",
"_converters",
".",
"append",
"(",
"[",
"name",
",",
"CONVERTERS",
"[",
"column_type",
"]",
"]",
")",
"self",
".",
"_column_names",
".",
"append",
"(",
"name",
")",
"column_number",
"+=",
"1"
] | Given the file header line (or one provided when the object
is instantiated) this method populates the ``self._converters`` array,
a list of type converters indexed by the column name.
:param hdr: The header line.
:raises: ContentError for any formatting problems
:raises: UnknownTypeError if the type is not known | [
"Given",
"the",
"file",
"header",
"line",
"(",
"or",
"one",
"provided",
"when",
"the",
"object",
"is",
"instantiated",
")",
"this",
"method",
"populates",
"the",
"self",
".",
"_converters",
"array",
"a",
"list",
"of",
"type",
"converters",
"indexed",
"by",
"the",
"column",
"name",
"."
] | train | https://github.com/InformaticsMatters/pipelines-utils/blob/058aa6eceeff28c4ae402f6f58c58720bff0298e/src/python/pipelines_utils/TypedColumnReader.py#L259-L293 |
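To make the header contract concrete, here is a standalone sketch of the same parsing logic (not the library class). The separator character and the concrete type names come from the reader's configuration and its `CONVERTERS` table, so `":"` and the type set below are assumptions:

```python
# Minimal converters table for the sketch only.
CONVERTERS = {"string": str, "integer": int, "float": float}

def parse_header(hdr, type_sep=":"):
    """Map header cells like 'count:integer' to (name, converter) pairs."""
    converters, names = [], []
    for cell in hdr:
        parts = cell.split(type_sep)
        if len(parts) not in (1, 2):
            raise ValueError("Expected name and type: {}".format(cell))
        name = parts[0].strip()
        if not name or name in names:
            raise ValueError("Empty or duplicate column: {}".format(cell))
        # An unspecified type falls back to the built-in 'string'.
        col_type = parts[1].strip().lower() if len(parts) == 2 else "string"
        if col_type not in CONVERTERS:
            raise ValueError("Unknown type: {}".format(col_type))
        converters.append((name, CONVERTERS[col_type]))
        names.append(name)
    return converters, names

print(parse_header(["id", "count:integer", "score:float"]))
```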
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/samples/udata_client_datagouvfr.py | DataGouvFr.search_datasets | def search_datasets(
self,
license=None,
format=None,
query=None,
featured=None,
owner=None,
organization=None,
badge=None,
reuses=None,
page_size=20,
x_fields=None,
):
"""Search datasets within uData portal."""
# handling request parameters
payload = {"badge": badge, "size": page_size, "X-Fields": x_fields}
# search request
# head = {"X-API-KEY": self.api_key}
search_url = "{}/datasets".format(
self.base_url,
# org_id,
# page_size
)
search_req = requests.get(
search_url,
# headers=head,
params=payload,
)
# serializing result into dict and storing resources in variables
logger.debug(search_req.url)
return search_req.json() | python | def search_datasets(
self,
license=None,
format=None,
query=None,
featured=None,
owner=None,
organization=None,
badge=None,
reuses=None,
page_size=20,
x_fields=None,
):
"""Search datasets within uData portal."""
# handling request parameters
payload = {"badge": badge, "size": page_size, "X-Fields": x_fields}
# search request
# head = {"X-API-KEY": self.api_key}
search_url = "{}/datasets".format(
self.base_url,
# org_id,
# page_size
)
search_req = requests.get(
search_url,
# headers=head,
params=payload,
)
# serializing result into dict and storing resources in variables
logger.debug(search_req.url)
return search_req.json() | [
"def",
"search_datasets",
"(",
"self",
",",
"license",
"=",
"None",
",",
"format",
"=",
"None",
",",
"query",
"=",
"None",
",",
"featured",
"=",
"None",
",",
"owner",
"=",
"None",
",",
"organization",
"=",
"None",
",",
"badge",
"=",
"None",
",",
"reuses",
"=",
"None",
",",
"page_size",
"=",
"20",
",",
"x_fields",
"=",
"None",
",",
")",
":",
"# handling request parameters",
"payload",
"=",
"{",
"\"badge\"",
":",
"badge",
",",
"\"size\"",
":",
"page_size",
",",
"\"X-Fields\"",
":",
"x_fields",
"}",
"# search request",
"# head = {\"X-API-KEY\": self.api_key}",
"search_url",
"=",
"\"{}/datasets\"",
".",
"format",
"(",
"self",
".",
"base_url",
",",
"# org_id,",
"# page_size",
")",
"search_req",
"=",
"requests",
".",
"get",
"(",
"search_url",
",",
"# headers=head,",
"params",
"=",
"payload",
",",
")",
"# serializing result into dict and storing resources in variables",
"logger",
".",
"debug",
"(",
"search_req",
".",
"url",
")",
"return",
"search_req",
".",
"json",
"(",
")"
] | Search datasets within uData portal. | [
"Search",
"datasets",
"within",
"uData",
"portal",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/samples/udata_client_datagouvfr.py#L79-L112 |
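Usage sketch. Note that despite the long signature, only `badge`, `page_size` and `x_fields` are actually forwarded as query parameters in this version; the constructor arguments, the `"spd"` badge and the `"data"` key of the uData response are assumptions:

```python
from isogeo_pysdk.samples.udata_client_datagouvfr import DataGouvFr

udata = DataGouvFr()  # constructor arguments, if any, are an assumption
results = udata.search_datasets(badge="spd", page_size=5)  # badge assumed
for dataset in results.get("data", []):
    print(dataset.get("title"))
```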
isogeo/isogeo-api-py-minsdk | isogeo_pysdk/samples/udata_client_datagouvfr.py | DataGouvFr.get_filters_values | def get_filters_values(self):
"""Get different filters values as dicts."""
# DATASETS --
# badges
self._DST_BADGES = requests.get(self.base_url + "datasets/badges/").json()
# licences
self._DST_LICENSES = {
l.get("id"): l.get("title")
for l in requests.get(self.base_url + "datasets/licenses").json()
}
# frequencies
self._DST_FREQUENCIES = {
f.get("id"): f.get("label")
for f in requests.get(self.base_url + "datasets/frequencies").json()
}
# ORGANIZATIONS --
# badges
self._ORG_BADGES = requests.get(self.base_url + "organizations/badges/").json()
# # licences
# self._DST_LICENSES = {l.get("id"): l.get("title")
# for l in requests.get(self.base_url + "datasets/licenses").json()}
# # frequencies
# self._DST_FREQUENCIES = {f.get("id"): f.get("label")
# for f in requests.get(self.base_url + "datasets/frequencies").json()}
# SPATIAL --
# granularities
self._GRANULARITIES = {
g.get("id"): g.get("name")
for g in requests.get(self.base_url + "spatial/granularities").json()
}
# levels
self._LEVELS = {
g.get("id"): g.get("name")
for g in requests.get(self.base_url + "spatial/levels").json()
}
# MISC --
# facets
self._FACETS = (
"all",
"badge",
"featured",
"format",
"geozone",
"granularity",
"license",
"owner",
"organization",
"reuses",
"tag",
"temporal_coverage",
)
# reuses
self._REUSES = ("none", "few", "quite", "many") | python | def get_filters_values(self):
"""Get different filters values as dicts."""
# DATASETS --
# badges
self._DST_BADGES = requests.get(self.base_url + "datasets/badges/").json()
# licences
self._DST_LICENSES = {
l.get("id"): l.get("title")
for l in requests.get(self.base_url + "datasets/licenses").json()
}
# frequencies
self._DST_FREQUENCIES = {
f.get("id"): f.get("label")
for f in requests.get(self.base_url + "datasets/frequencies").json()
}
# ORGANIZATIONS --
# badges
self._ORG_BADGES = requests.get(self.base_url + "organizations/badges/").json()
# # licences
# self._DST_LICENSES = {l.get("id"): l.get("title")
# for l in requests.get(self.base_url + "datasets/licenses").json()}
# # frequencies
# self._DST_FREQUENCIES = {f.get("id"): f.get("label")
# for f in requests.get(self.base_url + "datasets/frequencies").json()}
# SPATIAL --
# granularities
self._GRANULARITIES = {
g.get("id"): g.get("name")
for g in requests.get(self.base_url + "spatial/granularities").json()
}
# levels
self._LEVELS = {
g.get("id"): g.get("name")
for g in requests.get(self.base_url + "spatial/levels").json()
}
# MISC --
# facets
self._FACETS = (
"all",
"badge",
"featured",
"format",
"geozone",
"granularity",
"license",
"owner",
"organization",
"reuses",
"tag",
"temporal_coverage",
)
# reuses
self._REUSES = ("none", "few", "quite", "many") | [
"def",
"get_filters_values",
"(",
"self",
")",
":",
"# DATASETS --",
"# badges",
"self",
".",
"_DST_BADGES",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"\"datasets/badges/\"",
")",
".",
"json",
"(",
")",
"# licences",
"self",
".",
"_DST_LICENSES",
"=",
"{",
"l",
".",
"get",
"(",
"\"id\"",
")",
":",
"l",
".",
"get",
"(",
"\"title\"",
")",
"for",
"l",
"in",
"requests",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"\"datasets/licenses\"",
")",
".",
"json",
"(",
")",
"}",
"# frequencies",
"self",
".",
"_DST_FREQUENCIES",
"=",
"{",
"f",
".",
"get",
"(",
"\"id\"",
")",
":",
"f",
".",
"get",
"(",
"\"label\"",
")",
"for",
"f",
"in",
"requests",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"\"datasets/frequencies\"",
")",
".",
"json",
"(",
")",
"}",
"# ORGANIZATIONS --",
"# badges",
"self",
".",
"_ORG_BADGES",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"\"organizations/badges/\"",
")",
".",
"json",
"(",
")",
"# # licences",
"# self._DST_LICENSES = {l.get(\"id\"): l.get(\"title\")",
"# for l in requests.get(self.base_url + \"datasets/licenses\").json()}",
"# # frequencies",
"# self._DST_FREQUENCIES = {f.get(\"id\"): f.get(\"label\")",
"# for f in requests.get(self.base_url + \"datasets/frequencies\").json()}",
"# SPATIAL --",
"# granularities",
"self",
".",
"_GRANULARITIES",
"=",
"{",
"g",
".",
"get",
"(",
"\"id\"",
")",
":",
"g",
".",
"get",
"(",
"\"name\"",
")",
"for",
"g",
"in",
"requests",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"\"spatial/granularities\"",
")",
".",
"json",
"(",
")",
"}",
"# levels",
"self",
".",
"_LEVELS",
"=",
"{",
"g",
".",
"get",
"(",
"\"id\"",
")",
":",
"g",
".",
"get",
"(",
"\"name\"",
")",
"for",
"g",
"in",
"requests",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"\"spatial/levels\"",
")",
".",
"json",
"(",
")",
"}",
"# MISC --",
"# facets",
"self",
".",
"_FACETS",
"=",
"(",
"\"all\"",
",",
"\"badge\"",
",",
"\"featured\"",
",",
"\"format\"",
",",
"\"geozone\"",
",",
"\"granularity\"",
",",
"\"license\"",
",",
"\"owner\"",
",",
"\"organization\"",
",",
"\"reuses\"",
",",
"\"tag\"",
",",
"\"temporal_coverage\"",
",",
")",
"# reuses",
"self",
".",
"_REUSES",
"=",
"(",
"\"none\"",
",",
"\"few\"",
",",
"\"quite\"",
",",
"\"many\"",
")"
] | Get different filter values as dicts. | [
"Get",
"different",
"filters",
"values",
"as",
"dicts",
"."
] | train | https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/samples/udata_client_datagouvfr.py#L115-L167 |
novopl/peltak | src/peltak_appengine/logic.py | deploy | def deploy(app_id, version, promote, quiet):
# type: (str, str, bool, bool) -> None
""" Deploy the app to AppEngine.
Args:
app_id (str):
AppEngine App ID. Overrides config value app_id if given.
version (str):
AppEngine project version. Overrides config values if given.
promote (bool):
If set to **True** promote the current remote app version to the one
that's being deployed.
quiet (bool):
If set to **True** this will pass the ``--quiet`` flag to gcloud
command.
"""
gae_app = GaeApp.for_branch(git.current_branch().name)
if gae_app is None and None in (app_id, version):
msg = (
"Can't find an AppEngine app setup for branch <35>{}<32> and"
"--project and --version were not given."
)
log.err(msg, git.current_branch().name)
sys.exit(1)
if version is not None:
gae_app.version = version
if app_id is not None:
gae_app.app_id = app_id
gae_app.deploy(promote, quiet) | python | def deploy(app_id, version, promote, quiet):
# type: (str, str, bool, bool) -> None
""" Deploy the app to AppEngine.
Args:
app_id (str):
AppEngine App ID. Overrides config value app_id if given.
version (str):
AppEngine project version. Overrides config values if given.
promote (bool):
If set to **True** promote the current remote app version to the one
that's being deployed.
quiet (bool):
If set to **True** this will pass the ``--quiet`` flag to gcloud
command.
"""
gae_app = GaeApp.for_branch(git.current_branch().name)
if gae_app is None and None in (app_id, version):
msg = (
"Can't find an AppEngine app setup for branch <35>{}<32> and"
"--project and --version were not given."
)
log.err(msg, git.current_branch().name)
sys.exit(1)
if version is not None:
gae_app.version = version
if app_id is not None:
gae_app.app_id = app_id
gae_app.deploy(promote, quiet) | [
"def",
"deploy",
"(",
"app_id",
",",
"version",
",",
"promote",
",",
"quiet",
")",
":",
"# type: (str, str, bool, bool) -> None",
"gae_app",
"=",
"GaeApp",
".",
"for_branch",
"(",
"git",
".",
"current_branch",
"(",
")",
".",
"name",
")",
"if",
"gae_app",
"is",
"None",
"and",
"None",
"in",
"(",
"app_id",
",",
"version",
")",
":",
"msg",
"=",
"(",
"\"Can't find an AppEngine app setup for branch <35>{}<32> and\"",
"\"--project and --version were not given.\"",
")",
"log",
".",
"err",
"(",
"msg",
",",
"git",
".",
"current_branch",
"(",
")",
".",
"name",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"version",
"is",
"not",
"None",
":",
"gae_app",
".",
"version",
"=",
"version",
"if",
"app_id",
"is",
"not",
"None",
":",
"gae_app",
".",
"app_id",
"=",
"app_id",
"gae_app",
".",
"deploy",
"(",
"promote",
",",
"quiet",
")"
] | Deploy the app to AppEngine.
Args:
app_id (str):
AppEngine App ID. Overrides config value app_id if given.
version (str):
AppEngine project version. Overrides config values if given.
promote (bool):
If set to **True** promote the current remote app version to the one
that's being deployed.
quiet (bool):
If set to **True** this will pass the ``--quiet`` flag to gcloud
command. | [
"Deploy",
"the",
"app",
"to",
"AppEngine",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak_appengine/logic.py#L41-L73 |
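A sketch of calling the deploy logic directly (in the real project this backs a peltak CLI command). It must run inside a git checkout with a peltak config; the app id and version below are placeholders:

```python
from peltak_appengine import logic

# Promote quietly to an explicit app/version instead of the values derived
# from the current git branch.
logic.deploy(app_id="my-gae-project", version="v42", promote=True, quiet=True)
```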
novopl/peltak | src/peltak_appengine/logic.py | devserver | def devserver(port, admin_port, clear):
# type: (int, int, bool) -> None
""" Run devserver.
Args:
port (int):
Port on which the app will be served.
admin_port (int):
Port on which the admin interface is served.
clear (bool):
If set to **True**, clear the datastore on startup.
"""
admin_port = admin_port or (port + 1)
args = [
'--port={}'.format(port),
'--admin_port={}'.format(admin_port)
]
if clear:
args += ['--clear_datastore=yes']
with conf.within_proj_dir():
shell.run('dev_appserver.py . {args}'.format(args=' '.join(args))) | python | def devserver(port, admin_port, clear):
# type: (int, int, bool) -> None
""" Run devserver.
Args:
port (int):
Port on which the app will be served.
admin_port (int):
Port on which the admin interface is served.
clear (bool):
If set to **True**, clear the datastore on startup.
"""
admin_port = admin_port or (port + 1)
args = [
'--port={}'.format(port),
'--admin_port={}'.format(admin_port)
]
if clear:
args += ['--clear_datastore=yes']
with conf.within_proj_dir():
shell.run('dev_appserver.py . {args}'.format(args=' '.join(args))) | [
"def",
"devserver",
"(",
"port",
",",
"admin_port",
",",
"clear",
")",
":",
"# type: (int, int, bool) -> None",
"admin_port",
"=",
"admin_port",
"or",
"(",
"port",
"+",
"1",
")",
"args",
"=",
"[",
"'--port={}'",
".",
"format",
"(",
"port",
")",
",",
"'--admin_port={}'",
".",
"format",
"(",
"admin_port",
")",
"]",
"if",
"clear",
":",
"args",
"+=",
"[",
"'--clear_datastore=yes'",
"]",
"with",
"conf",
".",
"within_proj_dir",
"(",
")",
":",
"shell",
".",
"run",
"(",
"'dev_appserver.py . {args}'",
".",
"format",
"(",
"args",
"=",
"' '",
".",
"join",
"(",
"args",
")",
")",
")"
] | Run devserver.
Args:
port (int):
Port on which the app will be served.
admin_port (int):
Port on which the admin interface is served.
clear (bool):
If set to **True**, clear the datastore on startup. | [
"Run",
"devserver",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak_appengine/logic.py#L77-L100 |
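Companion sketch for the dev server; passing `admin_port=None` picks `port + 1`:

```python
from peltak_appengine import logic

# Serve on 8080, admin UI on 8081 (port + 1), wiping the local datastore.
logic.devserver(port=8080, admin_port=None, clear=True)
```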
novopl/peltak | src/peltak_appengine/logic.py | setup_ci | def setup_ci():
# type: () -> None
""" Setup AppEngine SDK on CircleCI """
gcloud_path = shell.run('which gcloud', capture=True).stdout.strip()
sdk_path = normpath(join(gcloud_path, '../../platform/google_appengine'))
gcloud_cmd = gcloud_path + ' --quiet'
if not exists(sdk_path):
log.info("Installing AppEngine SDK")
shell.run('sudo {} components install app-engine-python'.format(
gcloud_cmd
))
else:
# Only initialise once. To reinitialise, just build without cache.
log.info("AppEngine SDK already initialised")
log.info("Using service account authentication")
shell.run('{} auth activate-service-account --key-file {}'.format(
gcloud_cmd,
conf.proj_path('ops/client_secret.json')
)) | python | def setup_ci():
# type: () -> None
""" Setup AppEngine SDK on CircleCI """
gcloud_path = shell.run('which gcloud', capture=True).stdout.strip()
sdk_path = normpath(join(gcloud_path, '../../platform/google_appengine'))
gcloud_cmd = gcloud_path + ' --quiet'
if not exists(sdk_path):
log.info("Installing AppEngine SDK")
shell.run('sudo {} components install app-engine-python'.format(
gcloud_cmd
))
else:
# Only initialise once. To reinitialise, just build without cache.
log.info("AppEngine SDK already initialised")
log.info("Using service account authentication")
shell.run('{} auth activate-service-account --key-file {}'.format(
gcloud_cmd,
conf.proj_path('ops/client_secret.json')
)) | [
"def",
"setup_ci",
"(",
")",
":",
"# type: () -> None",
"gcloud_path",
"=",
"shell",
".",
"run",
"(",
"'which gcloud'",
",",
"capture",
"=",
"True",
")",
".",
"stdout",
".",
"strip",
"(",
")",
"sdk_path",
"=",
"normpath",
"(",
"join",
"(",
"gcloud_path",
",",
"'../../platform/google_appengine'",
")",
")",
"gcloud_cmd",
"=",
"gcloud_path",
"+",
"' --quiet'",
"if",
"not",
"exists",
"(",
"sdk_path",
")",
":",
"log",
".",
"info",
"(",
"\"Installing AppEngine SDK\"",
")",
"shell",
".",
"run",
"(",
"'sudo {} components install app-engine-python'",
".",
"format",
"(",
"gcloud_cmd",
")",
")",
"else",
":",
"# Only initialise once. To reinitialise, just build without cache.",
"log",
".",
"info",
"(",
"\"AppEngine SDK already initialised\"",
")",
"log",
".",
"info",
"(",
"\"Using service account authentication\"",
")",
"shell",
".",
"run",
"(",
"'{} auth activate-service-account --key-file {}'",
".",
"format",
"(",
"gcloud_cmd",
",",
"conf",
".",
"proj_path",
"(",
"'ops/client_secret.json'",
")",
")",
")"
] | Setup AppEngine SDK on CircleCI | [
"Setup",
"AppEngine",
"SDK",
"on",
"CircleCI"
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak_appengine/logic.py#L104-L124 |
novopl/peltak | src/peltak/core/util.py | mark_experimental | def mark_experimental(fn):
# type: (FunctionType) -> FunctionType
""" Mark function as experimental.
Args:
fn (FunctionType):
The command function to decorate.
"""
@wraps(fn)
def wrapper(*args, **kw): # pylint: disable=missing-docstring
from peltak.core import shell
if shell.is_tty:
warnings.warn("This command is has experimental status. The "
"interface is not yet stable and might change "
"without notice within with a patch version update. "
"Use at your own risk")
return fn(*args, **kw)
return wrapper | python | def mark_experimental(fn):
# type: (FunctionType) -> FunctionType
""" Mark function as experimental.
Args:
fn (FunctionType):
The command function to decorate.
"""
@wraps(fn)
def wrapper(*args, **kw): # pylint: disable=missing-docstring
from peltak.core import shell
if shell.is_tty:
warnings.warn("This command is has experimental status. The "
"interface is not yet stable and might change "
"without notice within with a patch version update. "
"Use at your own risk")
return fn(*args, **kw)
return wrapper | [
"def",
"mark_experimental",
"(",
"fn",
")",
":",
"# type: (FunctionType) -> FunctionType",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"# pylint: disable=missing-docstring",
"from",
"peltak",
".",
"core",
"import",
"shell",
"if",
"shell",
".",
"is_tty",
":",
"warnings",
".",
"warn",
"(",
"\"This command is has experimental status. The \"",
"\"interface is not yet stable and might change \"",
"\"without notice within with a patch version update. \"",
"\"Use at your own risk\"",
")",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"wrapper"
] | Mark function as experimental.
Args:
fn (FunctionType):
The command function to decorate. | [
"Mark",
"function",
"as",
"experimental",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/core/util.py#L74-L93 |
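Usage sketch for the decorator -- the warning only fires when `shell.is_tty` is true, i.e. when the command runs in a terminal:

```python
from peltak.core.util import mark_experimental

@mark_experimental
def my_command():
    return "done"

# On a TTY this emits the "experimental status" warning, then delegates.
print(my_command())
```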
novopl/peltak | src/peltak/core/util.py | mark_deprecated | def mark_deprecated(replaced_by):
# type: (Text) -> FunctionType
""" Mark command as deprecated.
Args:
replaced_by (str):
The command that deprecated this command and should be used instead.
"""
def decorator(fn): # pylint: disable=missing-docstring
@wraps(fn)
def wrapper(*args, **kw): # pylint: disable=missing-docstring
from peltak.core import shell
if shell.is_tty:
warnings.warn("This command is has been deprecated. Please use "
"{new} instead.".format(new=replaced_by))
return fn(*args, **kw)
return wrapper
return decorator | python | def mark_deprecated(replaced_by):
# type: (Text) -> FunctionType
""" Mark command as deprecated.
Args:
replaced_by (str):
The command that deprecated this command and should be used instead.
"""
def decorator(fn): # pylint: disable=missing-docstring
@wraps(fn)
def wrapper(*args, **kw): # pylint: disable=missing-docstring
from peltak.core import shell
if shell.is_tty:
warnings.warn("This command is has been deprecated. Please use "
"{new} instead.".format(new=replaced_by))
return fn(*args, **kw)
return wrapper
return decorator | [
"def",
"mark_deprecated",
"(",
"replaced_by",
")",
":",
"# type: (Text) -> FunctionType",
"def",
"decorator",
"(",
"fn",
")",
":",
"# pylint: disable=missing-docstring",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"# pylint: disable=missing-docstring",
"from",
"peltak",
".",
"core",
"import",
"shell",
"if",
"shell",
".",
"is_tty",
":",
"warnings",
".",
"warn",
"(",
"\"This command is has been deprecated. Please use \"",
"\"{new} instead.\"",
".",
"format",
"(",
"new",
"=",
"replaced_by",
")",
")",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"wrapper",
"return",
"decorator"
] | Mark command as deprecated.
Args:
replaced_by (str):
The command that deprecated this command and should be used instead. | [
"Mark",
"command",
"as",
"deprecated",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/core/util.py#L96-L117 |
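Same pattern, but parameterized with the replacement command:

```python
from peltak.core.util import mark_deprecated

@mark_deprecated("peltak new-cmd")
def old_cmd():
    return "ok"

# On a TTY this warns: "This command has been deprecated. Please use
# peltak new-cmd instead." -- then runs the wrapped function.
print(old_cmd())
```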
novopl/peltak | src/peltak/core/util.py | in_batches | def in_batches(iterable, batch_size):
# type: (Iterable[Any]) -> Generator[List[Any]]
""" Split the given iterable into batches.
Args:
iterable (Iterable[Any]):
The iterable you want to split into batches.
batch_size (int):
The size of each batch. The last batch will probably be smaller (if
the number of elements cannot be equally divided).
Returns:
Generator[list[Any]]: Will yield all items in batches of **batch_size**
size.
Example:
>>> from peltak.core import util
>>>
>>> batches = util.in_batches([1, 2, 3, 4, 5, 6, 7], 3)
>>> batches = list(batches) # so we can query for lenght
>>> len(batches)
3
>>> batches
[[1, 2, 3], [4, 5, 6], [7]]
"""
items = list(iterable)
size = len(items)
for i in range(0, size, batch_size):
yield items[i:min(i + batch_size, size)] | python | def in_batches(iterable, batch_size):
# type: (Iterable[Any], int) -> Generator[List[Any]]
""" Split the given iterable into batches.
Args:
iterable (Iterable[Any]):
The iterable you want to split into batches.
batch_size (int):
The size of each bach. The last batch will be probably smaller (if
the number of elements cannot be equally divided.
Returns:
Generator[list[Any]]: Will yield all items in batches of **batch_size**
size.
Example:
>>> from peltak.core import util
>>>
>>> batches = util.in_batches([1, 2, 3, 4, 5, 6, 7], 3)
>>> batches = list(batches) # so we can query for lenght
>>> len(batches)
3
>>> batches
[[1, 2, 3], [4, 5, 6], [7]]
"""
items = list(iterable)
size = len(items)
for i in range(0, size, batch_size):
yield items[i:min(i + batch_size, size)] | [
"def",
"in_batches",
"(",
"iterable",
",",
"batch_size",
")",
":",
"# type: (Iterable[Any]) -> Generator[List[Any]]",
"items",
"=",
"list",
"(",
"iterable",
")",
"size",
"=",
"len",
"(",
"items",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"size",
",",
"batch_size",
")",
":",
"yield",
"items",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"batch_size",
",",
"size",
")",
"]"
] | Split the given iterable into batches.
Args:
iterable (Iterable[Any]):
The iterable you want to split into batches.
batch_size (int):
The size of each batch. The last batch will probably be smaller (if
the number of elements cannot be equally divided).
Returns:
Generator[list[Any]]: Will yield all items in batches of **batch_size**
size.
Example:
>>> from peltak.core import util
>>>
>>> batches = util.in_batches([1, 2, 3, 4, 5, 6, 7], 3)
>>> batches = list(batches) # so we can query for lenght
>>> len(batches)
3
>>> batches
[[1, 2, 3], [4, 5, 6], [7]] | [
"Split",
"the",
"given",
"iterable",
"into",
"batches",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/core/util.py#L186-L217 |
novopl/peltak | src/peltak/core/util.py | yaml_dump | def yaml_dump(data, stream=None):
# type: (YamlData, Optional[TextIO]) -> Text
""" Dump data to a YAML string/file.
Args:
data (YamlData):
The data to serialize as YAML.
stream (TextIO):
The file-like object to save to. If given, this function will write
the resulting YAML to that stream.
Returns:
str: The YAML string.
"""
return yaml.dump(
data,
stream=stream,
Dumper=Dumper,
default_flow_style=False
) | python | def yaml_dump(data, stream=None):
# type: (YamlData, Optional[TextIO]) -> Text
""" Dump data to a YAML string/file.
Args:
data (YamlData):
The data to serialize as YAML.
stream (TextIO):
The file-like object to save to. If given, this function will write
the resulting YAML to that stream.
Returns:
str: The YAML string.
"""
return yaml.dump(
data,
stream=stream,
Dumper=Dumper,
default_flow_style=False
) | [
"def",
"yaml_dump",
"(",
"data",
",",
"stream",
"=",
"None",
")",
":",
"# type: (YamlData, Optional[TextIO]) -> Text",
"return",
"yaml",
".",
"dump",
"(",
"data",
",",
"stream",
"=",
"stream",
",",
"Dumper",
"=",
"Dumper",
",",
"default_flow_style",
"=",
"False",
")"
] | Dump data to a YAML string/file.
Args:
data (YamlData):
The data to serialize as YAML.
stream (TextIO):
The file-like object to save to. If given, this function will write
the resulting YAML to that stream.
Returns:
str: The YAML string. | [
"Dump",
"data",
"to",
"a",
"YAML",
"string",
"/",
"file",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/core/util.py#L233-L252 |
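Usage sketch -- the same call covers string output and writing to a file:

```python
from peltak.core.util import yaml_dump

data = {"name": "peltak", "tags": ["cli", "workflow"]}

print(yaml_dump(data))             # returns the YAML as a string

with open("out.yaml", "w") as fp:  # or write straight to a stream
    yaml_dump(data, stream=fp)
```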
novopl/peltak | src/peltak/core/util.py | cached_result.clear | def clear(cls, fn):
# type: (FunctionType) -> None
""" Clear result cache on the given function.
If the function has no cached result, this call will do nothing.
Args:
fn (FunctionType):
The function whose cache should be cleared.
"""
if hasattr(fn, cls.CACHE_VAR):
delattr(fn, cls.CACHE_VAR) | python | def clear(cls, fn):
# type: (FunctionType) -> None
""" Clear result cache on the given function.
If the function has no cached result, this call will do nothing.
Args:
fn (FunctionType):
The function whose cache should be cleared.
"""
if hasattr(fn, cls.CACHE_VAR):
delattr(fn, cls.CACHE_VAR) | [
"def",
"clear",
"(",
"cls",
",",
"fn",
")",
":",
"# type: (FunctionType) -> None",
"if",
"hasattr",
"(",
"fn",
",",
"cls",
".",
"CACHE_VAR",
")",
":",
"delattr",
"(",
"fn",
",",
"cls",
".",
"CACHE_VAR",
")"
] | Clear result cache on the given function.
If the function has no cached result, this call will do nothing.
Args:
fn (FunctionType):
The function whose cache should be cleared. | [
"Clear",
"result",
"cache",
"on",
"the",
"given",
"function",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/core/util.py#L172-L183 |
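Sketch of the cache-clearing contract. Only `clear()` and the `CACHE_VAR` attribute are shown in this record, so the decoration style below (`@util.cached_result()`) is an assumption:

```python
from peltak.core import util

@util.cached_result()  # decoration style assumed; only .clear() is shown above
def answer():
    print("computing...")
    return 42

answer()                          # computing... -> 42
answer()                          # assumed served from cache (no recompute)
util.cached_result.clear(answer)  # drop the cached value
answer()                          # computing... again
```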
novopl/peltak | src/peltak/extra/gitflow/logic/task.py | start | def start(name):
# type: (str) -> None
""" Start working on a new feature by branching off develop.
This will create a new branch off develop called feature/<name>.
Args:
name (str):
The name of the new task.
"""
branch = git.current_branch(refresh=True)
task_branch = 'task/' + common.to_branch_name(name)
if branch.type not in ('feature', 'hotfix'):
log.err("Task branches can only branch off <33>feature<32> or "
"<33>hotfix<32> branches")
sys.exit(1)
common.git_checkout(task_branch, create=True) | python | def start(name):
# type: (str) -> None
""" Start working on a new feature by branching off develop.
This will create a new branch off develop called feature/<name>.
Args:
name (str):
The name of the new task.
"""
branch = git.current_branch(refresh=True)
task_branch = 'task/' + common.to_branch_name(name)
if branch.type not in ('feature', 'hotfix'):
log.err("Task branches can only branch off <33>feature<32> or "
"<33>hotfix<32> branches")
sys.exit(1)
common.git_checkout(task_branch, create=True) | [
"def",
"start",
"(",
"name",
")",
":",
"# type: (str) -> None",
"branch",
"=",
"git",
".",
"current_branch",
"(",
"refresh",
"=",
"True",
")",
"task_branch",
"=",
"'task/'",
"+",
"common",
".",
"to_branch_name",
"(",
"name",
")",
"if",
"branch",
".",
"type",
"not",
"in",
"(",
"'feature'",
",",
"'hotfix'",
")",
":",
"log",
".",
"err",
"(",
"\"Task branches can only branch off <33>feature<32> or \"",
"\"<33>hotfix<32> branches\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"common",
".",
"git_checkout",
"(",
"task_branch",
",",
"create",
"=",
"True",
")"
] | Start working on a new task by branching off the current feature/hotfix branch.
This will create a new branch off the current branch called task/<name>.
Args:
name (str):
The name of the new task. | [
"Start",
"working",
"on",
"a",
"new",
"feature",
"by",
"branching",
"off",
"develop",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/task.py#L33-L51 |
novopl/peltak | src/peltak/extra/gitflow/logic/task.py | update | def update():
# type: () -> None
""" Update the feature with updates committed to develop.
This will merge current develop into the current branch.
"""
branch = git.current_branch(refresh=True)
base_branch = common.get_base_branch()
common.assert_branch_type('task')
common.git_checkout(base_branch)
common.git_pull(base_branch)
common.git_checkout(branch.name)
common.git_merge(branch.name, base_branch) | python | def update():
# type: () -> None
""" Update the feature with updates committed to develop.
This will merge current develop into the current branch.
"""
branch = git.current_branch(refresh=True)
base_branch = common.get_base_branch()
common.assert_branch_type('task')
common.git_checkout(base_branch)
common.git_pull(base_branch)
common.git_checkout(branch.name)
common.git_merge(branch.name, base_branch) | [
"def",
"update",
"(",
")",
":",
"# type: () -> None",
"branch",
"=",
"git",
".",
"current_branch",
"(",
"refresh",
"=",
"True",
")",
"base_branch",
"=",
"common",
".",
"get_base_branch",
"(",
")",
"common",
".",
"assert_branch_type",
"(",
"'task'",
")",
"common",
".",
"git_checkout",
"(",
"base_branch",
")",
"common",
".",
"git_pull",
"(",
"base_branch",
")",
"common",
".",
"git_checkout",
"(",
"branch",
".",
"name",
")",
"common",
".",
"git_merge",
"(",
"branch",
".",
"name",
",",
"base_branch",
")"
] | Update the task branch with changes committed to its base branch.
This will merge the base feature/hotfix branch into the current task branch. | [
"Update",
"the",
"feature",
"with",
"updates",
"committed",
"to",
"develop",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/task.py#L54-L67 |
novopl/peltak | src/peltak/extra/gitflow/logic/task.py | finish | def finish():
# type: () -> None
""" Merge current feature branch into develop. """
pretend = context.get('pretend', False)
if not pretend and (git.staged() or git.unstaged()):
log.err(
"You have uncommitted changes in your repo!\n"
"You need to stash them before you merge the hotfix branch"
)
sys.exit(1)
branch = git.current_branch(refresh=True)
base = common.get_base_branch()
prompt = "<32>Merge <33>{}<32> into <33>{}<0>?".format(branch.name, base)
if not click.confirm(shell.fmt(prompt)):
log.info("Cancelled")
return
common.assert_branch_type('task')
# Merge task into its base feature branch
common.git_checkout(base)
common.git_pull(base)
common.git_merge(base, branch.name)
# Cleanup
common.git_branch_delete(branch.name)
common.git_prune()
common.git_checkout(base) | python | def finish():
# type: () -> None
""" Merge current feature branch into develop. """
pretend = context.get('pretend', False)
if not pretend and (git.staged() or git.unstaged()):
log.err(
"You have uncommitted changes in your repo!\n"
"You need to stash them before you merge the hotfix branch"
)
sys.exit(1)
branch = git.current_branch(refresh=True)
base = common.get_base_branch()
prompt = "<32>Merge <33>{}<32> into <33>{}<0>?".format(branch.name, base)
if not click.confirm(shell.fmt(prompt)):
log.info("Cancelled")
return
common.assert_branch_type('task')
# Merge task into its base feature branch
common.git_checkout(base)
common.git_pull(base)
common.git_merge(base, branch.name)
# Cleanup
common.git_branch_delete(branch.name)
common.git_prune()
common.git_checkout(base) | [
"def",
"finish",
"(",
")",
":",
"# type: () -> None",
"pretend",
"=",
"context",
".",
"get",
"(",
"'pretend'",
",",
"False",
")",
"if",
"not",
"pretend",
"and",
"(",
"git",
".",
"staged",
"(",
")",
"or",
"git",
".",
"unstaged",
"(",
")",
")",
":",
"log",
".",
"err",
"(",
"\"You have uncommitted changes in your repo!\\n\"",
"\"You need to stash them before you merge the hotfix branch\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"branch",
"=",
"git",
".",
"current_branch",
"(",
"refresh",
"=",
"True",
")",
"base",
"=",
"common",
".",
"get_base_branch",
"(",
")",
"prompt",
"=",
"\"<32>Merge <33>{}<32> into <33>{}<0>?\"",
".",
"format",
"(",
"branch",
".",
"name",
",",
"base",
")",
"if",
"not",
"click",
".",
"confirm",
"(",
"shell",
".",
"fmt",
"(",
"prompt",
")",
")",
":",
"log",
".",
"info",
"(",
"\"Cancelled\"",
")",
"return",
"common",
".",
"assert_branch_type",
"(",
"'task'",
")",
"# Merge task into it's base feature branch",
"common",
".",
"git_checkout",
"(",
"base",
")",
"common",
".",
"git_pull",
"(",
"base",
")",
"common",
".",
"git_merge",
"(",
"base",
",",
"branch",
".",
"name",
")",
"# Cleanup",
"common",
".",
"git_branch_delete",
"(",
"branch",
".",
"name",
")",
"common",
".",
"git_prune",
"(",
")",
"common",
".",
"git_checkout",
"(",
"base",
")"
] | Merge the current task branch into its base branch. | [
"Merge",
"current",
"feature",
"branch",
"into",
"develop",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/task.py#L83-L114 |
novopl/peltak | src/peltak/extra/gitflow/logic/task.py | merged | def merged():
# type: () -> None
""" Cleanup a remotely merged branch. """
base_branch = common.get_base_branch()
branch = git.current_branch(refresh=True)
common.assert_branch_type('task')
# Pull feature branch with the merged task
common.git_checkout(base_branch)
common.git_pull(base_branch)
# Cleanup
common.git_branch_delete(branch.name)
common.git_prune()
common.git_checkout(base_branch) | python | def merged():
# type: () -> None
""" Cleanup a remotely merged branch. """
base_branch = common.get_base_branch()
branch = git.current_branch(refresh=True)
common.assert_branch_type('task')
# Pull feature branch with the merged task
common.git_checkout(base_branch)
common.git_pull(base_branch)
# Cleanup
common.git_branch_delete(branch.name)
common.git_prune()
common.git_checkout(base_branch) | [
"def",
"merged",
"(",
")",
":",
"# type: () -> None",
"base_branch",
"=",
"common",
".",
"get_base_branch",
"(",
")",
"branch",
"=",
"git",
".",
"current_branch",
"(",
"refresh",
"=",
"True",
")",
"common",
".",
"assert_branch_type",
"(",
"'task'",
")",
"# Pull feature branch with the merged task",
"common",
".",
"git_checkout",
"(",
"base_branch",
")",
"common",
".",
"git_pull",
"(",
"base_branch",
")",
"# Cleanup",
"common",
".",
"git_branch_delete",
"(",
"branch",
".",
"name",
")",
"common",
".",
"git_prune",
"(",
")",
"common",
".",
"git_checkout",
"(",
"base_branch",
")"
] | Cleanup a remotely merged branch. | [
"Cleanup",
"a",
"remotely",
"merged",
"branch",
"."
] | train | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/task.py#L117-L133 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | mutagen_call | def mutagen_call(action, path, func, *args, **kwargs):
"""Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `path` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`.
"""
try:
return func(*args, **kwargs)
except mutagen.MutagenError as exc:
log.debug(u'%s failed: %s', action, six.text_type(exc))
raise UnreadableFileError(path, six.text_type(exc))
except Exception as exc:
# Isolate bugs in Mutagen.
log.debug(u'%s', traceback.format_exc())
log.error(u'uncaught Mutagen exception in %s: %s', action, exc)
raise MutagenError(path, exc) | python | def mutagen_call(action, path, func, *args, **kwargs):
"""Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `path` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`.
"""
try:
return func(*args, **kwargs)
except mutagen.MutagenError as exc:
log.debug(u'%s failed: %s', action, six.text_type(exc))
raise UnreadableFileError(path, six.text_type(exc))
except Exception as exc:
# Isolate bugs in Mutagen.
log.debug(u'%s', traceback.format_exc())
log.error(u'uncaught Mutagen exception in %s: %s', action, exc)
raise MutagenError(path, exc) | [
"def",
"mutagen_call",
"(",
"action",
",",
"path",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"mutagen",
".",
"MutagenError",
"as",
"exc",
":",
"log",
".",
"debug",
"(",
"u'%s failed: %s'",
",",
"action",
",",
"six",
".",
"text_type",
"(",
"exc",
")",
")",
"raise",
"UnreadableFileError",
"(",
"path",
",",
"six",
".",
"text_type",
"(",
"exc",
")",
")",
"except",
"Exception",
"as",
"exc",
":",
"# Isolate bugs in Mutagen.",
"log",
".",
"debug",
"(",
"u'%s'",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"log",
".",
"error",
"(",
"u'uncaught Mutagen exception in %s: %s'",
",",
"action",
",",
"exc",
")",
"raise",
"MutagenError",
"(",
"path",
",",
"exc",
")"
] | Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `path` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`. | [
"Call",
"a",
"Mutagen",
"function",
"with",
"appropriate",
"error",
"handling",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L117-L139 |
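A minimal usage sketch for `mutagen_call` above, assuming phrydy and mutagen are installed; 'song.mp3' is a placeholder path:

import mutagen
from phrydy.mediafile import mutagen_call, UnreadableFileError

try:
    # open the file through Mutagen with the unified error handling
    mgfile = mutagen_call('open', 'song.mp3', mutagen.File, 'song.mp3')
except UnreadableFileError as exc:
    print('could not read tags:', exc)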
Josef-Friedrich/phrydy | phrydy/mediafile.py | _safe_cast | def _safe_cast(out_type, val):
"""Try to covert val to out_type but never raise an exception. If
the value can't be converted, then a sensible default value is
returned. out_type should be bool, int, or unicode; otherwise, the
value is just passed through.
"""
if val is None:
return None
if out_type == int:
if isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if isinstance(val, bytes):
val = val.decode('utf-8', 'ignore')
elif not isinstance(val, six.string_types):
val = six.text_type(val)
# Get a number from the front of the string.
match = re.match(r'[\+-]?[0-9]+', val.strip())
return int(match.group(0)) if match else 0
elif out_type == bool:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == six.text_type:
if isinstance(val, bytes):
return val.decode('utf-8', 'ignore')
elif isinstance(val, six.text_type):
return val
else:
return six.text_type(val)
elif out_type == float:
if isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if isinstance(val, bytes):
val = val.decode('utf-8', 'ignore')
else:
val = six.text_type(val)
match = re.match(r'[\+-]?([0-9]+\.?[0-9]*|[0-9]*\.[0-9]+)',
val.strip())
if match:
val = match.group(0)
if val:
return float(val)
return 0.0
else:
return val | python | def _safe_cast(out_type, val):
"""Try to covert val to out_type but never raise an exception. If
the value can't be converted, then a sensible default value is
returned. out_type should be bool, int, or unicode; otherwise, the
value is just passed through.
"""
if val is None:
return None
if out_type == int:
if isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if isinstance(val, bytes):
val = val.decode('utf-8', 'ignore')
elif not isinstance(val, six.string_types):
val = six.text_type(val)
# Get a number from the front of the string.
match = re.match(r'[\+-]?[0-9]+', val.strip())
return int(match.group(0)) if match else 0
elif out_type == bool:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == six.text_type:
if isinstance(val, bytes):
return val.decode('utf-8', 'ignore')
elif isinstance(val, six.text_type):
return val
else:
return six.text_type(val)
elif out_type == float:
if isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if isinstance(val, bytes):
val = val.decode('utf-8', 'ignore')
else:
val = six.text_type(val)
match = re.match(r'[\+-]?([0-9]+\.?[0-9]*|[0-9]*\.[0-9]+)',
val.strip())
if match:
val = match.group(0)
if val:
return float(val)
return 0.0
else:
return val | [
"def",
"_safe_cast",
"(",
"out_type",
",",
"val",
")",
":",
"if",
"val",
"is",
"None",
":",
"return",
"None",
"if",
"out_type",
"==",
"int",
":",
"if",
"isinstance",
"(",
"val",
",",
"int",
")",
"or",
"isinstance",
"(",
"val",
",",
"float",
")",
":",
"# Just a number.",
"return",
"int",
"(",
"val",
")",
"else",
":",
"# Process any other type as a string.",
"if",
"isinstance",
"(",
"val",
",",
"bytes",
")",
":",
"val",
"=",
"val",
".",
"decode",
"(",
"'utf-8'",
",",
"'ignore'",
")",
"elif",
"not",
"isinstance",
"(",
"val",
",",
"six",
".",
"string_types",
")",
":",
"val",
"=",
"six",
".",
"text_type",
"(",
"val",
")",
"# Get a number from the front of the string.",
"match",
"=",
"re",
".",
"match",
"(",
"r'[\\+-]?[0-9]+'",
",",
"val",
".",
"strip",
"(",
")",
")",
"return",
"int",
"(",
"match",
".",
"group",
"(",
"0",
")",
")",
"if",
"match",
"else",
"0",
"elif",
"out_type",
"==",
"bool",
":",
"try",
":",
"# Should work for strings, bools, ints:",
"return",
"bool",
"(",
"int",
"(",
"val",
")",
")",
"except",
"ValueError",
":",
"return",
"False",
"elif",
"out_type",
"==",
"six",
".",
"text_type",
":",
"if",
"isinstance",
"(",
"val",
",",
"bytes",
")",
":",
"return",
"val",
".",
"decode",
"(",
"'utf-8'",
",",
"'ignore'",
")",
"elif",
"isinstance",
"(",
"val",
",",
"six",
".",
"text_type",
")",
":",
"return",
"val",
"else",
":",
"return",
"six",
".",
"text_type",
"(",
"val",
")",
"elif",
"out_type",
"==",
"float",
":",
"if",
"isinstance",
"(",
"val",
",",
"int",
")",
"or",
"isinstance",
"(",
"val",
",",
"float",
")",
":",
"return",
"float",
"(",
"val",
")",
"else",
":",
"if",
"isinstance",
"(",
"val",
",",
"bytes",
")",
":",
"val",
"=",
"val",
".",
"decode",
"(",
"'utf-8'",
",",
"'ignore'",
")",
"else",
":",
"val",
"=",
"six",
".",
"text_type",
"(",
"val",
")",
"match",
"=",
"re",
".",
"match",
"(",
"r'[\\+-]?([0-9]+\\.?[0-9]*|[0-9]*\\.[0-9]+)'",
",",
"val",
".",
"strip",
"(",
")",
")",
"if",
"match",
":",
"val",
"=",
"match",
".",
"group",
"(",
"0",
")",
"if",
"val",
":",
"return",
"float",
"(",
"val",
")",
"return",
"0.0",
"else",
":",
"return",
"val"
] | Try to convert val to out_type but never raise an exception. If
the value can't be converted, then a sensible default value is
returned. out_type should be bool, int, or unicode; otherwise, the
value is just passed through. | [
"Try",
"to",
"covert",
"val",
"to",
"out_type",
"but",
"never",
"raise",
"an",
"exception",
".",
"If",
"the",
"value",
"can",
"t",
"be",
"converted",
"then",
"a",
"sensible",
"default",
"value",
"is",
"returned",
".",
"out_type",
"should",
"be",
"bool",
"int",
"or",
"unicode",
";",
"otherwise",
"the",
"value",
"is",
"just",
"passed",
"through",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L144-L199 |
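A self-contained sketch of the int branch above: pull a leading signed integer out of arbitrary tag text, falling back to a default when nothing matches.

import re

def leading_int(val, default=0):
    # mirrors the int branch of _safe_cast
    match = re.match(r'[\+-]?[0-9]+', str(val).strip())
    return int(match.group(0)) if match else default

print(leading_int('12/14'))  # -> 12 (handles "track/total" style values)
print(leading_int('n/a'))    # -> 0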
Josef-Friedrich/phrydy | phrydy/mediafile.py | _unpack_asf_image | def _unpack_asf_image(data):
"""Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
"""
type, size = struct.unpack_from('<bi', data)
pos = 5
mime = b''
while data[pos:pos + 2] != b'\x00\x00':
mime += data[pos:pos + 2]
pos += 2
pos += 2
description = b''
while data[pos:pos + 2] != b'\x00\x00':
description += data[pos:pos + 2]
pos += 2
pos += 2
image_data = data[pos:pos + size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le")) | python | def _unpack_asf_image(data):
"""Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
"""
type, size = struct.unpack_from('<bi', data)
pos = 5
mime = b''
while data[pos:pos + 2] != b'\x00\x00':
mime += data[pos:pos + 2]
pos += 2
pos += 2
description = b''
while data[pos:pos + 2] != b'\x00\x00':
description += data[pos:pos + 2]
pos += 2
pos += 2
image_data = data[pos:pos + size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le")) | [
"def",
"_unpack_asf_image",
"(",
"data",
")",
":",
"type",
",",
"size",
"=",
"struct",
".",
"unpack_from",
"(",
"'<bi'",
",",
"data",
")",
"pos",
"=",
"5",
"mime",
"=",
"b''",
"while",
"data",
"[",
"pos",
":",
"pos",
"+",
"2",
"]",
"!=",
"b'\\x00\\x00'",
":",
"mime",
"+=",
"data",
"[",
"pos",
":",
"pos",
"+",
"2",
"]",
"pos",
"+=",
"2",
"pos",
"+=",
"2",
"description",
"=",
"b''",
"while",
"data",
"[",
"pos",
":",
"pos",
"+",
"2",
"]",
"!=",
"b'\\x00\\x00'",
":",
"description",
"+=",
"data",
"[",
"pos",
":",
"pos",
"+",
"2",
"]",
"pos",
"+=",
"2",
"pos",
"+=",
"2",
"image_data",
"=",
"data",
"[",
"pos",
":",
"pos",
"+",
"size",
"]",
"return",
"(",
"mime",
".",
"decode",
"(",
"\"utf-16-le\"",
")",
",",
"image_data",
",",
"type",
",",
"description",
".",
"decode",
"(",
"\"utf-16-le\"",
")",
")"
] | Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined. | [
"Unpack",
"image",
"data",
"from",
"a",
"WM",
"/",
"Picture",
"tag",
".",
"Return",
"a",
"tuple",
"containing",
"the",
"MIME",
"type",
"the",
"raw",
"image",
"data",
"a",
"type",
"indicator",
"and",
"the",
"image",
"s",
"description",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L204-L227 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | _pack_asf_image | def _pack_asf_image(mime, data, type=3, description=""):
"""Pack image data for a WM/Picture tag.
"""
tag_data = struct.pack('<bi', type, len(data))
tag_data += mime.encode("utf-16-le") + b'\x00\x00'
tag_data += description.encode("utf-16-le") + b'\x00\x00'
tag_data += data
return tag_data | python | def _pack_asf_image(mime, data, type=3, description=""):
"""Pack image data for a WM/Picture tag.
"""
tag_data = struct.pack('<bi', type, len(data))
tag_data += mime.encode("utf-16-le") + b'\x00\x00'
tag_data += description.encode("utf-16-le") + b'\x00\x00'
tag_data += data
return tag_data | [
"def",
"_pack_asf_image",
"(",
"mime",
",",
"data",
",",
"type",
"=",
"3",
",",
"description",
"=",
"\"\"",
")",
":",
"tag_data",
"=",
"struct",
".",
"pack",
"(",
"'<bi'",
",",
"type",
",",
"len",
"(",
"data",
")",
")",
"tag_data",
"+=",
"mime",
".",
"encode",
"(",
"\"utf-16-le\"",
")",
"+",
"b'\\x00\\x00'",
"tag_data",
"+=",
"description",
".",
"encode",
"(",
"\"utf-16-le\"",
")",
"+",
"b'\\x00\\x00'",
"tag_data",
"+=",
"data",
"return",
"tag_data"
] | Pack image data for a WM/Picture tag. | [
"Pack",
"image",
"data",
"for",
"a",
"WM",
"/",
"Picture",
"tag",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L230-L237 |
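A self-contained sketch of the WM/Picture layout handled by the pack/unpack pair above: a type byte, a little-endian size, UTF-16-LE mime and description strings (each double-null terminated), then the raw image bytes. The values are illustrative.

import struct

def pack_wm_picture(mime, data, type=3, desc=u''):
    blob = struct.pack('<bi', type, len(data))
    blob += mime.encode('utf-16-le') + b'\x00\x00'
    blob += desc.encode('utf-16-le') + b'\x00\x00'
    return blob + data

blob = pack_wm_picture(u'image/png', b'\x89PNG', desc=u'front cover')
print(struct.unpack_from('<bi', blob))  # -> (3, 4): type byte and data size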
Josef-Friedrich/phrydy | phrydy/mediafile.py | _sc_decode | def _sc_decode(soundcheck):
"""Convert a Sound Check bytestring value to a (gain, peak) tuple as
used by ReplayGain.
"""
# We decode binary data. If one of the formats gives us a text
# string, interpret it as UTF-8.
if isinstance(soundcheck, six.text_type):
soundcheck = soundcheck.encode('utf-8')
# SoundCheck tags consist of 10 numbers, each represented by 8
# characters of ASCII hex preceded by a space.
try:
soundcheck = codecs.decode(soundcheck.replace(b' ', b''), 'hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except (struct.error, TypeError, binascii.Error):
# SoundCheck isn't in the format we expect, so return default
# values.
return 0.0, 0.0
# SoundCheck stores absolute calculated/measured RMS value in an
# unknown unit. We need to find the ratio of this measurement
# compared to a reference value of 1000 to get our gain in dB. We
# play it safe by using the larger of the two values (i.e., the most
# attenuation).
maxgain = max(soundcheck[:2])
if maxgain > 0:
gain = math.log10(maxgain / 1000.0) * -10
else:
# Invalid gain value found.
gain = 0.0
# SoundCheck stores peak values as the actual value of the sample,
# and again separately for the left and right channels. We need to
# convert this to a percentage of full scale, which is 32768 for a
# 16 bit sample. Once again, we play it safe by using the larger of
# the two values.
peak = max(soundcheck[6:8]) / 32768.0
return round(gain, 2), round(peak, 6) | python | def _sc_decode(soundcheck):
"""Convert a Sound Check bytestring value to a (gain, peak) tuple as
used by ReplayGain.
"""
# We decode binary data. If one of the formats gives us a text
# string, interpret it as UTF-8.
if isinstance(soundcheck, six.text_type):
soundcheck = soundcheck.encode('utf-8')
# SoundCheck tags consist of 10 numbers, each represented by 8
# characters of ASCII hex preceded by a space.
try:
soundcheck = codecs.decode(soundcheck.replace(b' ', b''), 'hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except (struct.error, TypeError, binascii.Error):
# SoundCheck isn't in the format we expect, so return default
# values.
return 0.0, 0.0
# SoundCheck stores absolute calculated/measured RMS value in an
# unknown unit. We need to find the ratio of this measurement
# compared to a reference value of 1000 to get our gain in dB. We
# play it safe by using the larger of the two values (i.e., the most
# attenuation).
maxgain = max(soundcheck[:2])
if maxgain > 0:
gain = math.log10(maxgain / 1000.0) * -10
else:
# Invalid gain value found.
gain = 0.0
# SoundCheck stores peak values as the actual value of the sample,
# and again separately for the left and right channels. We need to
# convert this to a percentage of full scale, which is 32768 for a
# 16 bit sample. Once again, we play it safe by using the larger of
# the two values.
peak = max(soundcheck[6:8]) / 32768.0
return round(gain, 2), round(peak, 6) | [
"def",
"_sc_decode",
"(",
"soundcheck",
")",
":",
"# We decode binary data. If one of the formats gives us a text",
"# string, interpret it as UTF-8.",
"if",
"isinstance",
"(",
"soundcheck",
",",
"six",
".",
"text_type",
")",
":",
"soundcheck",
"=",
"soundcheck",
".",
"encode",
"(",
"'utf-8'",
")",
"# SoundCheck tags consist of 10 numbers, each represented by 8",
"# characters of ASCII hex preceded by a space.",
"try",
":",
"soundcheck",
"=",
"codecs",
".",
"decode",
"(",
"soundcheck",
".",
"replace",
"(",
"b' '",
",",
"b''",
")",
",",
"'hex'",
")",
"soundcheck",
"=",
"struct",
".",
"unpack",
"(",
"'!iiiiiiiiii'",
",",
"soundcheck",
")",
"except",
"(",
"struct",
".",
"error",
",",
"TypeError",
",",
"binascii",
".",
"Error",
")",
":",
"# SoundCheck isn't in the format we expect, so return default",
"# values.",
"return",
"0.0",
",",
"0.0",
"# SoundCheck stores absolute calculated/measured RMS value in an",
"# unknown unit. We need to find the ratio of this measurement",
"# compared to a reference value of 1000 to get our gain in dB. We",
"# play it safe by using the larger of the two values (i.e., the most",
"# attenuation).",
"maxgain",
"=",
"max",
"(",
"soundcheck",
"[",
":",
"2",
"]",
")",
"if",
"maxgain",
">",
"0",
":",
"gain",
"=",
"math",
".",
"log10",
"(",
"maxgain",
"/",
"1000.0",
")",
"*",
"-",
"10",
"else",
":",
"# Invalid gain value found.",
"gain",
"=",
"0.0",
"# SoundCheck stores peak values as the actual value of the sample,",
"# and again separately for the left and right channels. We need to",
"# convert this to a percentage of full scale, which is 32768 for a",
"# 16 bit sample. Once again, we play it safe by using the larger of",
"# the two values.",
"peak",
"=",
"max",
"(",
"soundcheck",
"[",
"6",
":",
"8",
"]",
")",
"/",
"32768.0",
"return",
"round",
"(",
"gain",
",",
"2",
")",
",",
"round",
"(",
"peak",
",",
"6",
")"
] | Convert a Sound Check bytestring value to a (gain, peak) tuple as
used by ReplayGain. | [
"Convert",
"a",
"Sound",
"Check",
"bytestring",
"value",
"to",
"a",
"(",
"gain",
"peak",
")",
"tuple",
"as",
"used",
"by",
"ReplayGain",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L242-L280 |
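A self-contained sketch of the tag layout decoded above: ten space-prefixed 8-digit hex words; the numbers here are illustrative.

import struct

raw = (u' %08X' * 10) % (3981, 3981, 9953, 9953, 0, 0, 16384, 16384, 0, 0)
words = struct.unpack('!iiiiiiiiii', bytes.fromhex(raw.replace(' ', '')))
# gain comes from the larger RMS word, peak is sample / 32768
print(max(words[:2]), max(words[6:8]) / 32768.0)  # -> 3981 0.5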
Josef-Friedrich/phrydy | phrydy/mediafile.py | _sc_encode | def _sc_encode(gain, peak):
"""Encode ReplayGain gain/peak values as a Sound Check string.
"""
# SoundCheck stores the peak value as the actual value of the
# sample, rather than the percentage of full scale that RG uses, so
# we do a simple conversion assuming 16 bit samples.
peak *= 32768.0
# SoundCheck stores absolute RMS values in some unknown units rather
# than the dB values RG uses. We can calculate these absolute values
# from the gain ratio using a reference value of 1000 units. We also
# enforce the maximum value here, which is equivalent to about
# -18.2dB.
g1 = int(min(round((10 ** (gain / -10)) * 1000), 65534))
# Same as above, except our reference level is 2500 units.
g2 = int(min(round((10 ** (gain / -10)) * 2500), 65534))
# The purpose of these values is unknown, but they also seem to be
# unused so we just use zero.
uk = 0
values = (g1, g1, g2, g2, uk, uk, int(peak), int(peak), uk, uk)
return (u' %08X' * 10) % values | python | def _sc_encode(gain, peak):
"""Encode ReplayGain gain/peak values as a Sound Check string.
"""
# SoundCheck stores the peak value as the actual value of the
# sample, rather than the percentage of full scale that RG uses, so
# we do a simple conversion assuming 16 bit samples.
peak *= 32768.0
# SoundCheck stores absolute RMS values in some unknown units rather
# than the dB values RG uses. We can calculate these absolute values
# from the gain ratio using a reference value of 1000 units. We also
# enforce the maximum value here, which is equivalent to about
# -18.2dB.
g1 = int(min(round((10 ** (gain / -10)) * 1000), 65534))
# Same as above, except our reference level is 2500 units.
g2 = int(min(round((10 ** (gain / -10)) * 2500), 65534))
# The purpose of these values is unknown, but they also seem to be
# unused so we just use zero.
uk = 0
values = (g1, g1, g2, g2, uk, uk, int(peak), int(peak), uk, uk)
return (u' %08X' * 10) % values | [
"def",
"_sc_encode",
"(",
"gain",
",",
"peak",
")",
":",
"# SoundCheck stores the peak value as the actual value of the",
"# sample, rather than the percentage of full scale that RG uses, so",
"# we do a simple conversion assuming 16 bit samples.",
"peak",
"*=",
"32768.0",
"# SoundCheck stores absolute RMS values in some unknown units rather",
"# than the dB values RG uses. We can calculate these absolute values",
"# from the gain ratio using a reference value of 1000 units. We also",
"# enforce the maximum value here, which is equivalent to about",
"# -18.2dB.",
"g1",
"=",
"int",
"(",
"min",
"(",
"round",
"(",
"(",
"10",
"**",
"(",
"gain",
"/",
"-",
"10",
")",
")",
"*",
"1000",
")",
",",
"65534",
")",
")",
"# Same as above, except our reference level is 2500 units.",
"g2",
"=",
"int",
"(",
"min",
"(",
"round",
"(",
"(",
"10",
"**",
"(",
"gain",
"/",
"-",
"10",
")",
")",
"*",
"2500",
")",
",",
"65534",
")",
")",
"# The purpose of these values are unknown, but they also seem to be",
"# unused so we just use zero.",
"uk",
"=",
"0",
"values",
"=",
"(",
"g1",
",",
"g1",
",",
"g2",
",",
"g2",
",",
"uk",
",",
"uk",
",",
"int",
"(",
"peak",
")",
",",
"int",
"(",
"peak",
")",
",",
"uk",
",",
"uk",
")",
"return",
"(",
"u' %08X'",
"*",
"10",
")",
"%",
"values"
] | Encode ReplayGain gain/peak values as a Sound Check string. | [
"Encode",
"ReplayGain",
"gain",
"/",
"peak",
"values",
"as",
"a",
"Sound",
"Check",
"string",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L283-L304 |
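A roundtrip sketch of the gain math shared by `_sc_encode` and `_sc_decode`: dB to SoundCheck units against the 1000-unit reference, and back.

import math

def gain_to_units(gain_db):
    return int(min(round((10 ** (gain_db / -10)) * 1000), 65534))

def units_to_gain(units):
    return round(math.log10(units / 1000.0) * -10, 2)

units = gain_to_units(-6.0)
print(units, units_to_gain(units))  # -> 3981 -6.0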
Josef-Friedrich/phrydy | phrydy/mediafile.py | image_mime_type | def image_mime_type(data):
"""Return the MIME type of the image data (a bytestring).
"""
# This checks for a jpeg file with only the magic bytes (unrecognized by
# imghdr.what). imghdr.what returns None for that type of file, so
# _wider_test_jpeg is run in that case. It still returns None if it didn't
# match such a jpeg file.
kind = _imghdr_what_wrapper(data)
if kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']:
return 'image/{0}'.format(kind)
elif kind == 'pgm':
return 'image/x-portable-graymap'
elif kind == 'pbm':
return 'image/x-portable-bitmap'
elif kind == 'ppm':
return 'image/x-portable-pixmap'
elif kind == 'xbm':
return 'image/x-xbitmap'
else:
return 'image/x-{0}'.format(kind) | python | def image_mime_type(data):
"""Return the MIME type of the image data (a bytestring).
"""
# This checks for a jpeg file with only the magic bytes (unrecognized by
# imghdr.what). imghdr.what returns None for that type of file, so
# _wider_test_jpeg is run in that case. It still returns None if it didn't
# match such a jpeg file.
kind = _imghdr_what_wrapper(data)
if kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']:
return 'image/{0}'.format(kind)
elif kind == 'pgm':
return 'image/x-portable-graymap'
elif kind == 'pbm':
return 'image/x-portable-bitmap'
elif kind == 'ppm':
return 'image/x-portable-pixmap'
elif kind == 'xbm':
return 'image/x-xbitmap'
else:
return 'image/x-{0}'.format(kind) | [
"def",
"image_mime_type",
"(",
"data",
")",
":",
"# This checks for a jpeg file with only the magic bytes (unrecognized by",
"# imghdr.what). imghdr.what returns none for that type of file, so",
"# _wider_test_jpeg is run in that case. It still returns None if it didn't",
"# match such a jpeg file.",
"kind",
"=",
"_imghdr_what_wrapper",
"(",
"data",
")",
"if",
"kind",
"in",
"[",
"'gif'",
",",
"'jpeg'",
",",
"'png'",
",",
"'tiff'",
",",
"'bmp'",
"]",
":",
"return",
"'image/{0}'",
".",
"format",
"(",
"kind",
")",
"elif",
"kind",
"==",
"'pgm'",
":",
"return",
"'image/x-portable-graymap'",
"elif",
"kind",
"==",
"'pbm'",
":",
"return",
"'image/x-portable-bitmap'",
"elif",
"kind",
"==",
"'ppm'",
":",
"return",
"'image/x-portable-pixmap'",
"elif",
"kind",
"==",
"'xbm'",
":",
"return",
"'image/x-xbitmap'",
"else",
":",
"return",
"'image/x-{0}'",
".",
"format",
"(",
"kind",
")"
] | Return the MIME type of the image data (a bytestring). | [
"Return",
"the",
"MIME",
"type",
"of",
"the",
"image",
"data",
"(",
"a",
"bytestring",
")",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L329-L348 |
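The detection above is built on the standard library's imghdr (wrapped with an extra JPEG fallback); a minimal stdlib-only sketch, noting that imghdr was removed in Python 3.13:

import imghdr

png_header = b'\x89PNG\r\n\x1a\n' + b'\x00' * 16
print(imghdr.what(None, h=png_header))  # -> 'png'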
Josef-Friedrich/phrydy | phrydy/mediafile.py | StorageStyle.deserialize | def deserialize(self, mutagen_value):
"""Given a raw value stored on a Mutagen object, decode and
return the represented value.
"""
if self.suffix and isinstance(mutagen_value, six.text_type) \
and mutagen_value.endswith(self.suffix):
return mutagen_value[:-len(self.suffix)]
else:
return mutagen_value | python | def deserialize(self, mutagen_value):
"""Given a raw value stored on a Mutagen object, decode and
return the represented value.
"""
if self.suffix and isinstance(mutagen_value, six.text_type) \
and mutagen_value.endswith(self.suffix):
return mutagen_value[:-len(self.suffix)]
else:
return mutagen_value | [
"def",
"deserialize",
"(",
"self",
",",
"mutagen_value",
")",
":",
"if",
"self",
".",
"suffix",
"and",
"isinstance",
"(",
"mutagen_value",
",",
"six",
".",
"text_type",
")",
"and",
"mutagen_value",
".",
"endswith",
"(",
"self",
".",
"suffix",
")",
":",
"return",
"mutagen_value",
"[",
":",
"-",
"len",
"(",
"self",
".",
"suffix",
")",
"]",
"else",
":",
"return",
"mutagen_value"
] | Given a raw value stored on a Mutagen object, decode and
return the represented value. | [
"Given",
"a",
"raw",
"value",
"stored",
"on",
"a",
"Mutagen",
"object",
"decode",
"and",
"return",
"the",
"represented",
"value",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L494-L502 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | StorageStyle.set | def set(self, mutagen_file, value):
"""Assign the value for the field using this style.
"""
self.store(mutagen_file, self.serialize(value)) | python | def set(self, mutagen_file, value):
"""Assign the value for the field using this style.
"""
self.store(mutagen_file, self.serialize(value)) | [
"def",
"set",
"(",
"self",
",",
"mutagen_file",
",",
"value",
")",
":",
"self",
".",
"store",
"(",
"mutagen_file",
",",
"self",
".",
"serialize",
"(",
"value",
")",
")"
] | Assign the value for the field using this style. | [
"Assign",
"the",
"value",
"for",
"the",
"field",
"using",
"this",
"style",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L506-L509 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | StorageStyle.serialize | def serialize(self, value):
"""Convert the external Python value to a type that is suitable for
storing in a Mutagen file object.
"""
if isinstance(value, float) and self.as_type is six.text_type:
value = u'{0:.{1}f}'.format(value, self.float_places)
value = self.as_type(value)
elif self.as_type is six.text_type:
if isinstance(value, bool):
# Store bools as 1/0 instead of True/False.
value = six.text_type(int(bool(value)))
elif isinstance(value, bytes):
value = value.decode('utf-8', 'ignore')
else:
value = six.text_type(value)
else:
value = self.as_type(value)
if self.suffix:
value += self.suffix
return value | python | def serialize(self, value):
"""Convert the external Python value to a type that is suitable for
storing in a Mutagen file object.
"""
if isinstance(value, float) and self.as_type is six.text_type:
value = u'{0:.{1}f}'.format(value, self.float_places)
value = self.as_type(value)
elif self.as_type is six.text_type:
if isinstance(value, bool):
# Store bools as 1/0 instead of True/False.
value = six.text_type(int(bool(value)))
elif isinstance(value, bytes):
value = value.decode('utf-8', 'ignore')
else:
value = six.text_type(value)
else:
value = self.as_type(value)
if self.suffix:
value += self.suffix
return value | [
"def",
"serialize",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"float",
")",
"and",
"self",
".",
"as_type",
"is",
"six",
".",
"text_type",
":",
"value",
"=",
"u'{0:.{1}f}'",
".",
"format",
"(",
"value",
",",
"self",
".",
"float_places",
")",
"value",
"=",
"self",
".",
"as_type",
"(",
"value",
")",
"elif",
"self",
".",
"as_type",
"is",
"six",
".",
"text_type",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"# Store bools as 1/0 instead of True/False.",
"value",
"=",
"six",
".",
"text_type",
"(",
"int",
"(",
"bool",
"(",
"value",
")",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"bytes",
")",
":",
"value",
"=",
"value",
".",
"decode",
"(",
"'utf-8'",
",",
"'ignore'",
")",
"else",
":",
"value",
"=",
"six",
".",
"text_type",
"(",
"value",
")",
"else",
":",
"value",
"=",
"self",
".",
"as_type",
"(",
"value",
")",
"if",
"self",
".",
"suffix",
":",
"value",
"+=",
"self",
".",
"suffix",
"return",
"value"
] | Convert the external Python value to a type that is suitable for
storing in a Mutagen file object. | [
"Convert",
"the",
"external",
"Python",
"value",
"to",
"a",
"type",
"that",
"is",
"suitable",
"for",
"storing",
"in",
"a",
"Mutagen",
"file",
"object",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L516-L537 |
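A condensed sketch of the text branch above: bools become '1'/'0', floats get a fixed precision, and bytes are decoded as UTF-8.

def to_tag_text(value, float_places=2):
    if isinstance(value, bool):
        return str(int(value))
    if isinstance(value, float):
        return u'{0:.{1}f}'.format(value, float_places)
    if isinstance(value, bytes):
        return value.decode('utf-8', 'ignore')
    return str(value)

print(to_tag_text(9.87654), to_tag_text(True), to_tag_text(b'abc'))
# -> 9.88 1 abc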
Josef-Friedrich/phrydy | phrydy/mediafile.py | ListStorageStyle.get_list | def get_list(self, mutagen_file):
"""Get a list of all values for the field using this style.
"""
return [self.deserialize(item) for item in self.fetch(mutagen_file)] | python | def get_list(self, mutagen_file):
"""Get a list of all values for the field using this style.
"""
return [self.deserialize(item) for item in self.fetch(mutagen_file)] | [
"def",
"get_list",
"(",
"self",
",",
"mutagen_file",
")",
":",
"return",
"[",
"self",
".",
"deserialize",
"(",
"item",
")",
"for",
"item",
"in",
"self",
".",
"fetch",
"(",
"mutagen_file",
")",
"]"
] | Get a list of all values for the field using this style. | [
"Get",
"a",
"list",
"of",
"all",
"values",
"for",
"the",
"field",
"using",
"this",
"style",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L569-L572 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | ListStorageStyle.set_list | def set_list(self, mutagen_file, values):
"""Set all values for the field using this style. `values`
should be an iterable.
"""
self.store(mutagen_file, [self.serialize(value) for value in values]) | python | def set_list(self, mutagen_file, values):
"""Set all values for the field using this style. `values`
should be an iterable.
"""
self.store(mutagen_file, [self.serialize(value) for value in values]) | [
"def",
"set_list",
"(",
"self",
",",
"mutagen_file",
",",
"values",
")",
":",
"self",
".",
"store",
"(",
"mutagen_file",
",",
"[",
"self",
".",
"serialize",
"(",
"value",
")",
"for",
"value",
"in",
"values",
"]",
")"
] | Set all values for the field using this style. `values`
should be an iterable. | [
"Set",
"all",
"values",
"for",
"the",
"field",
"using",
"this",
"style",
".",
"values",
"should",
"be",
"an",
"iterable",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L588-L592 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | MP3ImageStorageStyle.deserialize | def deserialize(self, apic_frame):
"""Convert APIC frame into Image."""
return Image(data=apic_frame.data, desc=apic_frame.desc,
type=apic_frame.type) | python | def deserialize(self, apic_frame):
"""Convert APIC frame into Image."""
return Image(data=apic_frame.data, desc=apic_frame.desc,
type=apic_frame.type) | [
"def",
"deserialize",
"(",
"self",
",",
"apic_frame",
")",
":",
"return",
"Image",
"(",
"data",
"=",
"apic_frame",
".",
"data",
",",
"desc",
"=",
"apic_frame",
".",
"desc",
",",
"type",
"=",
"apic_frame",
".",
"type",
")"
] | Convert APIC frame into Image. | [
"Convert",
"APIC",
"frame",
"into",
"Image",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L933-L936 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | MP3ImageStorageStyle.serialize | def serialize(self, image):
"""Return an APIC frame populated with data from ``image``.
"""
assert isinstance(image, Image)
frame = mutagen.id3.Frames[self.key]()
frame.data = image.data
frame.mime = image.mime_type
frame.desc = image.desc or u''
# For compatibility with OS X/iTunes prefer latin-1 if possible.
# See issue #899
try:
frame.desc.encode("latin-1")
except UnicodeEncodeError:
frame.encoding = mutagen.id3.Encoding.UTF16
else:
frame.encoding = mutagen.id3.Encoding.LATIN1
frame.type = image.type_index
return frame | python | def serialize(self, image):
"""Return an APIC frame populated with data from ``image``.
"""
assert isinstance(image, Image)
frame = mutagen.id3.Frames[self.key]()
frame.data = image.data
frame.mime = image.mime_type
frame.desc = image.desc or u''
# For compatibility with OS X/iTunes prefer latin-1 if possible.
# See issue #899
try:
frame.desc.encode("latin-1")
except UnicodeEncodeError:
frame.encoding = mutagen.id3.Encoding.UTF16
else:
frame.encoding = mutagen.id3.Encoding.LATIN1
frame.type = image.type_index
return frame | [
"def",
"serialize",
"(",
"self",
",",
"image",
")",
":",
"assert",
"isinstance",
"(",
"image",
",",
"Image",
")",
"frame",
"=",
"mutagen",
".",
"id3",
".",
"Frames",
"[",
"self",
".",
"key",
"]",
"(",
")",
"frame",
".",
"data",
"=",
"image",
".",
"data",
"frame",
".",
"mime",
"=",
"image",
".",
"mime_type",
"frame",
".",
"desc",
"=",
"image",
".",
"desc",
"or",
"u''",
"# For compatibility with OS X/iTunes prefer latin-1 if possible.",
"# See issue #899",
"try",
":",
"frame",
".",
"desc",
".",
"encode",
"(",
"\"latin-1\"",
")",
"except",
"UnicodeEncodeError",
":",
"frame",
".",
"encoding",
"=",
"mutagen",
".",
"id3",
".",
"Encoding",
".",
"UTF16",
"else",
":",
"frame",
".",
"encoding",
"=",
"mutagen",
".",
"id3",
".",
"Encoding",
".",
"LATIN1",
"frame",
".",
"type",
"=",
"image",
".",
"type_index",
"return",
"frame"
] | Return an APIC frame populated with data from ``image``. | [
"Return",
"an",
"APIC",
"frame",
"populated",
"with",
"data",
"from",
"image",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L947-L966 |
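The encoding fallback above, in isolation: keep Latin-1 for OS X/iTunes compatibility when the description allows it, otherwise fall back to UTF-16.

for desc in (u'front cover', u'\u30ab\u30d0\u30fc'):
    try:
        desc.encode('latin-1')
        enc = 'LATIN1'
    except UnicodeEncodeError:
        enc = 'UTF16'
    print(repr(desc), '->', enc)
# -> 'front cover' -> LATIN1; the Japanese description -> UTF16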
Josef-Friedrich/phrydy | phrydy/mediafile.py | VorbisImageStorageStyle.serialize | def serialize(self, image):
"""Turn a Image into a base64 encoded FLAC picture block.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
# Encoding with base64 returns bytes on both Python 2 and 3.
# Mutagen requires the data to be a Unicode string, so we decode
# it before passing it along.
return base64.b64encode(pic.write()).decode('ascii') | python | def serialize(self, image):
"""Turn a Image into a base64 encoded FLAC picture block.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
# Encoding with base64 returns bytes on both Python 2 and 3.
# Mutagen requires the data to be a Unicode string, so we decode
# it before passing it along.
return base64.b64encode(pic.write()).decode('ascii') | [
"def",
"serialize",
"(",
"self",
",",
"image",
")",
":",
"pic",
"=",
"mutagen",
".",
"flac",
".",
"Picture",
"(",
")",
"pic",
".",
"data",
"=",
"image",
".",
"data",
"pic",
".",
"type",
"=",
"image",
".",
"type_index",
"pic",
".",
"mime",
"=",
"image",
".",
"mime_type",
"pic",
".",
"desc",
"=",
"image",
".",
"desc",
"or",
"u''",
"# Encoding with base64 returns bytes on both Python 2 and 3.",
"# Mutagen requires the data to be a Unicode string, so we decode",
"# it before passing it along.",
"return",
"base64",
".",
"b64encode",
"(",
"pic",
".",
"write",
"(",
")",
")",
".",
"decode",
"(",
"'ascii'",
")"
] | Turn an Image into a base64 encoded FLAC picture block. | [
"Turn",
"a",
"Image",
"into",
"a",
"base64",
"encoded",
"FLAC",
"picture",
"block",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1036-L1048 |
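A minimal sketch of producing the base64 text above, assuming mutagen is installed; the image bytes are placeholders:

import base64
import mutagen.flac

pic = mutagen.flac.Picture()
pic.data = b'\x89PNG\r\n\x1a\n'
pic.type = 3  # front cover
pic.mime = u'image/png'
pic.desc = u''
# the resulting ASCII text goes into the METADATA_BLOCK_PICTURE comment
print(base64.b64encode(pic.write()).decode('ascii')[:24])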
Josef-Friedrich/phrydy | phrydy/mediafile.py | FlacImageStorageStyle.store | def store(self, mutagen_file, pictures):
"""``pictures`` is a list of mutagen.flac.Picture instances.
"""
mutagen_file.clear_pictures()
for pic in pictures:
mutagen_file.add_picture(pic) | python | def store(self, mutagen_file, pictures):
"""``pictures`` is a list of mutagen.flac.Picture instances.
"""
mutagen_file.clear_pictures()
for pic in pictures:
mutagen_file.add_picture(pic) | [
"def",
"store",
"(",
"self",
",",
"mutagen_file",
",",
"pictures",
")",
":",
"mutagen_file",
".",
"clear_pictures",
"(",
")",
"for",
"pic",
"in",
"pictures",
":",
"mutagen_file",
".",
"add_picture",
"(",
"pic",
")"
] | ``pictures`` is a list of mutagen.flac.Picture instances. | [
"pictures",
"is",
"a",
"list",
"of",
"mutagen",
".",
"flac",
".",
"Picture",
"instances",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1066-L1071 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | FlacImageStorageStyle.serialize | def serialize(self, image):
"""Turn a Image into a mutagen.flac.Picture.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
return pic | python | def serialize(self, image):
"""Turn a Image into a mutagen.flac.Picture.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
return pic | [
"def",
"serialize",
"(",
"self",
",",
"image",
")",
":",
"pic",
"=",
"mutagen",
".",
"flac",
".",
"Picture",
"(",
")",
"pic",
".",
"data",
"=",
"image",
".",
"data",
"pic",
".",
"type",
"=",
"image",
".",
"type_index",
"pic",
".",
"mime",
"=",
"image",
".",
"mime_type",
"pic",
".",
"desc",
"=",
"image",
".",
"desc",
"or",
"u''",
"return",
"pic"
] | Turn an Image into a mutagen.flac.Picture. | [
"Turn",
"a",
"Image",
"into",
"a",
"mutagen",
".",
"flac",
".",
"Picture",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1073-L1081 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | APEv2ImageStorageStyle.delete | def delete(self, mutagen_file):
"""Remove all images from the file.
"""
for cover_tag in self.TAG_NAMES.values():
try:
del mutagen_file[cover_tag]
except KeyError:
pass | python | def delete(self, mutagen_file):
"""Remove all images from the file.
"""
for cover_tag in self.TAG_NAMES.values():
try:
del mutagen_file[cover_tag]
except KeyError:
pass | [
"def",
"delete",
"(",
"self",
",",
"mutagen_file",
")",
":",
"for",
"cover_tag",
"in",
"self",
".",
"TAG_NAMES",
".",
"values",
"(",
")",
":",
"try",
":",
"del",
"mutagen_file",
"[",
"cover_tag",
"]",
"except",
"KeyError",
":",
"pass"
] | Remove all images from the file. | [
"Remove",
"all",
"images",
"from",
"the",
"file",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1150-L1157 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | MediaField.styles | def styles(self, mutagen_file):
"""Yields the list of storage styles of this field that can
handle the MediaFile's format.
"""
for style in self._styles:
if mutagen_file.__class__.__name__ in style.formats:
yield style | python | def styles(self, mutagen_file):
"""Yields the list of storage styles of this field that can
handle the MediaFile's format.
"""
for style in self._styles:
if mutagen_file.__class__.__name__ in style.formats:
yield style | [
"def",
"styles",
"(",
"self",
",",
"mutagen_file",
")",
":",
"for",
"style",
"in",
"self",
".",
"_styles",
":",
"if",
"mutagen_file",
".",
"__class__",
".",
"__name__",
"in",
"style",
".",
"formats",
":",
"yield",
"style"
] | Yields the list of storage styles of this field that can
handle the MediaFile's format. | [
"Yields",
"the",
"list",
"of",
"storage",
"styles",
"of",
"this",
"field",
"that",
"can",
"handle",
"the",
"MediaFile",
"s",
"format",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1183-L1189 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | MediaField._none_value | def _none_value(self):
"""Get an appropriate "null" value for this field's type. This
is used internally when setting the field to None.
"""
if self.out_type == int:
return 0
elif self.out_type == float:
return 0.0
elif self.out_type == bool:
return False
elif self.out_type == six.text_type:
return u'' | python | def _none_value(self):
"""Get an appropriate "null" value for this field's type. This
is used internally when setting the field to None.
"""
if self.out_type == int:
return 0
elif self.out_type == float:
return 0.0
elif self.out_type == bool:
return False
elif self.out_type == six.text_type:
return u'' | [
"def",
"_none_value",
"(",
"self",
")",
":",
"if",
"self",
".",
"out_type",
"==",
"int",
":",
"return",
"0",
"elif",
"self",
".",
"out_type",
"==",
"float",
":",
"return",
"0.0",
"elif",
"self",
".",
"out_type",
"==",
"bool",
":",
"return",
"False",
"elif",
"self",
".",
"out_type",
"==",
"six",
".",
"text_type",
":",
"return",
"u''"
] | Get an appropriate "null" value for this field's type. This
is used internally when setting the field to None. | [
"Get",
"an",
"appropriate",
"null",
"value",
"for",
"this",
"field",
"s",
"type",
".",
"This",
"is",
"used",
"internally",
"when",
"setting",
"the",
"field",
"to",
"None",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1209-L1220 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | DateField._get_date_tuple | def _get_date_tuple(self, mediafile):
"""Get a 3-item sequence representing the date consisting of a
year, month, and day number. Each number is either an integer or
None.
"""
# Get the underlying data and split on hyphens and slashes.
datestring = super(DateField, self).__get__(mediafile, None)
if isinstance(datestring, six.string_types):
datestring = re.sub(r'[Tt ].*$', '', six.text_type(datestring))
items = re.split('[-/]', six.text_type(datestring))
else:
items = []
# Ensure that we have exactly 3 components, possibly by
# truncating or padding.
items = items[:3]
if len(items) < 3:
items += [None] * (3 - len(items))
# Use year field if year is missing.
if not items[0] and hasattr(self, '_year_field'):
items[0] = self._year_field.__get__(mediafile)
# Convert each component to an integer if possible.
items_ = []
for item in items:
try:
items_.append(int(item))
except (TypeError, ValueError):
items_.append(None)
return items_ | python | def _get_date_tuple(self, mediafile):
"""Get a 3-item sequence representing the date consisting of a
year, month, and day number. Each number is either an integer or
None.
"""
# Get the underlying data and split on hyphens and slashes.
datestring = super(DateField, self).__get__(mediafile, None)
if isinstance(datestring, six.string_types):
datestring = re.sub(r'[Tt ].*$', '', six.text_type(datestring))
items = re.split('[-/]', six.text_type(datestring))
else:
items = []
# Ensure that we have exactly 3 components, possibly by
# truncating or padding.
items = items[:3]
if len(items) < 3:
items += [None] * (3 - len(items))
# Use year field if year is missing.
if not items[0] and hasattr(self, '_year_field'):
items[0] = self._year_field.__get__(mediafile)
# Convert each component to an integer if possible.
items_ = []
for item in items:
try:
items_.append(int(item))
except (TypeError, ValueError):
items_.append(None)
return items_ | [
"def",
"_get_date_tuple",
"(",
"self",
",",
"mediafile",
")",
":",
"# Get the underlying data and split on hyphens and slashes.",
"datestring",
"=",
"super",
"(",
"DateField",
",",
"self",
")",
".",
"__get__",
"(",
"mediafile",
",",
"None",
")",
"if",
"isinstance",
"(",
"datestring",
",",
"six",
".",
"string_types",
")",
":",
"datestring",
"=",
"re",
".",
"sub",
"(",
"r'[Tt ].*$'",
",",
"''",
",",
"six",
".",
"text_type",
"(",
"datestring",
")",
")",
"items",
"=",
"re",
".",
"split",
"(",
"'[-/]'",
",",
"six",
".",
"text_type",
"(",
"datestring",
")",
")",
"else",
":",
"items",
"=",
"[",
"]",
"# Ensure that we have exactly 3 components, possibly by",
"# truncating or padding.",
"items",
"=",
"items",
"[",
":",
"3",
"]",
"if",
"len",
"(",
"items",
")",
"<",
"3",
":",
"items",
"+=",
"[",
"None",
"]",
"*",
"(",
"3",
"-",
"len",
"(",
"items",
")",
")",
"# Use year field if year is missing.",
"if",
"not",
"items",
"[",
"0",
"]",
"and",
"hasattr",
"(",
"self",
",",
"'_year_field'",
")",
":",
"items",
"[",
"0",
"]",
"=",
"self",
".",
"_year_field",
".",
"__get__",
"(",
"mediafile",
")",
"# Convert each component to an integer if possible.",
"items_",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"try",
":",
"items_",
".",
"append",
"(",
"int",
"(",
"item",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"items_",
".",
"append",
"(",
"None",
")",
"return",
"items_"
] | Get a 3-item sequence representing the date consisting of a
year, month, and day number. Each number is either an integer or
None. | [
"Get",
"a",
"3",
"-",
"item",
"sequence",
"representing",
"the",
"date",
"consisting",
"of",
"a",
"year",
"month",
"and",
"day",
"number",
".",
"Each",
"number",
"is",
"either",
"an",
"integer",
"or",
"None",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1293-L1323 |
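The parsing steps above, in isolation: strip any time suffix, split on hyphens/slashes, then pad to exactly three components.

import re

datestring = u'2001-09'
datestring = re.sub(r'[Tt ].*$', '', datestring)
items = re.split('[-/]', datestring)[:3]
items += [None] * (3 - len(items))
print(items)  # -> ['2001', '09', None]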
Josef-Friedrich/phrydy | phrydy/mediafile.py | DateField._set_date_tuple | def _set_date_tuple(self, mediafile, year, month=None, day=None):
"""Set the value of the field given a year, month, and day
number. Each number can be an integer or None to indicate an
unset component.
"""
if year is None:
self.__delete__(mediafile)
return
date = [u'{0:04d}'.format(int(year))]
if month:
date.append(u'{0:02d}'.format(int(month)))
if month and day:
date.append(u'{0:02d}'.format(int(day)))
date = map(six.text_type, date)
super(DateField, self).__set__(mediafile, u'-'.join(date))
if hasattr(self, '_year_field'):
self._year_field.__set__(mediafile, year) | python | def _set_date_tuple(self, mediafile, year, month=None, day=None):
"""Set the value of the field given a year, month, and day
number. Each number can be an integer or None to indicate an
unset component.
"""
if year is None:
self.__delete__(mediafile)
return
date = [u'{0:04d}'.format(int(year))]
if month:
date.append(u'{0:02d}'.format(int(month)))
if month and day:
date.append(u'{0:02d}'.format(int(day)))
date = map(six.text_type, date)
super(DateField, self).__set__(mediafile, u'-'.join(date))
if hasattr(self, '_year_field'):
self._year_field.__set__(mediafile, year) | [
"def",
"_set_date_tuple",
"(",
"self",
",",
"mediafile",
",",
"year",
",",
"month",
"=",
"None",
",",
"day",
"=",
"None",
")",
":",
"if",
"year",
"is",
"None",
":",
"self",
".",
"__delete__",
"(",
"mediafile",
")",
"return",
"date",
"=",
"[",
"u'{0:04d}'",
".",
"format",
"(",
"int",
"(",
"year",
")",
")",
"]",
"if",
"month",
":",
"date",
".",
"append",
"(",
"u'{0:02d}'",
".",
"format",
"(",
"int",
"(",
"month",
")",
")",
")",
"if",
"month",
"and",
"day",
":",
"date",
".",
"append",
"(",
"u'{0:02d}'",
".",
"format",
"(",
"int",
"(",
"day",
")",
")",
")",
"date",
"=",
"map",
"(",
"six",
".",
"text_type",
",",
"date",
")",
"super",
"(",
"DateField",
",",
"self",
")",
".",
"__set__",
"(",
"mediafile",
",",
"u'-'",
".",
"join",
"(",
"date",
")",
")",
"if",
"hasattr",
"(",
"self",
",",
"'_year_field'",
")",
":",
"self",
".",
"_year_field",
".",
"__set__",
"(",
"mediafile",
",",
"year",
")"
] | Set the value of the field given a year, month, and day
number. Each number can be an integer or None to indicate an
unset component. | [
"Set",
"the",
"value",
"of",
"the",
"field",
"given",
"a",
"year",
"month",
"and",
"day",
"number",
".",
"Each",
"number",
"can",
"be",
"an",
"integer",
"or",
"None",
"to",
"indicate",
"an",
"unset",
"component",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1325-L1343 |
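The serialization above, in isolation: zero-pad each present component and join with hyphens; a day without a month is dropped.

year, month, day = 2001, 9, None
date = [u'{0:04d}'.format(year)]
if month:
    date.append(u'{0:02d}'.format(month))
if month and day:
    date.append(u'{0:02d}'.format(day))
print(u'-'.join(date))  # -> 2001-09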
Josef-Friedrich/phrydy | phrydy/mediafile.py | MediaFile.save | def save(self):
"""Write the object's tags back to the file. May
throw `UnreadableFileError`.
"""
# Possibly save the tags to ID3v2.3.
kwargs = {}
if self.id3v23:
id3 = self.mgfile
if hasattr(id3, 'tags'):
# In case this is an MP3 object, not an ID3 object.
id3 = id3.tags
id3.update_to_v23()
kwargs['v2_version'] = 3
mutagen_call('save', self.path, self.mgfile.save, **kwargs) | python | def save(self):
"""Write the object's tags back to the file. May
throw `UnreadableFileError`.
"""
# Possibly save the tags to ID3v2.3.
kwargs = {}
if self.id3v23:
id3 = self.mgfile
if hasattr(id3, 'tags'):
# In case this is an MP3 object, not an ID3 object.
id3 = id3.tags
id3.update_to_v23()
kwargs['v2_version'] = 3
mutagen_call('save', self.path, self.mgfile.save, **kwargs) | [
"def",
"save",
"(",
"self",
")",
":",
"# Possibly save the tags to ID3v2.3.",
"kwargs",
"=",
"{",
"}",
"if",
"self",
".",
"id3v23",
":",
"id3",
"=",
"self",
".",
"mgfile",
"if",
"hasattr",
"(",
"id3",
",",
"'tags'",
")",
":",
"# In case this is an MP3 object, not an ID3 object.",
"id3",
"=",
"id3",
".",
"tags",
"id3",
".",
"update_to_v23",
"(",
")",
"kwargs",
"[",
"'v2_version'",
"]",
"=",
"3",
"mutagen_call",
"(",
"'save'",
",",
"self",
".",
"path",
",",
"self",
".",
"mgfile",
".",
"save",
",",
"*",
"*",
"kwargs",
")"
] | Write the object's tags back to the file. May
throw `UnreadableFileError`. | [
"Write",
"the",
"object",
"s",
"tags",
"back",
"to",
"the",
"file",
".",
"May",
"throw",
"UnreadableFileError",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1492-L1506 |
Josef-Friedrich/phrydy | phrydy/mediafile.py | MediaFile.fields | def fields(cls):
"""Get the names of all writable properties that reflect
metadata tags (i.e., those that are instances of
:class:`MediaField`).
"""
for property, descriptor in cls.__dict__.items():
if isinstance(descriptor, MediaField):
if isinstance(property, bytes):
# On Python 2, class field names are bytes. This method
# produces text strings.
yield property.decode('utf8', 'ignore')
else:
yield property | python | def fields(cls):
"""Get the names of all writable properties that reflect
metadata tags (i.e., those that are instances of
:class:`MediaField`).
"""
for property, descriptor in cls.__dict__.items():
if isinstance(descriptor, MediaField):
if isinstance(property, bytes):
# On Python 2, class field names are bytes. This method
# produces text strings.
yield property.decode('utf8', 'ignore')
else:
yield property | [
"def",
"fields",
"(",
"cls",
")",
":",
"for",
"property",
",",
"descriptor",
"in",
"cls",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"descriptor",
",",
"MediaField",
")",
":",
"if",
"isinstance",
"(",
"property",
",",
"bytes",
")",
":",
"# On Python 2, class field names are bytes. This method",
"# produces text strings.",
"yield",
"property",
".",
"decode",
"(",
"'utf8'",
",",
"'ignore'",
")",
"else",
":",
"yield",
"property"
] | Get the names of all writable properties that reflect
metadata tags (i.e., those that are instances of
:class:`MediaField`). | [
"Get",
"the",
"names",
"of",
"all",
"writable",
"properties",
"that",
"reflect",
"metadata",
"tags",
"(",
"i",
".",
"e",
".",
"those",
"that",
"are",
"instances",
"of",
":",
"class",
":",
"MediaField",
")",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1517-L1529 |
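A minimal usage sketch for `fields` above, assuming phrydy is installed; the printed names depend on the installed version:

from phrydy.mediafile import MediaFile

# every writable tag property declared as a MediaField descriptor
print(sorted(MediaFile.fields())[:5])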
Josef-Friedrich/phrydy | phrydy/mediafile.py | MediaFile._field_sort_name | def _field_sort_name(cls, name):
"""Get a sort key for a field name that determines the order
fields should be written in.
Field names are kept unchanged, unless they are instances of
:class:`DateItemField`, in which case `year`, `month`, and `day`
are replaced by `date0`, `date1`, and `date2`, respectively, to
make them appear in that order.
"""
if isinstance(cls.__dict__[name], DateItemField):
name = re.sub('year', 'date0', name)
name = re.sub('month', 'date1', name)
name = re.sub('day', 'date2', name)
return name | python | def _field_sort_name(cls, name):
"""Get a sort key for a field name that determines the order
fields should be written in.
Field names are kept unchanged, unless they are instances of
:class:`DateItemField`, in which case `year`, `month`, and `day`
are replaced by `date0`, `date1`, and `date2`, respectively, to
make them appear in that order.
"""
if isinstance(cls.__dict__[name], DateItemField):
name = re.sub('year', 'date0', name)
name = re.sub('month', 'date1', name)
name = re.sub('day', 'date2', name)
return name | [
"def",
"_field_sort_name",
"(",
"cls",
",",
"name",
")",
":",
"if",
"isinstance",
"(",
"cls",
".",
"__dict__",
"[",
"name",
"]",
",",
"DateItemField",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"'year'",
",",
"'date0'",
",",
"name",
")",
"name",
"=",
"re",
".",
"sub",
"(",
"'month'",
",",
"'date1'",
",",
"name",
")",
"name",
"=",
"re",
".",
"sub",
"(",
"'day'",
",",
"'date2'",
",",
"name",
")",
"return",
"name"
] | Get a sort key for a field name that determines the order
fields should be written in.
Field names are kept unchanged, unless they are instances of
:class:`DateItemField`, in which case `year`, `month`, and `day`
are replaced by `date0`, `date1`, and `date2`, respectively, to
make them appear in that order. | [
"Get",
"a",
"sort",
"key",
"for",
"a",
"field",
"name",
"that",
"determines",
"the",
"order",
"fields",
"should",
"be",
"written",
"in",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1532-L1545 |
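To make the year/month/day grouping concrete, an illustrative check — this calls a private classmethod and assumes `year` and `month` are declared as `DateItemField` instances on the class:

```python
from phrydy import MediaFile

print(MediaFile._field_sort_name('year'))   # -> 'date0'
print(MediaFile._field_sort_name('month'))  # -> 'date1'
print(MediaFile._field_sort_name('title'))  # -> 'title' (not a DateItemField)
```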
Josef-Friedrich/phrydy | phrydy/mediafile.py | MediaFile.sorted_fields | def sorted_fields(cls):
"""Get the names of all writable metadata fields, sorted in the
order that they should be written.
This is a lexicographic order, except for instances of
:class:`DateItemField`, which are sorted in year-month-day
order.
"""
for property in sorted(cls.fields(), key=cls._field_sort_name):
yield property | python | def sorted_fields(cls):
"""Get the names of all writable metadata fields, sorted in the
order that they should be written.
This is a lexicographic order, except for instances of
:class:`DateItemField`, which are sorted in year-month-day
order.
"""
for property in sorted(cls.fields(), key=cls._field_sort_name):
yield property | [
"def",
"sorted_fields",
"(",
"cls",
")",
":",
"for",
"property",
"in",
"sorted",
"(",
"cls",
".",
"fields",
"(",
")",
",",
"key",
"=",
"cls",
".",
"_field_sort_name",
")",
":",
"yield",
"property"
] | Get the names of all writable metadata fields, sorted in the
order that they should be written.
This is a lexicographic order, except for instances of
:class:`DateItemField`, which are sorted in year-month-day
order. | [
"Get",
"the",
"names",
"of",
"all",
"writable",
"metadata",
"fields",
"sorted",
"in",
"the",
"order",
"that",
"they",
"should",
"be",
"written",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1548-L1557 |
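A sketch showing the effect of that ordering (assumes the standard `year`/`month`/`day` fields exist):

```python
from phrydy import MediaFile

names = list(MediaFile.sorted_fields())
# Lexicographic overall, but the date parts keep calendar order:
assert names.index('year') < names.index('month') < names.index('day')
```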
Josef-Friedrich/phrydy | phrydy/mediafile.py | MediaFile.add_field | def add_field(cls, name, descriptor):
"""Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
"""
if not isinstance(descriptor, MediaField):
raise ValueError(
u'{0} must be an instance of MediaField'.format(descriptor))
if name in cls.__dict__:
raise ValueError(
u'property "{0}" already exists on MediaField'.format(name))
setattr(cls, name, descriptor) | python | def add_field(cls, name, descriptor):
"""Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
"""
if not isinstance(descriptor, MediaField):
raise ValueError(
u'{0} must be an instance of MediaField'.format(descriptor))
if name in cls.__dict__:
raise ValueError(
u'property "{0}" already exists on MediaField'.format(name))
setattr(cls, name, descriptor) | [
"def",
"add_field",
"(",
"cls",
",",
"name",
",",
"descriptor",
")",
":",
"if",
"not",
"isinstance",
"(",
"descriptor",
",",
"MediaField",
")",
":",
"raise",
"ValueError",
"(",
"u'{0} must be an instance of MediaField'",
".",
"format",
"(",
"descriptor",
")",
")",
"if",
"name",
"in",
"cls",
".",
"__dict__",
":",
"raise",
"ValueError",
"(",
"u'property \"{0}\" already exists on MediaField'",
".",
"format",
"(",
"name",
")",
")",
"setattr",
"(",
"cls",
",",
"name",
",",
"descriptor",
")"
] | Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`. | [
"Add",
"a",
"field",
"to",
"store",
"custom",
"tags",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1571-L1585 |
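A hedged sketch of registering a custom field. The storage-style classes follow the beets-style API this module descends from and are assumptions, not taken from the entry above:

```python
from phrydy import MediaFile
from phrydy.mediafile import MediaField, MP3DescStorageStyle, StorageStyle

# 'customtag' is a made-up tag name for illustration.
descriptor = MediaField(
    MP3DescStorageStyle(u'customtag'),  # how MP3/ID3 files would store it
    StorageStyle(u'customtag'),         # generic fallback for other formats
)
MediaFile.add_field('customtag', descriptor)
```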
Josef-Friedrich/phrydy | phrydy/mediafile.py | MediaFile.update | def update(self, dict):
"""Set all field values from a dictionary.
For any key in `dict` that is also a field to store tags, the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted from the `MediaFile`.
"""
for field in self.sorted_fields():
if field in dict:
if dict[field] is None:
delattr(self, field)
else:
setattr(self, field, dict[field]) | python | def update(self, dict):
"""Set all field values from a dictionary.
For any key in `dict` that is also a field to store tags, the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted from the `MediaFile`.
"""
for field in self.sorted_fields():
if field in dict:
if dict[field] is None:
delattr(self, field)
else:
setattr(self, field, dict[field]) | [
"def",
"update",
"(",
"self",
",",
"dict",
")",
":",
"for",
"field",
"in",
"self",
".",
"sorted_fields",
"(",
")",
":",
"if",
"field",
"in",
"dict",
":",
"if",
"dict",
"[",
"field",
"]",
"is",
"None",
":",
"delattr",
"(",
"self",
",",
"field",
")",
"else",
":",
"setattr",
"(",
"self",
",",
"field",
",",
"dict",
"[",
"field",
"]",
")"
] | Set all field values from a dictionary.
For any key in `dict` that is also a field to store tags, the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted from the `MediaFile`. | [
"Set",
"all",
"field",
"values",
"from",
"a",
"dictionary",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1587-L1600 |
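A sketch of the delete-on-`None` behaviour (the path and tag values are hypothetical):

```python
from phrydy import MediaFile

f = MediaFile('song.mp3')
f.update({'title': u'New Title', 'comments': None})  # None deletes 'comments'
f.save()
```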
Josef-Friedrich/phrydy | phrydy/mediafile.py | MediaFile.samplerate | def samplerate(self):
"""The audio's sample rate (an int)."""
if hasattr(self.mgfile.info, 'sample_rate'):
return self.mgfile.info.sample_rate
elif self.type == 'opus':
# Opus is always 48kHz internally.
return 48000
return 0 | python | def samplerate(self):
"""The audio's sample rate (an int)."""
if hasattr(self.mgfile.info, 'sample_rate'):
return self.mgfile.info.sample_rate
elif self.type == 'opus':
# Opus is always 48kHz internally.
return 48000
return 0 | [
"def",
"samplerate",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"mgfile",
".",
"info",
",",
"'sample_rate'",
")",
":",
"return",
"self",
".",
"mgfile",
".",
"info",
".",
"sample_rate",
"elif",
"self",
".",
"type",
"==",
"'opus'",
":",
"# Opus is always 48kHz internally.",
"return",
"48000",
"return",
"0"
] | The audio's sample rate (an int). | [
"The",
"audio",
"s",
"sample",
"rate",
"(",
"an",
"int",
")",
"."
] | train | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L2162-L2169 |
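For illustration, reading the property on two hypothetical files (the Opus branch returns the codec's fixed internal rate):

```python
from phrydy import MediaFile

print(MediaFile('song.mp3').samplerate)   # e.g. 44100, from mutagen's stream info
print(MediaFile('song.opus').samplerate)  # 48000: Opus is always 48 kHz internally
```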