Dataset columns (one row per function; `language` and `split_name` each take a single value across this split):

- repository_name: string (length 7 to 55)
- func_path_in_repository: string (length 4 to 223)
- func_name: string (length 1 to 134)
- whole_func_string: string (length 75 to 104k)
- language: string, 1 class (value: "python")
- func_code_string: string (length 75 to 104k)
- func_code_tokens: sequence (length 19 to 28.4k)
- func_documentation_string: string (length 1 to 46.9k)
- func_documentation_tokens: sequence (length 1 to 1.97k)
- split_name: string, 1 class (value: "train")
- func_code_url: string (length 87 to 315)
curious-containers/cc-core | cc_core/commons/cwl.py | parse_cwl_type
def parse_cwl_type(cwl_type_string):
"""
Parses cwl type information from a cwl type string.
Examples:
- "File[]" -> {'type': 'File', 'isArray': True, 'isOptional': False}
- "int?" -> {'type': 'int', 'isArray': False, 'isOptional': True}
:param cwl_type_string: The cwl type string to extract information from
:return: A dictionary containing information about the parsed cwl type string
"""
is_optional = cwl_type_string.endswith('?')
if is_optional:
cwl_type_string = cwl_type_string[:-1]
is_array = cwl_type_string.endswith('[]')
if is_array:
cwl_type_string = cwl_type_string[:-2]
return {'type': cwl_type_string, 'isArray': is_array, 'isOptional': is_optional}
| python | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/cwl.py#L223-L244 |
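A minimal usage sketch for the function above (the import path is assumed from func_path_in_repository; the expected outputs follow the docstring examples):

```python
# Hypothetical usage; assumes cc-core is installed so the module path above is importable.
from cc_core.commons.cwl import parse_cwl_type

print(parse_cwl_type("File[]"))  # {'type': 'File', 'isArray': True, 'isOptional': False}
print(parse_cwl_type("int?"))    # {'type': 'int', 'isArray': False, 'isOptional': True}
```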
curious-containers/cc-core | cc_core/commons/cwl.py | cwl_input_directories
def cwl_input_directories(cwl_data, job_data, input_dir=None):
"""
Searches for Directories in the cwl data and produces a dictionary containing input directory information.
:param cwl_data: The cwl data as dictionary
:param job_data: The job data as dictionary
:param input_dir: TODO
:return: Returns a dictionary containing information about input directories.
The keys of this dictionary are the input/output identifiers of the directories specified in the cwl description.
The corresponding value is a dictionary again with the following keys and values:
- 'isOptional': A bool indicating whether this input directory is optional
- 'isArray': A bool indicating whether this could be a list of directories
- 'directories': A list of input directory descriptions
An input directory description is a dictionary containing the following information
- 'path': The path to the specified directory
- 'debugInfo': A field to possibly provide debug information
"""
results = {}
for input_identifier, input_data in cwl_data['inputs'].items():
cwl_type = parse_cwl_type(input_data['type'])
(is_optional, is_array, cwl_type) = itemgetter('isOptional', 'isArray', 'type')(cwl_type)
if cwl_type == 'Directory':
result = {
'isOptional': is_optional,
'isArray': is_array,
'directories': None
}
if input_identifier in job_data:
arg = job_data[input_identifier]
if is_array:
result['directories'] = [_input_directory_description(input_identifier, i, input_dir) for i in arg]
else:
result['directories'] = [_input_directory_description(input_identifier, arg, input_dir)]
results[input_identifier] = result
return results
| python | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/cwl.py#L275-L318 |
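A sketch of how this row's function could be driven. The shape of the job_data value is a guess, since _input_directory_description is not part of this row:

```python
# Illustrative only; the import path and the job_data value shape are assumptions.
from cc_core.commons.cwl import cwl_input_directories

cwl_data = {'inputs': {'data_dir': {'type': 'Directory'}, 'threshold': {'type': 'float?'}}}
job_data = {'data_dir': {'class': 'Directory', 'path': '/tmp/in'}}
info = cwl_input_directories(cwl_data, job_data)
# Only the 'Directory' input is reported, e.g.:
# {'data_dir': {'isOptional': False, 'isArray': False, 'directories': [...]}}
```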
curious-containers/cc-core | cc_core/commons/cwl.py | cwl_output_files
def cwl_output_files(cwl_data, inputs_to_reference, output_dir=None):
"""
Returns a dictionary containing information about the output files given in cwl_data.
:param cwl_data: The cwl data from where to extract the output file information.
:param inputs_to_reference: Inputs which are used to resolve input references.
:param output_dir: Path to the directory where output files are expected.
:return: A dictionary containing information about every output file.
"""
results = {}
for key, val in cwl_data['outputs'].items():
cwl_type = parse_cwl_type(val['type'])
(is_optional, is_array, cwl_type) = itemgetter('isOptional', 'isArray', 'type')(cwl_type)
if not cwl_type == 'File':
continue
result = {
'isOptional': is_optional,
'path': None,
'size': None,
'debugInfo': None
}
glob_path = os.path.expanduser(val['outputBinding']['glob'])
if output_dir and not os.path.isabs(glob_path):
glob_path = os.path.join(os.path.expanduser(output_dir), glob_path)
glob_path = resolve_input_references(glob_path, inputs_to_reference)
matches = glob(glob_path)
try:
if len(matches) != 1:
raise FileError('glob path "{}" does not match exactly one file'.format(glob_path))
file_path = matches[0]
result['path'] = file_path
if not os.path.isfile(file_path):
raise FileError('path is not a file')
result['size'] = os.path.getsize(file_path) / (1024 * 1024)
except:
result['debugInfo'] = exception_format()
results[key] = result
return results
| python | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/cwl.py#L321-L368 |
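A sketch under assumed inputs. Note that the reported size is in MiB, since the function divides the byte count by 1024 * 1024:

```python
# Illustrative only; import path, paths, and the presence of the output file are assumptions.
from cc_core.commons.cwl import cwl_output_files

cwl_data = {'outputs': {'report': {'type': 'File', 'outputBinding': {'glob': 'report.txt'}}}}
inputs_to_reference = {}  # only used when the glob contains input references
files = cwl_output_files(cwl_data, inputs_to_reference, output_dir='/tmp/out')
# On success: files['report'] == {'isOptional': False, 'path': '/tmp/out/report.txt',
#                                 'size': <MiB>, 'debugInfo': None}
# If the glob does not match exactly one file, 'debugInfo' carries the formatted exception.
```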
greenbender/python-fifo | fifo/__init__.py | Fifo.read
def read(self, length=-1):
"""
Reads from the FIFO.
Reads as much data as possible from the FIFO up to the specified
length. If the length argument is negative or omitted all data
currently available in the FIFO will be read. If there is no data
available in the FIFO an empty string is returned.
Args:
length: The amount of data to read from the FIFO. Defaults to -1.
"""
if 0 <= length < len(self):
newpos = self.pos + length
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return data
data = self.buf[self.pos:]
self.clear()
return data
| python | train | https://github.com/greenbender/python-fifo/blob/ffabb6c8b844086dd3a490d0b42bbb5aa8fbb932/fifo/__init__.py#L76-L97 |
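A usage sketch. The Fifo constructor and write() are not part of this row and are assumed from the same module:

```python
# Hypothetical usage of the read() method shown above.
from fifo import Fifo

f = Fifo()
f.write('hello world')
print(f.read(5))   # 'hello'
print(f.read())    # ' world'  (negative or omitted length drains the buffer)
print(f.read())    # ''        (an empty FIFO returns an empty string)
```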
greenbender/python-fifo | fifo/__init__.py | Fifo.readuntil
def readuntil(self, token, size=0):
"""
Reads data from the FIFO until a token is encountered.
If no token is encountered as much data is read from the FIFO as
possible keeping in mind that the FIFO must retain enough data to
perform matches for the token across writes.
Args:
token: The token to read until.
size: The minimum amount of data that should be left in the FIFO.
This is only used if it is greater than the length of the
token. When omitted this value will default to the length of
the token.
Returns: A tuple of (found, data) where found is a boolean indicating
whether the token was found, and data is all the data that could be
read from the FIFO.
Note: When a token is found the token is also read from the buffer and
returned in the data.
"""
self.__append()
i = self.buf.find(token, self.pos)
if i < 0:
index = max(len(token) - 1, size)
newpos = max(len(self.buf) - index, self.pos)
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return False, data
newpos = i + len(token)
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return True, data
| python | train | https://github.com/greenbender/python-fifo/blob/ffabb6c8b844086dd3a490d0b42bbb5aa8fbb932/fifo/__init__.py#L136-L173 |
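A sketch of token-delimited reads across writes, again assuming write() from the same module:

```python
from fifo import Fifo

f = Fifo()
f.write('HELLO\r\nWOR')
print(f.readuntil('\r\n'))   # (True, 'HELLO\r\n')
print(f.readuntil('\r\n'))   # (False, 'WO')  keeps 'R' so a token split across writes can still match
f.write('LD\r\n')
print(f.readuntil('\r\n'))   # (True, 'RLD\r\n')
```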
greenbender/python-fifo | fifo/__init__.py | Fifo.peekline
def peekline(self):
"""
Peeks a line into the FIFO.
Performs the same function as readline() without removing data from the
FIFO. See readline() for further information.
"""
self.__append()
i = self.buf.find(self.eol, self.pos)
if i < 0:
return ''
newpos = i + len(self.eol)
return self.buf[self.pos:newpos]
| python | train | https://github.com/greenbender/python-fifo/blob/ffabb6c8b844086dd3a490d0b42bbb5aa8fbb932/fifo/__init__.py#L200-L214 |
greenbender/python-fifo | fifo/__init__.py | Fifo.peekuntil
def peekuntil(self, token, size=0):
"""
Peeks for token into the FIFO.
Performs the same function as readuntil() without removing data from the
FIFO. See readuntil() for further information.
"""
self.__append()
i = self.buf.find(token, self.pos)
if i < 0:
index = max(len(token) - 1, size)
newpos = max(len(self.buf) - index, self.pos)
return False, self.buf[self.pos:newpos]
newpos = i + len(token)
return True, self.buf[self.pos:newpos]
| python | train | https://github.com/greenbender/python-fifo/blob/ffabb6c8b844086dd3a490d0b42bbb5aa8fbb932/fifo/__init__.py#L216-L232 |
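A sketch contrasting peeking with reading; peekuntil() leaves the buffer untouched:

```python
from fifo import Fifo

f = Fifo()
f.write('abc|def')           # write() assumed, as above
print(f.peekuntil('|'))      # (True, 'abc|')
print(f.readuntil('|'))      # (True, 'abc|')  the peek did not consume the data
```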
linkhub-sdk/popbill.py | popbill/kakaoService.py | KakaoService.getURL
def getURL(self, CorpNum, UserID, ToGo):
"""
:param CorpNum: 팝빌회원 사업자번호
:param UserID: 팝빌회원 아이디
:param ToGo: [PLUSFRIEND-플러스친구계정관리, SENDER-발신번호관리, TEMPLATE-알림톡템플릿관리, BOX-카카오톡전송내용]
:return: 팝빌 URL
"""
if ToGo == None or ToGo == '':
raise PopbillException(-99999999, "TOGO값이 입력되지 않았습니다.")
if ToGo == 'SENDER':
result = self._httpget('/Message/?TG=' + ToGo, CorpNum, UserID)
else:
result = self._httpget('/KakaoTalk/?TG=' + ToGo, CorpNum, UserID)
return result.url
| python | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L28-L42 |
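A hypothetical call; the KakaoService constructor arguments (LinkID, SecretKey) come from the Popbill SDK and are not shown in this row:

```python
from popbill import KakaoService

kakaoService = KakaoService("LinkID", "SecretKey")  # credentials are placeholders
popup_url = kakaoService.getURL("1234567890", "testkorea", "TEMPLATE")
print(popup_url)
```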
linkhub-sdk/popbill.py | popbill/kakaoService.py | KakaoService.getPlusFriendMgtURL
def getPlusFriendMgtURL(self, CorpNum, UserID):
"""
플러스친구 계정관리 팝업 URL
:param CorpNum: 팝빌회원 사업자번호
:param UserID: 팝빌회원 아이디
:return: 팝빌 URL
"""
result = self._httpget('/KakaoTalk/?TG=PLUSFRIEND', CorpNum, UserID)
return result.url
| python | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L44-L52 |
linkhub-sdk/popbill.py | popbill/kakaoService.py | KakaoService.getATSTemplateMgtURL
def getATSTemplateMgtURL(self, CorpNum, UserID):
"""
알림톡 템플릿관리 팝업 URL
:param CorpNum: 팝빌회원 사업자번호
:param UserID: 팝빌회원 아이디
:return: 팝빌 URL
"""
result = self._httpget('/KakaoTalk/?TG=TEMPLATE', CorpNum, UserID)
return result.url
| python | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L64-L72 |
linkhub-sdk/popbill.py | popbill/kakaoService.py | KakaoService.getSentListURL
def getSentListURL(self, CorpNum, UserID):
"""
카카오톡 전송내역 팝업 URL
:param CorpNum: 팝빌회원 사업자번호
:param UserID: 팝빌회원 아이디
:return: 팝빌 URL
"""
result = self._httpget('/KakaoTalk/?TG=BOX', CorpNum, UserID)
return result.url
| python | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L74-L82 |
linkhub-sdk/popbill.py | popbill/kakaoService.py | KakaoService.sendATS_same
def sendATS_same(self, CorpNum, TemplateCode, Sender, Content, AltContent, AltSendType, SndDT, KakaoMessages,
UserID=None, RequestNum=None, ButtonList=None):
"""
알림톡 대량전송
:param CorpNum: 팝빌회원 사업자번호
:param TemplateCode: 템플릿코드
:param Sender: 발신번호
:param Content: [동보] 알림톡 내용
:param AltContent: [동보] 대체문자 내용
:param AltSendType: 대체문자 유형 [공백-미전송, C-알림톡내용, A-대체문자내용]
:param SndDT: 예약일시 [작성형식 : yyyyMMddHHmmss]
:param KakaoMessages: 알림톡 내용 (배열)
:param UserID: 팝빌회원 아이디
:param RequestNum : 요청번호
:return: receiptNum (접수번호)
"""
if TemplateCode is None or TemplateCode == '':
raise PopbillException(-99999999, "알림톡 템플릿코드가 입력되지 않았습니다.")
if Sender is None or Sender == '':
raise PopbillException(-99999999, "발신번호가 입력되지 않았습니다.")
req = {}
if TemplateCode is not None or TemplateCode != '':
req['templateCode'] = TemplateCode
if Sender is not None or Sender != '':
req['snd'] = Sender
if Content is not None or Content != '':
req['content'] = Content
if AltContent is not None or AltContent != '':
req['altContent'] = AltContent
if AltSendType is not None or AltSendType != '':
req['altSendType'] = AltSendType
if SndDT is not None or SndDT != '':
req['sndDT'] = SndDT
if KakaoMessages is not None or KakaoMessages != '':
req['msgs'] = KakaoMessages
if ButtonList is not None:
req['btns'] = ButtonList
if RequestNum is not None or RequestNum != '':
req['requestnum'] = RequestNum
postData = self._stringtify(req)
result = self._httppost('/ATS', postData, CorpNum, UserID)
return result.receiptNum
| python | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L153-L199 |
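An illustrative call continuing the sketch above. The per-receiver dict keys follow the Popbill guide and are an assumption here; this row only shows that KakaoMessages is a list placed under 'msgs':

```python
from popbill import KakaoService, PopbillException

kakaoService = KakaoService("LinkID", "SecretKey")      # constructor args assumed
messages = [
    {"rcv": "01012341234", "rcvnm": "Receiver One"},    # field names are illustrative
    {"rcv": "01043214321", "rcvnm": "Receiver Two"},
]
try:
    receipt_num = kakaoService.sendATS_same(
        "1234567890", "TEMPLATE_CODE", "07000000000",
        Content="notification body", AltContent="alternative SMS body",
        AltSendType="C", SndDT="", KakaoMessages=messages)
    print(receipt_num)
except PopbillException as pe:
    print(pe.code, pe.message)
```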
linkhub-sdk/popbill.py | popbill/kakaoService.py | KakaoService.sendFTS_same
def sendFTS_same(self, CorpNum, PlusFriendID, Sender, Content, AltContent, AltSendType, SndDT,
KakaoMessages, KakaoButtons, AdsYN=False, UserID=None, RequestNum=None):
"""
친구톡 텍스트 대량전송
:param CorpNum: 팝빌회원 사업자번호
:param PlusFriendID: 플러스친구 아이디
:param Sender: 발신번호
:param Content: [동보] 친구톡 내용
:param AltContent: [동보] 대체문자 내용
:param AltSendType: 대체문자 유형 [공백-미전송, C-알림톡내용, A-대체문자내용]
:param SndDT: 예약일시 [작성형식 : yyyyMMddHHmmss]
:param KakaoMessages: 친구톡 내용 (배열)
:param KakaoButtons: 버튼 목록 (최대 5개)
:param AdsYN: 광고 전송여부
:param UserID: 팝빌회원 아이디
:param RequestNum : 요청번호
:return: receiptNum (접수번호)
"""
if PlusFriendID is None or PlusFriendID == '':
raise PopbillException(-99999999, "플러스친구 아이디가 입력되지 않았습니다.")
if Sender is None or Sender == '':
raise PopbillException(-99999999, "발신번호가 입력되지 않았습니다.")
req = {}
if PlusFriendID is not None or PlusFriendID != '':
req['plusFriendID'] = PlusFriendID
if Sender is not None or Sender != '':
req['snd'] = Sender
if AltSendType is not None or AltSendType != '':
req['altSendType'] = AltSendType
if Content is not None or Content != '':
req['content'] = Content
if AltContent is not None or AltContent != '':
req['altContent'] = AltContent
if SndDT is not None or SndDT != '':
req['sndDT'] = SndDT
if KakaoMessages:
req['msgs'] = KakaoMessages
if KakaoButtons:
req['btns'] = KakaoButtons
if AdsYN:
req['adsYN'] = True
if RequestNum is not None or RequestNum != '':
req['requestNum'] = RequestNum
postData = self._stringtify(req)
result = self._httppost('/FTS', postData, CorpNum, UserID)
return result.receiptNum
| python | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L219-L268 |
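Continuing the same sketch for friend-talk text sends; the button field names follow the Popbill guide and are an assumption, since this row only shows that KakaoButtons is passed through as 'btns':

```python
buttons = [{"n": "Visit site", "t": "WL", "u1": "https://www.popbill.com", "u2": ""}]  # illustrative keys
receipt_num = kakaoService.sendFTS_same(
    "1234567890", "@plusfriend_id", "07000000000",
    Content="friendtalk body", AltContent="alternative SMS body", AltSendType="A",
    SndDT="", KakaoMessages=messages, KakaoButtons=buttons, AdsYN=True)
```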
linkhub-sdk/popbill.py | popbill/kakaoService.py | KakaoService.sendFMS_same
def sendFMS_same(self, CorpNum, PlusFriendID, Sender, Content, AltContent, AltSendType, SndDT, FilePath, ImageURL,
KakaoMessages, KakaoButtons, AdsYN=False, UserID=None, RequestNum=None):
"""
친구톡 이미지 대량전송
:param CorpNum: 팝빌회원 사업자번호
:param PlusFriendID: 플러스친구 아이디
:param Sender: 발신번호
:param Content: [동보] 친구톡 내용
:param AltContent: [동보] 대체문자 내용
:param AltSendType: 대체문자 유형 [공백-미전송, C-알림톡내용, A-대체문자내용]
:param SndDT: 예약일시 [작성형식 : yyyyMMddHHmmss]
:param FilePath: 파일경로
:param ImageURL: 이미지URL
:param KakaoMessages: 친구톡 내용 (배열)
:param KakaoButtons: 버튼 목록 (최대 5개)
:param AdsYN: 광고 전송여부
:param UserID: 팝빌회원 아이디
:param RequestNum : 요청번호
:return: receiptNum (접수번호)
"""
if PlusFriendID is None or PlusFriendID == '':
raise PopbillException(-99999999, "플러스친구 아이디가 입력되지 않았습니다.")
if Sender is None or Sender == '':
raise PopbillException(-99999999, "발신번호가 입력되지 않았습니다.")
req = {}
if PlusFriendID is not None or PlusFriendID != '':
req['plusFriendID'] = PlusFriendID
if Sender is not None or Sender != '':
req['snd'] = Sender
if Content is not None or Content != '':
req['content'] = Content
if AltContent is not None or AltContent != '':
req['altContent'] = AltContent
if AltSendType is not None or AltSendType != '':
req['altSendType'] = AltSendType
if SndDT is not None or SndDT != '':
req['sndDT'] = SndDT
if KakaoMessages is not None or KakaoMessages != '':
req['msgs'] = KakaoMessages
if ImageURL is not None or ImageURL != '':
req['imageURL'] = ImageURL
if KakaoButtons:
req['btns'] = KakaoButtons
if AdsYN:
req['adsYN'] = True
if RequestNum is not None or RequestNum != '':
req['requestNum'] = RequestNum
postData = self._stringtify(req)
files = []
try:
with open(FilePath, "rb") as F:
files = [File(fieldName='file',
fileName=F.name,
fileData=F.read())]
except IOError:
raise PopbillException(-99999999, "해당경로에 파일이 없거나 읽을 수 없습니다.")
result = self._httppost_files('/FMS', postData, files, CorpNum, UserID)
return result.receiptNum
| python | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L289-L351 |
linkhub-sdk/popbill.py | popbill/kakaoService.py | KakaoService.cancelReserve
def cancelReserve(self, CorpNum, ReceiptNum, UserID=None):
"""
예약전송 취소
:param CorpNum: 팝빌회원 사업자번호
:param ReceiptNum: 접수번호
:param UserID: 팝빌회원 아이디
:return: code (요청에 대한 상태 응답코드), message (요청에 대한 응답 메시지)
"""
if ReceiptNum == None or len(ReceiptNum) != 18:
raise PopbillException(-99999999, "접수번호가 올바르지 않습니다.")
return self._httpget('/KakaoTalk/' + ReceiptNum + '/Cancel', CorpNum, UserID)
| python | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L353-L364 |
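Continuing the sketch, a reserved send can be cancelled with the 18-character receipt number returned by the send calls above:

```python
response = kakaoService.cancelReserve("1234567890", receipt_num)
print(response.code, response.message)  # fields as described in the docstring above
```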
linkhub-sdk/popbill.py | popbill/kakaoService.py | KakaoService.getUnitCost
def getUnitCost(self, CorpNum, MsgType, UserID=None):
"""
전송단가 확인
:param CorpNum: 팝빌회원 사업자번호
:param MsgType: 카카오톡 유형
:param UserID: 팝빌 회원아이디
:return: unitCost
"""
if MsgType is None or MsgType == "":
raise PopbillException(-99999999, "전송유형이 입력되지 않았습니다.")
result = self._httpget("/KakaoTalk/UnitCost?Type=" + MsgType, CorpNum)
return float(result.unitCost)
| python | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L448-L460 |
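A sketch of the unit-cost lookup; using "ATS" as the MsgType mirrors the endpoint names above and is an assumption of this example:

```python
unit_cost = kakaoService.getUnitCost("1234567890", "ATS")
print(unit_cost)  # per-message cost as a float
```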
mediawiki-utilities/python-mwreverts | mwreverts/utilities/revdocs2reverts.py | revdocs2reverts
def revdocs2reverts(rev_docs, radius=defaults.RADIUS, use_sha1=False,
resort=False, verbose=False):
"""
Converts a sequence of page-partitioned revision documents into a sequence
of reverts.
:Params:
rev_docs : `iterable` ( `dict` )
a page-partitioned sequence of revision documents
radius : `int`
The maximum number of revisions that a revert can reference.
use_sha1 : `bool`
Use the sha1 field as the checksum for comparison.
resort : `bool`
If True, re-sort the revisions of each page.
verbose : `bool`
Print dots and stuff
"""
page_rev_docs = groupby(rev_docs, lambda rd: rd.get('page'))
for page_doc, rev_docs in page_rev_docs:
if verbose:
sys.stderr.write(page_doc.get('title') + ": ")
sys.stderr.flush()
if resort:
if verbose:
sys.stderr.write("(sorting) ")
sys.stderr.flush()
rev_docs = sorted(
rev_docs, key=lambda r: (r.get('timestamp'), r.get('id')))
detector = Detector(radius=radius)
for rev_doc in rev_docs:
if not use_sha1 and 'text' not in rev_doc:
logger.warn("Skipping {0}: 'text' field not found in {0}"
.format(rev_doc['id'], rev_doc))
continue
if use_sha1:
checksum = rev_doc.get('sha1') or DummyChecksum()
elif 'text' in rev_doc:
text_bytes = bytes(rev_doc['text'], 'utf8', 'replace')
checksum = hashlib.sha1(text_bytes).digest()
revert = detector.process(checksum, rev_doc)
if revert:
yield revert.to_json()
if verbose:
sys.stderr.write("r")
sys.stderr.flush()
else:
if verbose:
sys.stderr.write(".")
sys.stderr.flush()
if verbose:
sys.stderr.write("\n")
sys.stderr.flush()
| python |
resort : `bool`
If True, re-sort the revisions of each page.
verbose : `bool`
Print dots and stuff | [
"Converts",
"a",
"sequence",
"of",
"page",
"-",
"partitioned",
"revision",
"documents",
"into",
"a",
"sequence",
"of",
"reverts",
"."
] | train | https://github.com/mediawiki-utilities/python-mwreverts/blob/d379ac941e14e235ad82a48bd445a3dfa6cc022e/mwreverts/utilities/revdocs2reverts.py#L54-L114 |
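A minimal driver sketch for revdocs2reverts; the input file name and the JSON-lines reader are assumptions for illustration, not part of the module above.

# Illustrative only: feed page-partitioned revision documents (one JSON object
# per line) through revdocs2reverts and print each detected revert as JSON.
import json
import sys

def read_rev_docs(path):
    # Each line is one revision document with 'page', 'id', 'timestamp', 'sha1'/'text'.
    with open(path) as infile:
        for line in infile:
            yield json.loads(line)

for revert in revdocs2reverts(read_rev_docs("rev_docs.jsonl"),
                              radius=15, use_sha1=True, verbose=True):
    sys.stdout.write(json.dumps(revert) + "\n")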
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_hrfutils.py | spm_hrf_compat | def spm_hrf_compat(t,
peak_delay=6,
under_delay=16,
peak_disp=1,
under_disp=1,
p_u_ratio=6,
normalize=True,
):
""" SPM HRF function from sum of two gamma PDFs
This function is designed to be partially compatible with SPMs `spm_hrf.m`
function.
The SPN HRF is a *peak* gamma PDF (with location `peak_delay` and
dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location
`under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`).
Parameters
----------
t : array-like
vector of times at which to sample HRF
peak_delay : float, optional
delay of peak
peak_disp : float, optional
width (dispersion) of peak
under_delay : float, optional
delay of undershoot
under_disp : float, optional
width (dispersion) of undershoot
p_u_ratio : float, optional
peak to undershoot ratio. Undershoot divided by this value before
subtracting from peak.
normalize : {True, False}, optional
If True, divide HRF values by their sum before returning. SPM does this
by default.
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
See ``spm_hrf.m`` in the SPM distribution.
"""
if len([v for v in [peak_delay, peak_disp, under_delay, under_disp]
if v <= 0]):
raise ValueError("delays and dispersions must be > 0")
# gamma.pdf only defined for t > 0
hrf = np.zeros(t.shape, dtype=np.float)
pos_t = t[t > 0]
peak = sps.gamma.pdf(pos_t,
peak_delay / peak_disp,
loc=0,
scale=peak_disp)
undershoot = sps.gamma.pdf(pos_t,
under_delay / under_disp,
loc=0,
scale=under_disp)
hrf[t > 0] = peak - undershoot / p_u_ratio
if not normalize:
return hrf
return hrf / np.max(hrf) | python | def spm_hrf_compat(t,
peak_delay=6,
under_delay=16,
peak_disp=1,
under_disp=1,
p_u_ratio=6,
normalize=True,
):
""" SPM HRF function from sum of two gamma PDFs
This function is designed to be partially compatible with SPMs `spm_hrf.m`
function.
The SPM HRF is a *peak* gamma PDF (with location `peak_delay` and
dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location
`under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`).
Parameters
----------
t : array-like
vector of times at which to sample HRF
peak_delay : float, optional
delay of peak
peak_disp : float, optional
width (dispersion) of peak
under_delay : float, optional
delay of undershoot
under_disp : float, optional
width (dispersion) of undershoot
p_u_ratio : float, optional
peak to undershoot ratio. Undershoot divided by this value before
subtracting from peak.
normalize : {True, False}, optional
If True, divide HRF values by their sum before returning. SPM does this
by default.
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
See ``spm_hrf.m`` in the SPM distribution.
"""
if len([v for v in [peak_delay, peak_disp, under_delay, under_disp]
if v <= 0]):
raise ValueError("delays and dispersions must be > 0")
# gamma.pdf only defined for t > 0
hrf = np.zeros(t.shape, dtype=np.float)
pos_t = t[t > 0]
peak = sps.gamma.pdf(pos_t,
peak_delay / peak_disp,
loc=0,
scale=peak_disp)
undershoot = sps.gamma.pdf(pos_t,
under_delay / under_disp,
loc=0,
scale=under_disp)
hrf[t > 0] = peak - undershoot / p_u_ratio
if not normalize:
return hrf
return hrf / np.max(hrf) | [
"def",
"spm_hrf_compat",
"(",
"t",
",",
"peak_delay",
"=",
"6",
",",
"under_delay",
"=",
"16",
",",
"peak_disp",
"=",
"1",
",",
"under_disp",
"=",
"1",
",",
"p_u_ratio",
"=",
"6",
",",
"normalize",
"=",
"True",
",",
")",
":",
"if",
"len",
"(",
"[",
"v",
"for",
"v",
"in",
"[",
"peak_delay",
",",
"peak_disp",
",",
"under_delay",
",",
"under_disp",
"]",
"if",
"v",
"<=",
"0",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"delays and dispersions must be > 0\"",
")",
"# gamma.pdf only defined for t > 0",
"hrf",
"=",
"np",
".",
"zeros",
"(",
"t",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"pos_t",
"=",
"t",
"[",
"t",
">",
"0",
"]",
"peak",
"=",
"sps",
".",
"gamma",
".",
"pdf",
"(",
"pos_t",
",",
"peak_delay",
"/",
"peak_disp",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"peak_disp",
")",
"undershoot",
"=",
"sps",
".",
"gamma",
".",
"pdf",
"(",
"pos_t",
",",
"under_delay",
"/",
"under_disp",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"under_disp",
")",
"hrf",
"[",
"t",
">",
"0",
"]",
"=",
"peak",
"-",
"undershoot",
"/",
"p_u_ratio",
"if",
"not",
"normalize",
":",
"return",
"hrf",
"return",
"hrf",
"/",
"np",
".",
"max",
"(",
"hrf",
")"
] | SPM HRF function from sum of two gamma PDFs
This function is designed to be partially compatible with SPMs `spm_hrf.m`
function.
The SPM HRF is a *peak* gamma PDF (with location `peak_delay` and
dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location
`under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`).
Parameters
----------
t : array-like
vector of times at which to sample HRF
peak_delay : float, optional
delay of peak
peak_disp : float, optional
width (dispersion) of peak
under_delay : float, optional
delay of undershoot
under_disp : float, optional
width (dispersion) of undershoot
p_u_ratio : float, optional
peak to undershoot ratio. Undershoot divided by this value before
subtracting from peak.
normalize : {True, False}, optional
If True, divide HRF values by their sum before returning. SPM does this
by default.
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
See ``spm_hrf.m`` in the SPM distribution. | [
"SPM",
"HRF",
"function",
"from",
"sum",
"of",
"two",
"gamma",
"PDFs"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_hrfutils.py#L32-L94 |
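A short sketch of sampling the HRF defined above; the 0.1 s grid and 32 s window are arbitrary choices for illustration.

# Illustrative only: evaluate the SPM-compatible HRF on a fine time grid.
import numpy as np

t = np.arange(0, 32, 0.1)        # sample times in seconds
hrf = spm_hrf_compat(t)          # peak_delay=6, under_delay=16 by default
print("time of peak: %.1f s" % t[np.argmax(hrf)])
print("undershoot minimum: %.3f" % hrf.min())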
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_hrfutils.py | dspmt | def dspmt(t):
""" SPM canonical HRF derivative, HRF derivative values for time values `t`
This is the canonical HRF derivative function as used in SPM.
It is the numerical difference of the HRF sampled at time `t` minus the
values sampled at time `t` -1
"""
t = np.asarray(t)
return spmt(t) - spmt(t - 1) | python | def dspmt(t):
""" SPM canonical HRF derivative, HRF derivative values for time values `t`
This is the canonical HRF derivative function as used in SPM.
It is the numerical difference of the HRF sampled at time `t` minus the
values sampled at time `t` -1
"""
t = np.asarray(t)
return spmt(t) - spmt(t - 1) | [
"def",
"dspmt",
"(",
"t",
")",
":",
"t",
"=",
"np",
".",
"asarray",
"(",
"t",
")",
"return",
"spmt",
"(",
"t",
")",
"-",
"spmt",
"(",
"t",
"-",
"1",
")"
] | SPM canonical HRF derivative, HRF derivative values for time values `t`
This is the canonical HRF derivative function as used in SPM.
It is the numerical difference of the HRF sampled at time `t` minus the
values sampled at time `t` -1 | [
"SPM",
"canonical",
"HRF",
"derivative",
"HRF",
"derivative",
"values",
"for",
"time",
"values",
"t"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_hrfutils.py#L115-L124 |
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_hrfutils.py | cnvlTc | def cnvlTc(idxPrc,
aryPrfTcChunk,
lstHrf,
varTr,
varNumVol,
queOut,
varOvsmpl=10,
varHrfLen=32,
):
"""
Convolution of time courses with HRF model.
"""
# *** prepare hrf time courses for convolution
print("---------Process " + str(idxPrc) +
": Prepare hrf time courses for convolution")
# get frame times, i.e. start point of every volume in seconds
vecFrms = np.arange(0, varTr * varNumVol, varTr)
# get supersampled frames times, i.e. start point of every volume in
# seconds, since convolution takes place in temp. upsampled space
vecFrmTms = np.arange(0, varTr * varNumVol, varTr / varOvsmpl)
# get resolution of supersampled frame times
varRes = varTr / float(varOvsmpl)
# prepare empty list that will contain the arrays with hrf time courses
lstBse = []
for hrfFn in lstHrf:
# needs to be a multiple of oversample
vecTmpBse = hrfFn(np.linspace(0, varHrfLen,
(varHrfLen // varTr) * varOvsmpl))
lstBse.append(vecTmpBse)
# *** prepare pixel time courses for convolution
print("---------Process " + str(idxPrc) +
": Prepare pixel time courses for convolution")
# adjust the input, if necessary, such that input is 2D, with last dim time
tplInpShp = aryPrfTcChunk.shape
aryPrfTcChunk = aryPrfTcChunk.reshape((-1, aryPrfTcChunk.shape[-1]))
# Prepare an empty array for output
aryConv = np.zeros((aryPrfTcChunk.shape[0], len(lstHrf),
aryPrfTcChunk.shape[1]))
print("---------Process " + str(idxPrc) +
": Convolve")
# Each time course is convolved with the HRF separately, because the
# numpy convolution function can only be used on one-dimensional data.
# Thus, we have to loop through time courses:
for idxTc in range(0, aryConv.shape[0]):
# Extract the current time course:
vecTc = aryPrfTcChunk[idxTc, :]
# upsample the pixel time course, so that it matches the hrf time crs
vecTcUps = np.zeros(int(varNumVol * varTr/varRes))
vecOns = vecFrms[vecTc.astype(bool)]
vecInd = np.round(vecOns / varRes).astype(np.int)
vecTcUps[vecInd] = 1.
# *** convolve
for indBase, base in enumerate(lstBse):
# perform the convolution
col = np.convolve(base, vecTcUps, mode='full')[:vecTcUps.size]
# get function for downsampling
f = interp1d(vecFrmTms, col)
# downsample to original space and assign to ary
aryConv[idxTc, indBase, :] = f(vecFrms)
# determine output shape
tplOutShp = tplInpShp[:-1] + (len(lstHrf), ) + (tplInpShp[-1], )
# Create list containing the convolved timecourses, and the process ID:
lstOut = [idxPrc,
aryConv.reshape(tplOutShp)]
# Put output to queue:
queOut.put(lstOut) | python | def cnvlTc(idxPrc,
aryPrfTcChunk,
lstHrf,
varTr,
varNumVol,
queOut,
varOvsmpl=10,
varHrfLen=32,
):
"""
Convolution of time courses with HRF model.
"""
# *** prepare hrf time courses for convolution
print("---------Process " + str(idxPrc) +
": Prepare hrf time courses for convolution")
# get frame times, i.e. start point of every volume in seconds
vecFrms = np.arange(0, varTr * varNumVol, varTr)
# get supersampled frames times, i.e. start point of every volume in
# seconds, since convolution takes place in temp. upsampled space
vecFrmTms = np.arange(0, varTr * varNumVol, varTr / varOvsmpl)
# get resolution of supersampled frame times
varRes = varTr / float(varOvsmpl)
# prepare empty list that will contain the arrays with hrf time courses
lstBse = []
for hrfFn in lstHrf:
# needs to be a multiple of oversample
vecTmpBse = hrfFn(np.linspace(0, varHrfLen,
(varHrfLen // varTr) * varOvsmpl))
lstBse.append(vecTmpBse)
# *** prepare pixel time courses for convolution
print("---------Process " + str(idxPrc) +
": Prepare pixel time courses for convolution")
# adjust the input, if necessary, such that input is 2D, with last dim time
tplInpShp = aryPrfTcChunk.shape
aryPrfTcChunk = aryPrfTcChunk.reshape((-1, aryPrfTcChunk.shape[-1]))
# Prepare an empty array for output
aryConv = np.zeros((aryPrfTcChunk.shape[0], len(lstHrf),
aryPrfTcChunk.shape[1]))
print("---------Process " + str(idxPrc) +
": Convolve")
# Each time course is convolved with the HRF separately, because the
# numpy convolution function can only be used on one-dimensional data.
# Thus, we have to loop through time courses:
for idxTc in range(0, aryConv.shape[0]):
# Extract the current time course:
vecTc = aryPrfTcChunk[idxTc, :]
# upsample the pixel time course, so that it matches the hrf time crs
vecTcUps = np.zeros(int(varNumVol * varTr/varRes))
vecOns = vecFrms[vecTc.astype(bool)]
vecInd = np.round(vecOns / varRes).astype(np.int)
vecTcUps[vecInd] = 1.
# *** convolve
for indBase, base in enumerate(lstBse):
# perform the convolution
col = np.convolve(base, vecTcUps, mode='full')[:vecTcUps.size]
# get function for downsampling
f = interp1d(vecFrmTms, col)
# downsample to original space and assign to ary
aryConv[idxTc, indBase, :] = f(vecFrms)
# determine output shape
tplOutShp = tplInpShp[:-1] + (len(lstHrf), ) + (tplInpShp[-1], )
# Create list containing the convolved timecourses, and the process ID:
lstOut = [idxPrc,
aryConv.reshape(tplOutShp)]
# Put output to queue:
queOut.put(lstOut) | [
"def",
"cnvlTc",
"(",
"idxPrc",
",",
"aryPrfTcChunk",
",",
"lstHrf",
",",
"varTr",
",",
"varNumVol",
",",
"queOut",
",",
"varOvsmpl",
"=",
"10",
",",
"varHrfLen",
"=",
"32",
",",
")",
":",
"# *** prepare hrf time courses for convolution",
"print",
"(",
"\"---------Process \"",
"+",
"str",
"(",
"idxPrc",
")",
"+",
"\": Prepare hrf time courses for convolution\"",
")",
"# get frame times, i.e. start point of every volume in seconds",
"vecFrms",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"varTr",
"*",
"varNumVol",
",",
"varTr",
")",
"# get supersampled frames times, i.e. start point of every volume in",
"# seconds, since convolution takes place in temp. upsampled space",
"vecFrmTms",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"varTr",
"*",
"varNumVol",
",",
"varTr",
"/",
"varOvsmpl",
")",
"# get resolution of supersampled frame times",
"varRes",
"=",
"varTr",
"/",
"float",
"(",
"varOvsmpl",
")",
"# prepare empty list that will contain the arrays with hrf time courses",
"lstBse",
"=",
"[",
"]",
"for",
"hrfFn",
"in",
"lstHrf",
":",
"# needs to be a multiple of oversample",
"vecTmpBse",
"=",
"hrfFn",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"varHrfLen",
",",
"(",
"varHrfLen",
"//",
"varTr",
")",
"*",
"varOvsmpl",
")",
")",
"lstBse",
".",
"append",
"(",
"vecTmpBse",
")",
"# *** prepare pixel time courses for convolution",
"print",
"(",
"\"---------Process \"",
"+",
"str",
"(",
"idxPrc",
")",
"+",
"\": Prepare pixel time courses for convolution\"",
")",
"# adjust the input, if necessary, such that input is 2D, with last dim time",
"tplInpShp",
"=",
"aryPrfTcChunk",
".",
"shape",
"aryPrfTcChunk",
"=",
"aryPrfTcChunk",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"aryPrfTcChunk",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"# Prepare an empty array for ouput",
"aryConv",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryPrfTcChunk",
".",
"shape",
"[",
"0",
"]",
",",
"len",
"(",
"lstHrf",
")",
",",
"aryPrfTcChunk",
".",
"shape",
"[",
"1",
"]",
")",
")",
"print",
"(",
"\"---------Process \"",
"+",
"str",
"(",
"idxPrc",
")",
"+",
"\": Convolve\"",
")",
"# Each time course is convolved with the HRF separately, because the",
"# numpy convolution function can only be used on one-dimensional data.",
"# Thus, we have to loop through time courses:",
"for",
"idxTc",
"in",
"range",
"(",
"0",
",",
"aryConv",
".",
"shape",
"[",
"0",
"]",
")",
":",
"# Extract the current time course:",
"vecTc",
"=",
"aryPrfTcChunk",
"[",
"idxTc",
",",
":",
"]",
"# upsample the pixel time course, so that it matches the hrf time crs",
"vecTcUps",
"=",
"np",
".",
"zeros",
"(",
"int",
"(",
"varNumVol",
"*",
"varTr",
"/",
"varRes",
")",
")",
"vecOns",
"=",
"vecFrms",
"[",
"vecTc",
".",
"astype",
"(",
"bool",
")",
"]",
"vecInd",
"=",
"np",
".",
"round",
"(",
"vecOns",
"/",
"varRes",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"vecTcUps",
"[",
"vecInd",
"]",
"=",
"1.",
"# *** convolve",
"for",
"indBase",
",",
"base",
"in",
"enumerate",
"(",
"lstBse",
")",
":",
"# perform the convolution",
"col",
"=",
"np",
".",
"convolve",
"(",
"base",
",",
"vecTcUps",
",",
"mode",
"=",
"'full'",
")",
"[",
":",
"vecTcUps",
".",
"size",
"]",
"# get function for downsampling",
"f",
"=",
"interp1d",
"(",
"vecFrmTms",
",",
"col",
")",
"# downsample to original space and assign to ary",
"aryConv",
"[",
"idxTc",
",",
"indBase",
",",
":",
"]",
"=",
"f",
"(",
"vecFrms",
")",
"# determine output shape",
"tplOutShp",
"=",
"tplInpShp",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"len",
"(",
"lstHrf",
")",
",",
")",
"+",
"(",
"tplInpShp",
"[",
"-",
"1",
"]",
",",
")",
"# Create list containing the convolved timecourses, and the process ID:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"aryConv",
".",
"reshape",
"(",
"tplOutShp",
")",
"]",
"# Put output to queue:",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Convolution of time courses with HRF model. | [
"Convolution",
"of",
"time",
"courses",
"with",
"HRF",
"model",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_hrfutils.py#L143-L219 |
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_hrfutils.py | funcHrf | def funcHrf(varNumVol, varTr):
"""Create double gamma function.
Source:
http://www.jarrodmillman.com/rcsds/lectures/convolution_background.html
"""
vecX = np.arange(0, varNumVol, 1)
# Expected time of peak of HRF [s]:
varHrfPeak = 6.0 / varTr
# Expected time of undershoot of HRF [s]:
varHrfUndr = 12.0 / varTr
# Scaling factor undershoot (relative to peak):
varSclUndr = 0.35
# Gamma pdf for the peak
vecHrfPeak = gamma.pdf(vecX, varHrfPeak)
# Gamma pdf for the undershoot
vecHrfUndr = gamma.pdf(vecX, varHrfUndr)
# Combine them
vecHrf = vecHrfPeak - varSclUndr * vecHrfUndr
# Scale maximum of HRF to 1.0:
vecHrf = np.divide(vecHrf, np.max(vecHrf))
return vecHrf | python | def funcHrf(varNumVol, varTr):
"""Create double gamma function.
Source:
http://www.jarrodmillman.com/rcsds/lectures/convolution_background.html
"""
vecX = np.arange(0, varNumVol, 1)
# Expected time of peak of HRF [s]:
varHrfPeak = 6.0 / varTr
# Expected time of undershoot of HRF [s]:
varHrfUndr = 12.0 / varTr
# Scaling factor undershoot (relative to peak):
varSclUndr = 0.35
# Gamma pdf for the peak
vecHrfPeak = gamma.pdf(vecX, varHrfPeak)
# Gamma pdf for the undershoot
vecHrfUndr = gamma.pdf(vecX, varHrfUndr)
# Combine them
vecHrf = vecHrfPeak - varSclUndr * vecHrfUndr
# Scale maximum of HRF to 1.0:
vecHrf = np.divide(vecHrf, np.max(vecHrf))
return vecHrf | [
"def",
"funcHrf",
"(",
"varNumVol",
",",
"varTr",
")",
":",
"vecX",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"varNumVol",
",",
"1",
")",
"# Expected time of peak of HRF [s]:",
"varHrfPeak",
"=",
"6.0",
"/",
"varTr",
"# Expected time of undershoot of HRF [s]:",
"varHrfUndr",
"=",
"12.0",
"/",
"varTr",
"# Scaling factor undershoot (relative to peak):",
"varSclUndr",
"=",
"0.35",
"# Gamma pdf for the peak",
"vecHrfPeak",
"=",
"gamma",
".",
"pdf",
"(",
"vecX",
",",
"varHrfPeak",
")",
"# Gamma pdf for the undershoot",
"vecHrfUndr",
"=",
"gamma",
".",
"pdf",
"(",
"vecX",
",",
"varHrfUndr",
")",
"# Combine them",
"vecHrf",
"=",
"vecHrfPeak",
"-",
"varSclUndr",
"*",
"vecHrfUndr",
"# Scale maximum of HRF to 1.0:",
"vecHrf",
"=",
"np",
".",
"divide",
"(",
"vecHrf",
",",
"np",
".",
"max",
"(",
"vecHrf",
")",
")",
"return",
"vecHrf"
] | Create double gamma function.
Source:
http://www.jarrodmillman.com/rcsds/lectures/convolution_background.html | [
"Create",
"double",
"gamma",
"function",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_hrfutils.py#L226-L251 |
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_hrfutils.py | cnvlTcOld | def cnvlTcOld(idxPrc,
aryPrfTcChunk,
varTr,
varNumVol,
queOut):
"""
Old version:
Convolution of time courses with one canonical HRF model.
"""
# Create 'canonical' HRF time course model:
vecHrf = funcHrf(varNumVol, varTr)
# adjust the input, if necessary, such that input is 2D, with last dim time
tplInpShp = aryPrfTcChunk.shape
aryPrfTcChunk = aryPrfTcChunk.reshape((-1, aryPrfTcChunk.shape[-1]))
# Prepare an empty array for ouput
aryConv = np.zeros(np.shape(aryPrfTcChunk))
# Each time course is convolved with the HRF separately, because the
# numpy convolution function can only be used on one-dimensional data.
# Thus, we have to loop through time courses:
for idxTc, vecTc in enumerate(aryPrfTcChunk):
# In order to avoid an artefact at the end of the time series, we have
# to concatenate an empty array to both the design matrix and the HRF
# model before convolution.
vecTc = np.append(vecTc, np.zeros(100))
vecHrf = np.append(vecHrf, np.zeros(100))
# Convolve design matrix with HRF model:
aryConv[idxTc, :] = np.convolve(vecTc, vecHrf,
mode='full')[:varNumVol]
# determine output shape
tplOutShp = tplInpShp[:-1] + (1, ) + (tplInpShp[-1], )
# Create list containing the convolved timecourses, and the process ID:
lstOut = [idxPrc,
aryConv.reshape(tplOutShp)]
# Put output to queue:
queOut.put(lstOut) | python | def cnvlTcOld(idxPrc,
aryPrfTcChunk,
varTr,
varNumVol,
queOut):
"""
Old version:
Convolution of time courses with one canonical HRF model.
"""
# Create 'canonical' HRF time course model:
vecHrf = funcHrf(varNumVol, varTr)
# adjust the input, if necessary, such that input is 2D, with last dim time
tplInpShp = aryPrfTcChunk.shape
aryPrfTcChunk = aryPrfTcChunk.reshape((-1, aryPrfTcChunk.shape[-1]))
# Prepare an empty array for output
aryConv = np.zeros(np.shape(aryPrfTcChunk))
# Each time course is convolved with the HRF separately, because the
# numpy convolution function can only be used on one-dimensional data.
# Thus, we have to loop through time courses:
for idxTc, vecTc in enumerate(aryPrfTcChunk):
# In order to avoid an artefact at the end of the time series, we have
# to concatenate an empty array to both the design matrix and the HRF
# model before convolution.
vecTc = np.append(vecTc, np.zeros(100))
vecHrf = np.append(vecHrf, np.zeros(100))
# Convolve design matrix with HRF model:
aryConv[idxTc, :] = np.convolve(vecTc, vecHrf,
mode='full')[:varNumVol]
# determine output shape
tplOutShp = tplInpShp[:-1] + (1, ) + (tplInpShp[-1], )
# Create list containing the convolved timecourses, and the process ID:
lstOut = [idxPrc,
aryConv.reshape(tplOutShp)]
# Put output to queue:
queOut.put(lstOut) | [
"def",
"cnvlTcOld",
"(",
"idxPrc",
",",
"aryPrfTcChunk",
",",
"varTr",
",",
"varNumVol",
",",
"queOut",
")",
":",
"# Create 'canonical' HRF time course model:",
"vecHrf",
"=",
"funcHrf",
"(",
"varNumVol",
",",
"varTr",
")",
"# adjust the input, if necessary, such that input is 2D, with last dim time",
"tplInpShp",
"=",
"aryPrfTcChunk",
".",
"shape",
"aryPrfTcChunk",
"=",
"aryPrfTcChunk",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"aryPrfTcChunk",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"# Prepare an empty array for ouput",
"aryConv",
"=",
"np",
".",
"zeros",
"(",
"np",
".",
"shape",
"(",
"aryPrfTcChunk",
")",
")",
"# Each time course is convolved with the HRF separately, because the",
"# numpy convolution function can only be used on one-dimensional data.",
"# Thus, we have to loop through time courses:",
"for",
"idxTc",
",",
"vecTc",
"in",
"enumerate",
"(",
"aryPrfTcChunk",
")",
":",
"# In order to avoid an artefact at the end of the time series, we have",
"# to concatenate an empty array to both the design matrix and the HRF",
"# model before convolution.",
"vecTc",
"=",
"np",
".",
"append",
"(",
"vecTc",
",",
"np",
".",
"zeros",
"(",
"100",
")",
")",
"vecHrf",
"=",
"np",
".",
"append",
"(",
"vecHrf",
",",
"np",
".",
"zeros",
"(",
"100",
")",
")",
"# Convolve design matrix with HRF model:",
"aryConv",
"[",
"idxTc",
",",
":",
"]",
"=",
"np",
".",
"convolve",
"(",
"vecTc",
",",
"vecHrf",
",",
"mode",
"=",
"'full'",
")",
"[",
":",
"varNumVol",
"]",
"# determine output shape",
"tplOutShp",
"=",
"tplInpShp",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"1",
",",
")",
"+",
"(",
"tplInpShp",
"[",
"-",
"1",
"]",
",",
")",
"# Create list containing the convolved timecourses, and the process ID:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"aryConv",
".",
"reshape",
"(",
"tplOutShp",
")",
"]",
"# Put output to queue:",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Old version:
Convolution of time courses with one canonical HRF model. | [
"Old",
"version",
":",
"Convolution",
"of",
"time",
"courses",
"with",
"one",
"canonical",
"HRF",
"model",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_hrfutils.py#L254-L296 |
pmacosta/pcsv | pcsv/dsort.py | dsort | def dsort(fname, order, has_header=True, frow=0, ofname=None):
r"""
Sort file data.
:param fname: Name of the comma-separated values file to sort
:type fname: FileNameExists_
:param order: Sort order
:type order: :ref:`CsvColFilter`
:param has_header: Flag that indicates whether the comma-separated
values file to sort has column headers in its first line
(True) or not (False)
:type has_header: boolean
:param frow: First data row (starting from 1). If 0 the row where data
starts is auto-detected as the first row that has a number
(integer or float) in at least one of its columns
:type frow: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the sorted data. If None the sorting is
done "in place"
:type ofname: FileName_ or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.dsort.dsort
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frow\` is not valid)
* RuntimeError (Argument \`has_header\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
ofname = fname if ofname is None else ofname
obj = CsvFile(fname=fname, has_header=has_header, frow=frow)
obj.dsort(order)
obj.write(fname=ofname, header=has_header, append=False) | python | def dsort(fname, order, has_header=True, frow=0, ofname=None):
r"""
Sort file data.
:param fname: Name of the comma-separated values file to sort
:type fname: FileNameExists_
:param order: Sort order
:type order: :ref:`CsvColFilter`
:param has_header: Flag that indicates whether the comma-separated
values file to sort has column headers in its first line
(True) or not (False)
:type has_header: boolean
:param frow: First data row (starting from 1). If 0 the row where data
starts is auto-detected as the first row that has a number
(integer or float) in at least one of its columns
:type frow: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the sorted data. If None the sorting is
done "in place"
:type ofname: FileName_ or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.dsort.dsort
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frow\` is not valid)
* RuntimeError (Argument \`has_header\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
ofname = fname if ofname is None else ofname
obj = CsvFile(fname=fname, has_header=has_header, frow=frow)
obj.dsort(order)
obj.write(fname=ofname, header=has_header, append=False) | [
"def",
"dsort",
"(",
"fname",
",",
"order",
",",
"has_header",
"=",
"True",
",",
"frow",
"=",
"0",
",",
"ofname",
"=",
"None",
")",
":",
"ofname",
"=",
"fname",
"if",
"ofname",
"is",
"None",
"else",
"ofname",
"obj",
"=",
"CsvFile",
"(",
"fname",
"=",
"fname",
",",
"has_header",
"=",
"has_header",
",",
"frow",
"=",
"frow",
")",
"obj",
".",
"dsort",
"(",
"order",
")",
"obj",
".",
"write",
"(",
"fname",
"=",
"ofname",
",",
"header",
"=",
"has_header",
",",
"append",
"=",
"False",
")"
] | r"""
Sort file data.
:param fname: Name of the comma-separated values file to sort
:type fname: FileNameExists_
:param order: Sort order
:type order: :ref:`CsvColFilter`
:param has_header: Flag that indicates whether the comma-separated
values file to sort has column headers in its first line
(True) or not (False)
:type has_header: boolean
:param frow: First data row (starting from 1). If 0 the row where data
starts is auto-detected as the first row that has a number
(integer or float) in at least one of its columns
:type frow: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the sorted data. If None the sorting is
done "in place"
:type ofname: FileName_ or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.dsort.dsort
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frow\` is not valid)
* RuntimeError (Argument \`has_header\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]] | [
"r",
"Sort",
"file",
"data",
"."
] | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/dsort.py#L37-L93 |
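A usage sketch for dsort under assumed inputs; the file names and the column name 'Year' are illustrative, and the dictionary form of the order argument (column mapped to 'A' or 'D') is an assumption based on the :ref:`CsvColFilter` convention referenced in the docstring.

# Illustrative only: sort input.csv by the 'Year' column in descending order
# and write the result to a new file instead of sorting in place.
from pcsv.dsort import dsort

dsort(
    fname="input.csv",
    order=[{"Year": "D"}],      # 'D' = descending, 'A' = ascending (assumed spec)
    has_header=True,
    ofname="input_sorted.csv",
)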
bachya/pyflunearyou | example.py | main | async def main() -> None:
"""Create the aiohttp session and run the example."""
logging.basicConfig(level=logging.INFO)
async with ClientSession() as websession:
try:
# Create a client:
client = Client(websession)
# Get user data for the client's latitude/longitude:
user_coord_resp = await client.user_reports.status_by_coordinates(
LATITUDE, LONGITUDE)
_LOGGER.info(
'User data by latitude/longitude (%s, %s): %s', LATITUDE,
LONGITUDE, user_coord_resp)
# Get user data for a specific ZIP code:
user_zip_resp = await client.user_reports.status_by_zip(ZIP_CODE)
_LOGGER.info(
'User data by ZIP code (%s): %s', ZIP_CODE, user_zip_resp)
# Get CDC data for the client's latitude/longitude:
cdc_coord_resp = await client.cdc_reports.status_by_coordinates(
LATITUDE, LONGITUDE)
_LOGGER.info(
'CDC data by latitude/longitude (%s, %s): %s', LATITUDE,
LONGITUDE, cdc_coord_resp)
# Get CDC data for North Dakota
cdc_state_resp = await client.cdc_reports.status_by_state(STATE)
_LOGGER.info(
'CDC data by state name (%s): %s', STATE, cdc_state_resp)
except FluNearYouError as err:
print(err) | python | async def main() -> None:
"""Create the aiohttp session and run the example."""
logging.basicConfig(level=logging.INFO)
async with ClientSession() as websession:
try:
# Create a client:
client = Client(websession)
# Get user data for the client's latitude/longitude:
user_coord_resp = await client.user_reports.status_by_coordinates(
LATITUDE, LONGITUDE)
_LOGGER.info(
'User data by latitude/longitude (%s, %s): %s', LATITUDE,
LONGITUDE, user_coord_resp)
# Get user data for a specific ZIP code:
user_zip_resp = await client.user_reports.status_by_zip(ZIP_CODE)
_LOGGER.info(
'User data by ZIP code (%s): %s', ZIP_CODE, user_zip_resp)
# Get CDC data for the client's latitude/longitude:
cdc_coord_resp = await client.cdc_reports.status_by_coordinates(
LATITUDE, LONGITUDE)
_LOGGER.info(
'CDC data by latitude/longitude (%s, %s): %s', LATITUDE,
LONGITUDE, cdc_coord_resp)
# Get CDC data for North Dakota
cdc_state_resp = await client.cdc_reports.status_by_state(STATE)
_LOGGER.info(
'CDC data by state name (%s): %s', STATE, cdc_state_resp)
except FluNearYouError as err:
print(err) | [
"async",
"def",
"main",
"(",
")",
"->",
"None",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
")",
"async",
"with",
"ClientSession",
"(",
")",
"as",
"websession",
":",
"try",
":",
"# Create a client:",
"client",
"=",
"Client",
"(",
"websession",
")",
"# Get user data for the client's latitude/longitude:",
"user_coord_resp",
"=",
"await",
"client",
".",
"user_reports",
".",
"status_by_coordinates",
"(",
"LATITUDE",
",",
"LONGITUDE",
")",
"_LOGGER",
".",
"info",
"(",
"'User data by latitude/longitude (%s, %s): %s'",
",",
"LATITUDE",
",",
"LONGITUDE",
",",
"user_coord_resp",
")",
"# Get user data for the a specific ZIP code:",
"user_zip_resp",
"=",
"await",
"client",
".",
"user_reports",
".",
"status_by_zip",
"(",
"ZIP_CODE",
")",
"_LOGGER",
".",
"info",
"(",
"'User data by ZIP code (%s): %s'",
",",
"ZIP_CODE",
",",
"user_zip_resp",
")",
"# Get CDC data for the client's latitude/longitude:",
"cdc_coord_resp",
"=",
"await",
"client",
".",
"cdc_reports",
".",
"status_by_coordinates",
"(",
"LATITUDE",
",",
"LONGITUDE",
")",
"_LOGGER",
".",
"info",
"(",
"'CDC data by latitude/longitude (%s, %s): %s'",
",",
"LATITUDE",
",",
"LONGITUDE",
",",
"cdc_coord_resp",
")",
"# Get CDC data for North Dakota",
"cdc_state_resp",
"=",
"await",
"client",
".",
"cdc_reports",
".",
"status_by_state",
"(",
"STATE",
")",
"_LOGGER",
".",
"info",
"(",
"'CDC data by state name (%s): %s'",
",",
"STATE",
",",
"cdc_state_resp",
")",
"except",
"FluNearYouError",
"as",
"err",
":",
"print",
"(",
"err",
")"
] | Create the aiohttp session and run the example. | [
"Create",
"the",
"aiohttp",
"session",
"and",
"run",
"the",
"example",
"."
] | train | https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/example.py#L18-L50 |
osilkin98/PyBRY | pybry/lbryd_api.py | LbryApi.call | def call(cls, method, params=None, timeout=600):
""" Makes a Call to the LBRY API
:param str method: Method to call from the LBRY API. See the full list of methods at
https://lbryio.github.io/lbry/cli/
:param dict params: Parameters to give the method selected
:param float timeout: The number of seconds to wait for a connection until we time out; 600 By Default.
:raises LBRYException: If the request returns an error when calling the API
:return: A Python `dict` object containing the data requested from the API
:rtype: dict
"""
params = [] if params is None else params
return cls.make_request(SERVER_ADDRESS, method, params, timeout=timeout) | python | def call(cls, method, params=None, timeout=600):
""" Makes a Call to the LBRY API
:param str method: Method to call from the LBRY API. See the full list of methods at
https://lbryio.github.io/lbry/cli/
:param dict params: Parameters to give the method selected
:param float timeout: The number of seconds to wait for a connection until we time out; 600 By Default.
:raises LBRYException: If the request returns an error when calling the API
:return: A Python `dict` object containing the data requested from the API
:rtype: dict
"""
params = [] if params is None else params
return cls.make_request(SERVER_ADDRESS, method, params, timeout=timeout) | [
"def",
"call",
"(",
"cls",
",",
"method",
",",
"params",
"=",
"None",
",",
"timeout",
"=",
"600",
")",
":",
"params",
"=",
"[",
"]",
"if",
"params",
"is",
"None",
"else",
"params",
"return",
"cls",
".",
"make_request",
"(",
"SERVER_ADDRESS",
",",
"method",
",",
"params",
",",
"timeout",
"=",
"timeout",
")"
] | Makes a Call to the LBRY API
:param str method: Method to call from the LBRY API. See the full list of methods at
https://lbryio.github.io/lbry/cli/
:param dict params: Parameters to give the method selected
:param float timeout: The number of seconds to wait for a connection until we time out; 600 By Default.
:raises LBRYException: If the request returns an error when calling the API
:return: A Python `dict` object containing the data requested from the API
:rtype: dict | [
"Makes",
"a",
"Call",
"to",
"the",
"LBRY",
"API"
] | train | https://github.com/osilkin98/PyBRY/blob/af86805a8077916f72f3fe980943d4cd741e61f0/pybry/lbryd_api.py#L14-L28 |
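A usage sketch for the call wrapper above; the import path is taken from the file path in this row, and the method name and parameter dict follow the LBRY API documentation linked in the docstring (exact parameter names vary between daemon versions, and a local lbrynet daemon must be running).

# Illustrative only: ask the local LBRY daemon to resolve a claim URL.
from pybry.lbryd_api import LbryApi

try:
    result = LbryApi.call("resolve", {"urls": "lbry://@lbry"})  # parameter name assumed
    print(result)
except Exception as exc:   # an LBRYException is raised on API-level errors
    print("daemon call failed:", exc)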
linkhub-sdk/popbill.py | popbill/htTaxinvoiceService.py | HTTaxinvoiceService.getJobState | def getJobState(self, CorpNum, JobID, UserID=None):
""" μμ§ μν νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
JobID : μμ
μμ΄λ
UserID : νλΉνμ μμ΄λ
return
μμ§ μν μ 보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "μμ
μμ΄λ(jobID)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
return self._httpget('/HomeTax/Taxinvoice/' + JobID + '/State', CorpNum, UserID) | python | def getJobState(self, CorpNum, JobID, UserID=None):
""" μμ§ μν νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
JobID : μμ
μμ΄λ
UserID : νλΉνμ μμ΄λ
return
μμ§ μν μ 보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "μμ
μμ΄λ(jobID)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
return self._httpget('/HomeTax/Taxinvoice/' + JobID + '/State', CorpNum, UserID) | [
"def",
"getJobState",
"(",
"self",
",",
"CorpNum",
",",
"JobID",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"JobID",
"==",
"None",
"or",
"len",
"(",
"JobID",
")",
"!=",
"18",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"μμ
μμ΄λ(jobID)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/HomeTax/Taxinvoice/'",
"+",
"JobID",
"+",
"'/State'",
",",
"CorpNum",
",",
"UserID",
")"
] | Check collection job status
args
CorpNum : Popbill member business registration number
JobID : job ID
UserID : Popbill member ID
return
collection job status information
raise
PopbillException | [
"μμ§",
"μν",
"νμΈ",
"args",
"CorpNum",
":",
"νλΉνμ",
"μ¬μ
μλ²νΈ",
"JobID",
":",
"μμ
μμ΄λ",
"UserID",
":",
"νλΉνμ",
"μμ΄λ",
"return",
"μμ§",
"μν",
"μ 보",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htTaxinvoiceService.py#L71-L85 |
linkhub-sdk/popbill.py | popbill/htTaxinvoiceService.py | HTTaxinvoiceService.search | def search(self, CorpNum, JobID, Type, TaxType, PurposeType, TaxRegIDType, TaxRegIDYN, TaxRegID, Page, PerPage,
Order, UserID=None):
""" μμ§ κ²°κ³Ό μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
JobID : μμ
μμ΄λ
Type : λ¬Έμνν λ°°μ΄, N-μΌλ°μ μμΈκΈκ³μ°μ, M-μμ μ μμΈκΈκ³μ°μ
TaxType : κ³ΌμΈνν λ°°μ΄, T-κ³ΌμΈ, N-λ©΄μΈ, Z-μμΈ
PurposeType : μμ/μ²κ΅¬, R-μμ, C-μ²κ΅¬, N-μμ
TaxRegIDType : μ’
μ¬μ
μ₯λ²νΈ μ¬μ
μμ ν, S-곡κΈμ, B-곡κΈλ°λμ, T-μνμ
TaxRegIDYN : μ’
μ¬μ
μ₯λ²νΈ μ 무, 곡백-μ 체쑰ν, 0-μ’
μ¬μ
μ₯λ²νΈ μμ, 1-μ’
μ¬μ
μ₯λ²νΈ μμ
TaxRegID : μ’
μ¬μ
μ₯λ²νΈ, μ½€λ§(",")λ‘ κ΅¬λΆ νμ¬ κ΅¬μ± ex) '0001,0002'
Page : νμ΄μ§ λ²νΈ
PerPage : νμ΄μ§λΉ λͺ©λ‘ κ°μ, μ΅λ 1000κ°
Order : μ λ ¬ λ°©ν₯, D-λ΄λ¦Όμ°¨μ, A-μ€λ¦μ°¨μ
UserID : νλΉνμ μμ΄λ
return
μμ§ κ²°κ³Ό μ 보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "μμ
μμ΄λ(jobID)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
uri = '/HomeTax/Taxinvoice/' + JobID
uri += '?Type=' + ','.join(Type)
uri += '&TaxType=' + ','.join(TaxType)
uri += '&PurposeType=' + ','.join(PurposeType)
uri += '&TaxRegIDType=' + TaxRegIDType
uri += '&TaxRegID=' + TaxRegID
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
if TaxRegIDYN != '':
uri += '&TaxRegIDYN=' + TaxRegIDYN
return self._httpget(uri, CorpNum, UserID) | python | def search(self, CorpNum, JobID, Type, TaxType, PurposeType, TaxRegIDType, TaxRegIDYN, TaxRegID, Page, PerPage,
Order, UserID=None):
""" μμ§ κ²°κ³Ό μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
JobID : μμ
μμ΄λ
Type : λ¬Έμνν λ°°μ΄, N-μΌλ°μ μμΈκΈκ³μ°μ, M-μμ μ μμΈκΈκ³μ°μ
TaxType : κ³ΌμΈνν λ°°μ΄, T-κ³ΌμΈ, N-λ©΄μΈ, Z-μμΈ
PurposeType : μμ/μ²κ΅¬, R-μμ, C-μ²κ΅¬, N-μμ
TaxRegIDType : μ’
μ¬μ
μ₯λ²νΈ μ¬μ
μμ ν, S-곡κΈμ, B-곡κΈλ°λμ, T-μνμ
TaxRegIDYN : μ’
μ¬μ
μ₯λ²νΈ μ 무, 곡백-μ 체쑰ν, 0-μ’
μ¬μ
μ₯λ²νΈ μμ, 1-μ’
μ¬μ
μ₯λ²νΈ μμ
TaxRegID : μ’
μ¬μ
μ₯λ²νΈ, μ½€λ§(",")λ‘ κ΅¬λΆ νμ¬ κ΅¬μ± ex) '0001,0002'
Page : νμ΄μ§ λ²νΈ
PerPage : νμ΄μ§λΉ λͺ©λ‘ κ°μ, μ΅λ 1000κ°
Order : μ λ ¬ λ°©ν₯, D-λ΄λ¦Όμ°¨μ, A-μ€λ¦μ°¨μ
UserID : νλΉνμ μμ΄λ
return
μμ§ κ²°κ³Ό μ 보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "μμ
μμ΄λ(jobID)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
uri = '/HomeTax/Taxinvoice/' + JobID
uri += '?Type=' + ','.join(Type)
uri += '&TaxType=' + ','.join(TaxType)
uri += '&PurposeType=' + ','.join(PurposeType)
uri += '&TaxRegIDType=' + TaxRegIDType
uri += '&TaxRegID=' + TaxRegID
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
if TaxRegIDYN != '':
uri += '&TaxRegIDYN=' + TaxRegIDYN
return self._httpget(uri, CorpNum, UserID) | [
"def",
"search",
"(",
"self",
",",
"CorpNum",
",",
"JobID",
",",
"Type",
",",
"TaxType",
",",
"PurposeType",
",",
"TaxRegIDType",
",",
"TaxRegIDYN",
",",
"TaxRegID",
",",
"Page",
",",
"PerPage",
",",
"Order",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"JobID",
"==",
"None",
"or",
"len",
"(",
"JobID",
")",
"!=",
"18",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"μμ
μμ΄λ(jobID)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.\")",
"",
"uri",
"=",
"'/HomeTax/Taxinvoice/'",
"+",
"JobID",
"uri",
"+=",
"'?Type='",
"+",
"','",
".",
"join",
"(",
"Type",
")",
"uri",
"+=",
"'&TaxType='",
"+",
"','",
".",
"join",
"(",
"TaxType",
")",
"uri",
"+=",
"'&PurposeType='",
"+",
"','",
".",
"join",
"(",
"PurposeType",
")",
"uri",
"+=",
"'&TaxRegIDType='",
"+",
"TaxRegIDType",
"uri",
"+=",
"'&TaxRegID='",
"+",
"TaxRegID",
"uri",
"+=",
"'&Page='",
"+",
"str",
"(",
"Page",
")",
"uri",
"+=",
"'&PerPage='",
"+",
"str",
"(",
"PerPage",
")",
"uri",
"+=",
"'&Order='",
"+",
"Order",
"if",
"TaxRegIDYN",
"!=",
"''",
":",
"uri",
"+=",
"'&TaxRegIDYN='",
"+",
"TaxRegIDYN",
"return",
"self",
".",
"_httpget",
"(",
"uri",
",",
"CorpNum",
",",
"UserID",
")"
] | Query collection results
args
CorpNum : Popbill member business registration number
JobID : job ID
Type : document type array, N-regular e-Tax invoice, M-amended e-Tax invoice
TaxType : tax type array, T-taxable, N-tax-exempt, Z-zero-rated
PurposeType : receipt/charge, R-receipt, C-charge, N-none
TaxRegIDType : business type for the branch registration number, S-supplier, B-buyer, T-trustee
TaxRegIDYN : whether a branch registration number exists, blank-all, 0-no branch number, 1-has branch number
TaxRegID : branch registration numbers, comma(",")-separated, ex) '0001,0002'
Page : page number
PerPage : items per page, max 1000
Order : sort direction, D-descending, A-ascending
UserID : Popbill member ID
return
collection result information
raise
PopbillException | [
"μμ§",
"κ²°κ³Ό",
"μ‘°ν",
"args",
"CorpNum",
":",
"νλΉνμ",
"μ¬μ
μλ²νΈ",
"JobID",
":",
"μμ
μμ΄λ",
"Type",
":",
"λ¬Έμνν",
"λ°°μ΄",
"N",
"-",
"μΌλ°μ μμΈκΈκ³μ°μ",
"M",
"-",
"μμ μ μμΈκΈκ³μ°μ",
"TaxType",
":",
"κ³ΌμΈνν",
"λ°°μ΄",
"T",
"-",
"κ³ΌμΈ",
"N",
"-",
"λ©΄μΈ",
"Z",
"-",
"μμΈ",
"PurposeType",
":",
"μμ",
"/",
"μ²κ΅¬",
"R",
"-",
"μμ",
"C",
"-",
"μ²κ΅¬",
"N",
"-",
"μμ",
"TaxRegIDType",
":",
"μ’
μ¬μ
μ₯λ²νΈ",
"μ¬μ
μμ ν",
"S",
"-",
"곡κΈμ",
"B",
"-",
"곡κΈλ°λμ",
"T",
"-",
"μνμ",
"TaxRegIDYN",
":",
"μ’
μ¬μ
μ₯λ²νΈ",
"μ 무",
"곡백",
"-",
"μ 체쑰ν",
"0",
"-",
"μ’
μ¬μ
μ₯λ²νΈ",
"μμ",
"1",
"-",
"μ’
μ¬μ
μ₯λ²νΈ",
"μμ",
"TaxRegID",
":",
"μ’
μ¬μ
μ₯λ²νΈ",
"μ½€λ§",
"(",
")",
"λ‘",
"ꡬλΆ",
"νμ¬",
"ꡬμ±",
"ex",
")",
"0001",
"0002",
"Page",
":",
"νμ΄μ§",
"λ²νΈ",
"PerPage",
":",
"νμ΄μ§λΉ",
"λͺ©λ‘",
"κ°μ",
"μ΅λ",
"1000κ°",
"Order",
":",
"μ λ ¬",
"λ°©ν₯",
"D",
"-",
"λ΄λ¦Όμ°¨μ",
"A",
"-",
"μ€λ¦μ°¨μ",
"UserID",
":",
"νλΉνμ",
"μμ΄λ",
"return",
"μμ§",
"κ²°κ³Ό",
"μ 보",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htTaxinvoiceService.py#L100-L137 |
linkhub-sdk/popbill.py | popbill/htTaxinvoiceService.py | HTTaxinvoiceService.summary | def summary(self, CorpNum, JobID, Type, TaxType, PurposeType, TaxRegIDType, TaxRegIDYN, TaxRegID, UserID=None):
""" μμ§ κ²°κ³Ό μμ½μ 보 μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
JobID : μμ
μμ΄λ
Type : λ¬Έμνν λ°°μ΄, N-μΌλ°μ μμΈκΈκ³μ°μ, M-μμ μ μμΈκΈκ³μ°μ
TaxType : κ³ΌμΈνν λ°°μ΄, T-κ³ΌμΈ, N-λ©΄μΈ, Z-μμΈ
PurposeType : μμ/μ²κ΅¬, R-μμ, C-μ²κ΅¬, N-μμ
TaxRegIDType : μ’
μ¬μ
μ₯λ²νΈ μ¬μ
μμ ν, S-곡κΈμ, B-곡κΈλ°λμ, T-μνμ
TaxRegIDYN : μ’
μ¬μ
μ₯λ²νΈ μ 무, 곡백-μ 체쑰ν, 0-μ’
μ¬μ
μ₯λ²νΈ μμ, 1-μ’
μ¬μ
μ₯λ²νΈ μμ
TaxRegID : μ’
μ¬μ
μ₯λ²νΈ, μ½€λ§(",")λ‘ κ΅¬λΆ νμ¬ κ΅¬μ± ex) '0001,0002'
UserID : νλΉνμ μμ΄λ
return
μμ§ κ²°κ³Ό μμ½μ 보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "μμ
μμ΄λ(jobID)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
uri = '/HomeTax/Taxinvoice/' + JobID + '/Summary'
uri += '?Type=' + ','.join(Type)
uri += '&TaxType=' + ','.join(TaxType)
uri += '&PurposeType=' + ','.join(PurposeType)
uri += '&TaxRegIDType=' + TaxRegIDType
uri += '&TaxRegID=' + TaxRegID
if TaxRegIDYN != '':
uri += '&TaxRegIDYN=' + TaxRegIDYN
return self._httpget(uri, CorpNum, UserID) | python | def summary(self, CorpNum, JobID, Type, TaxType, PurposeType, TaxRegIDType, TaxRegIDYN, TaxRegID, UserID=None):
""" μμ§ κ²°κ³Ό μμ½μ 보 μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
JobID : μμ
μμ΄λ
Type : λ¬Έμνν λ°°μ΄, N-μΌλ°μ μμΈκΈκ³μ°μ, M-μμ μ μμΈκΈκ³μ°μ
TaxType : κ³ΌμΈνν λ°°μ΄, T-κ³ΌμΈ, N-λ©΄μΈ, Z-μμΈ
PurposeType : μμ/μ²κ΅¬, R-μμ, C-μ²κ΅¬, N-μμ
TaxRegIDType : μ’
μ¬μ
μ₯λ²νΈ μ¬μ
μμ ν, S-곡κΈμ, B-곡κΈλ°λμ, T-μνμ
TaxRegIDYN : μ’
μ¬μ
μ₯λ²νΈ μ 무, 곡백-μ 체쑰ν, 0-μ’
μ¬μ
μ₯λ²νΈ μμ, 1-μ’
μ¬μ
μ₯λ²νΈ μμ
TaxRegID : μ’
μ¬μ
μ₯λ²νΈ, μ½€λ§(",")λ‘ κ΅¬λΆ νμ¬ κ΅¬μ± ex) '0001,0002'
UserID : νλΉνμ μμ΄λ
return
μμ§ κ²°κ³Ό μμ½μ 보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "μμ
μμ΄λ(jobID)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
uri = '/HomeTax/Taxinvoice/' + JobID + '/Summary'
uri += '?Type=' + ','.join(Type)
uri += '&TaxType=' + ','.join(TaxType)
uri += '&PurposeType=' + ','.join(PurposeType)
uri += '&TaxRegIDType=' + TaxRegIDType
uri += '&TaxRegID=' + TaxRegID
if TaxRegIDYN != '':
uri += '&TaxRegIDYN=' + TaxRegIDYN
return self._httpget(uri, CorpNum, UserID) | [
"def",
"summary",
"(",
"self",
",",
"CorpNum",
",",
"JobID",
",",
"Type",
",",
"TaxType",
",",
"PurposeType",
",",
"TaxRegIDType",
",",
"TaxRegIDYN",
",",
"TaxRegID",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"JobID",
"==",
"None",
"or",
"len",
"(",
"JobID",
")",
"!=",
"18",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"μμ
μμ΄λ(jobID)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.\")",
"",
"uri",
"=",
"'/HomeTax/Taxinvoice/'",
"+",
"JobID",
"+",
"'/Summary'",
"uri",
"+=",
"'?Type='",
"+",
"','",
".",
"join",
"(",
"Type",
")",
"uri",
"+=",
"'&TaxType='",
"+",
"','",
".",
"join",
"(",
"TaxType",
")",
"uri",
"+=",
"'&PurposeType='",
"+",
"','",
".",
"join",
"(",
"PurposeType",
")",
"uri",
"+=",
"'&TaxRegIDType='",
"+",
"TaxRegIDType",
"uri",
"+=",
"'&TaxRegID='",
"+",
"TaxRegID",
"if",
"TaxRegIDYN",
"!=",
"''",
":",
"uri",
"+=",
"'&TaxRegIDYN='",
"+",
"TaxRegIDYN",
"return",
"self",
".",
"_httpget",
"(",
"uri",
",",
"CorpNum",
",",
"UserID",
")"
] | Query collection result summary
args
CorpNum : Popbill member business registration number
JobID : job ID
Type : document type array, N-regular e-Tax invoice, M-amended e-Tax invoice
TaxType : tax type array, T-taxable, N-tax-exempt, Z-zero-rated
PurposeType : receipt/charge, R-receipt, C-charge, N-none
TaxRegIDType : business type for the branch registration number, S-supplier, B-buyer, T-trustee
TaxRegIDYN : whether a branch registration number exists, blank-all, 0-no branch number, 1-has branch number
TaxRegID : branch registration numbers, comma(",")-separated, ex) '0001,0002'
UserID : Popbill member ID
return
collection result summary information
raise
PopbillException | [
"μμ§",
"κ²°κ³Ό",
"μμ½μ 보",
"μ‘°ν",
"args",
"CorpNum",
":",
"νλΉνμ",
"μ¬μ
μλ²νΈ",
"JobID",
":",
"μμ
μμ΄λ",
"Type",
":",
"λ¬Έμνν",
"λ°°μ΄",
"N",
"-",
"μΌλ°μ μμΈκΈκ³μ°μ",
"M",
"-",
"μμ μ μμΈκΈκ³μ°μ",
"TaxType",
":",
"κ³ΌμΈνν",
"λ°°μ΄",
"T",
"-",
"κ³ΌμΈ",
"N",
"-",
"λ©΄μΈ",
"Z",
"-",
"μμΈ",
"PurposeType",
":",
"μμ",
"/",
"μ²κ΅¬",
"R",
"-",
"μμ",
"C",
"-",
"μ²κ΅¬",
"N",
"-",
"μμ",
"TaxRegIDType",
":",
"μ’
μ¬μ
μ₯λ²νΈ",
"μ¬μ
μμ ν",
"S",
"-",
"곡κΈμ",
"B",
"-",
"곡κΈλ°λμ",
"T",
"-",
"μνμ",
"TaxRegIDYN",
":",
"μ’
μ¬μ
μ₯λ²νΈ",
"μ 무",
"곡백",
"-",
"μ 체쑰ν",
"0",
"-",
"μ’
μ¬μ
μ₯λ²νΈ",
"μμ",
"1",
"-",
"μ’
μ¬μ
μ₯λ²νΈ",
"μμ",
"TaxRegID",
":",
"μ’
μ¬μ
μ₯λ²νΈ",
"μ½€λ§",
"(",
")",
"λ‘",
"ꡬλΆ",
"νμ¬",
"ꡬμ±",
"ex",
")",
"0001",
"0002",
"UserID",
":",
"νλΉνμ",
"μμ΄λ",
"return",
"μμ§",
"κ²°κ³Ό",
"μμ½μ 보",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htTaxinvoiceService.py#L139-L169 |
linkhub-sdk/popbill.py | popbill/htTaxinvoiceService.py | HTTaxinvoiceService.getTaxinvoice | def getTaxinvoice(self, CorpNum, NTSConfirmNum, UserID=None):
""" μ μμΈκΈκ³μ°μ μμΈμ 보 νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
NTSConfirmNum : κ΅μΈμ² μΉμΈλ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ μμΈκΈκ³μ°μ μ 보κ°μ²΄
raise
PopbillException
"""
if NTSConfirmNum == None or len(NTSConfirmNum) != 24:
raise PopbillException(-99999999, "κ΅μΈμ²μΉμΈλ²νΈ(NTSConfirmNum)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum, CorpNum, UserID) | python | def getTaxinvoice(self, CorpNum, NTSConfirmNum, UserID=None):
""" μ μμΈκΈκ³μ°μ μμΈμ 보 νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
NTSConfirmNum : κ΅μΈμ² μΉμΈλ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ μμΈκΈκ³μ°μ μ 보κ°μ²΄
raise
PopbillException
"""
if NTSConfirmNum == None or len(NTSConfirmNum) != 24:
raise PopbillException(-99999999, "κ΅μΈμ²μΉμΈλ²νΈ(NTSConfirmNum)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum, CorpNum, UserID) | [
"def",
"getTaxinvoice",
"(",
"self",
",",
"CorpNum",
",",
"NTSConfirmNum",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"NTSConfirmNum",
"==",
"None",
"or",
"len",
"(",
"NTSConfirmNum",
")",
"!=",
"24",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"κ΅μΈμ²μΉμΈλ²νΈ(NTSConfirmNum)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/HomeTax/Taxinvoice/'",
"+",
"NTSConfirmNum",
",",
"CorpNum",
",",
"UserID",
")"
] | Get e-Tax invoice detail information
args
CorpNum : Popbill member business registration number
NTSConfirmNum : NTS (National Tax Service) approval number
UserID : Popbill member ID
return
e-Tax invoice information object
raise
PopbillException | [
"μ μμΈκΈκ³μ°μ",
"μμΈμ 보",
"νμΈ",
"args",
"CorpNum",
":",
"νλΉνμ",
"μ¬μ
μλ²νΈ",
"NTSConfirmNum",
":",
"κ΅μΈμ²",
"μΉμΈλ²νΈ",
"UserID",
":",
"νλΉνμ",
"μμ΄λ",
"return",
"μ μμΈκΈκ³μ°μ",
"μ 보κ°μ²΄",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htTaxinvoiceService.py#L171-L185 |
linkhub-sdk/popbill.py | popbill/htTaxinvoiceService.py | HTTaxinvoiceService.getXML | def getXML(self, CorpNum, NTSConfirmNum, UserID=None):
""" μ μμΈκΈκ³μ°μ μμΈμ 보 νμΈ - XML
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
NTSConfirmNum : κ΅μΈμ² μΉμΈλ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ μμΈκΈκ³μ°μ μ 보κ°μ²΄
raise
PopbillException
"""
if NTSConfirmNum == None or len(NTSConfirmNum) != 24:
raise PopbillException(-99999999, "κ΅μΈμ²μΉμΈλ²νΈ(NTSConfirmNum)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum + '?T=xml', CorpNum, UserID) | python | def getXML(self, CorpNum, NTSConfirmNum, UserID=None):
""" μ μμΈκΈκ³μ°μ μμΈμ 보 νμΈ - XML
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
NTSConfirmNum : κ΅μΈμ² μΉμΈλ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ μμΈκΈκ³μ°μ μ 보κ°μ²΄
raise
PopbillException
"""
if NTSConfirmNum == None or len(NTSConfirmNum) != 24:
raise PopbillException(-99999999, "κ΅μΈμ²μΉμΈλ²νΈ(NTSConfirmNum)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum + '?T=xml', CorpNum, UserID) | [
"def",
"getXML",
"(",
"self",
",",
"CorpNum",
",",
"NTSConfirmNum",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"NTSConfirmNum",
"==",
"None",
"or",
"len",
"(",
"NTSConfirmNum",
")",
"!=",
"24",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"κ΅μΈμ²μΉμΈλ²νΈ(NTSConfirmNum)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/HomeTax/Taxinvoice/'",
"+",
"NTSConfirmNum",
"+",
"'?T=xml'",
",",
"CorpNum",
",",
"UserID",
")"
] | Get e-Tax invoice detail information - XML
args
CorpNum : Popbill member business registration number
NTSConfirmNum : NTS (National Tax Service) approval number
UserID : Popbill member ID
return
e-Tax invoice information object
raise
PopbillException | [
"μ μμΈκΈκ³μ°μ",
"μμΈμ 보",
"νμΈ",
"-",
"XML",
"args",
"CorpNum",
":",
"νλΉνμ",
"μ¬μ
μλ²νΈ",
"NTSConfirmNum",
":",
"κ΅μΈμ²",
"μΉμΈλ²νΈ",
"UserID",
":",
"νλΉνμ",
"μμ΄λ",
"return",
"μ μμΈκΈκ³μ°μ",
"μ 보κ°μ²΄",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htTaxinvoiceService.py#L187-L201 |
linkhub-sdk/popbill.py | popbill/htTaxinvoiceService.py | HTTaxinvoiceService.getPopUpURL | def getPopUpURL(self, CorpNum, NTSConfirmNum, UserID=None):
""" ννμ€ μ μμΈκΈκ³μ°μ 보기 νμ
URL
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
NTSConfirmNum : κ΅μΈμ² μΉμΈ λ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ μμΈκΈκ³μ°μ 보기 νμ
URL λ°ν
raise
PopbillException
"""
if NTSConfirmNum == None or len(NTSConfirmNum) != 24:
raise PopbillException(-99999999, "κ΅μΈμ²μΉμΈλ²νΈ(NTSConfirmNum)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum + '/PopUp', CorpNum, UserID).url | python | def getPopUpURL(self, CorpNum, NTSConfirmNum, UserID=None):
""" ννμ€ μ μμΈκΈκ³μ°μ 보기 νμ
URL
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
NTSConfirmNum : κ΅μΈμ² μΉμΈ λ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ μμΈκΈκ³μ°μ 보기 νμ
URL λ°ν
raise
PopbillException
"""
if NTSConfirmNum == None or len(NTSConfirmNum) != 24:
raise PopbillException(-99999999, "κ΅μΈμ²μΉμΈλ²νΈ(NTSConfirmNum)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum + '/PopUp', CorpNum, UserID).url | [
"def",
"getPopUpURL",
"(",
"self",
",",
"CorpNum",
",",
"NTSConfirmNum",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"NTSConfirmNum",
"==",
"None",
"or",
"len",
"(",
"NTSConfirmNum",
")",
"!=",
"24",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"κ΅μΈμ²μΉμΈλ²νΈ(NTSConfirmNum)κ° μ¬λ°λ₯΄μ§ μμ΅λλ€.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/HomeTax/Taxinvoice/'",
"+",
"NTSConfirmNum",
"+",
"'/PopUp'",
",",
"CorpNum",
",",
"UserID",
")",
".",
"url"
] | Hometax e-Tax invoice view popup URL
args
CorpNum : Popbill member business registration number
NTSConfirmNum : NTS (National Tax Service) approval number
UserID : Popbill member ID
return
returns the e-Tax invoice view popup URL
raise
PopbillException | [
"ννμ€",
"μ μμΈκΈκ³μ°μ",
"보기",
"νμ
",
"URL",
"args",
"CorpNum",
":",
"νλΉνμ",
"μ¬μ
μλ²νΈ",
"NTSConfirmNum",
":",
"κ΅μΈμ²",
"μΉμΈ",
"λ²νΈ",
"UserID",
":",
"νλΉνμ",
"μμ΄λ",
"return",
"μ μμΈκΈκ³μ°μ",
"보기",
"νμ
",
"URL",
"λ°ν",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htTaxinvoiceService.py#L252-L267 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/__init__.py | Publisher.make_document | def make_document(self, titlestring):
"""
This method may be used to create a new document for writing as xml
to the OPS subdirectory of the ePub structure.
"""
#root = etree.XML('''<?xml version="1.0"?>\
#<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.1//EN' 'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'>\
#<html xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml" xmlns:ops="http://www.idpf.org/2007/ops">\
#</html>''')
root = etree.XML('''<?xml version="1.0"?>\
<!DOCTYPE html>\
<html xmlns="http://www.w3.org/1999/xhtml">\
</html>''')
document = etree.ElementTree(root)
html = document.getroot()
head = etree.SubElement(html, 'head')
etree.SubElement(html, 'body')
title = etree.SubElement(head, 'title')
title.text = titlestring
#The href for the css stylesheet is a standin, can be overwritten
etree.SubElement(head,
'link',
{'href': 'css/default.css',
'rel': 'stylesheet',
'type': 'text/css'})
return document | python | def make_document(self, titlestring):
"""
This method may be used to create a new document for writing as xml
to the OPS subdirectory of the ePub structure.
"""
#root = etree.XML('''<?xml version="1.0"?>\
#<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.1//EN' 'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'>\
#<html xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml" xmlns:ops="http://www.idpf.org/2007/ops">\
#</html>''')
root = etree.XML('''<?xml version="1.0"?>\
<!DOCTYPE html>\
<html xmlns="http://www.w3.org/1999/xhtml">\
</html>''')
document = etree.ElementTree(root)
html = document.getroot()
head = etree.SubElement(html, 'head')
etree.SubElement(html, 'body')
title = etree.SubElement(head, 'title')
title.text = titlestring
#The href for the css stylesheet is a standin, can be overwritten
etree.SubElement(head,
'link',
{'href': 'css/default.css',
'rel': 'stylesheet',
'type': 'text/css'})
return document | [
"def",
"make_document",
"(",
"self",
",",
"titlestring",
")",
":",
"#root = etree.XML('''<?xml version=\"1.0\"?>\\",
"#<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.1//EN' 'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'>\\",
"#<html xml:lang=\"en-US\" xmlns=\"http://www.w3.org/1999/xhtml\" xmlns:ops=\"http://www.idpf.org/2007/ops\">\\",
"#</html>''')",
"root",
"=",
"etree",
".",
"XML",
"(",
"'''<?xml version=\"1.0\"?>\\\n<!DOCTYPE html>\\\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\\\n</html>'''",
")",
"document",
"=",
"etree",
".",
"ElementTree",
"(",
"root",
")",
"html",
"=",
"document",
".",
"getroot",
"(",
")",
"head",
"=",
"etree",
".",
"SubElement",
"(",
"html",
",",
"'head'",
")",
"etree",
".",
"SubElement",
"(",
"html",
",",
"'body'",
")",
"title",
"=",
"etree",
".",
"SubElement",
"(",
"head",
",",
"'title'",
")",
"title",
".",
"text",
"=",
"titlestring",
"#The href for the css stylesheet is a standin, can be overwritten",
"etree",
".",
"SubElement",
"(",
"head",
",",
"'link'",
",",
"{",
"'href'",
":",
"'css/default.css'",
",",
"'rel'",
":",
"'stylesheet'",
",",
"'type'",
":",
"'text/css'",
"}",
")",
"return",
"document"
] | This method may be used to create a new document for writing as xml
to the OPS subdirectory of the ePub structure. | [
"This",
"method",
"may",
"be",
"used",
"to",
"create",
"a",
"new",
"document",
"for",
"writing",
"as",
"xml",
"to",
"the",
"OPS",
"subdirectory",
"of",
"the",
"ePub",
"structure",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/__init__.py#L196-L224 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/__init__.py | Publisher.write_document | def write_document(self, name, document):
"""
This function will write a document to an XML file.
"""
with open(name, 'wb') as out:
out.write(etree.tostring(document,
encoding='utf-8',
pretty_print=True)) | python | def write_document(self, name, document):
"""
This function will write a document to an XML file.
"""
with open(name, 'wb') as out:
out.write(etree.tostring(document,
encoding='utf-8',
pretty_print=True)) | [
"def",
"write_document",
"(",
"self",
",",
"name",
",",
"document",
")",
":",
"with",
"open",
"(",
"name",
",",
"'wb'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"etree",
".",
"tostring",
"(",
"document",
",",
"encoding",
"=",
"'utf-8'",
",",
"pretty_print",
"=",
"True",
")",
")"
] | This function will write a document to an XML file. | [
"This",
"function",
"will",
"write",
"a",
"document",
"to",
"an",
"XML",
"file",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/__init__.py#L281-L288 |
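A standalone sketch of what make_document and write_document produce together: the same XHTML skeleton is built with plain lxml and serialised with the same tostring call. The title and output filename are illustrative; the Publisher class itself is not needed for this.

from lxml import etree

root = etree.XML('<?xml version="1.0"?>'
                 '<!DOCTYPE html>'
                 '<html xmlns="http://www.w3.org/1999/xhtml"></html>')
document = etree.ElementTree(root)
html = document.getroot()
head = etree.SubElement(html, 'head')
etree.SubElement(html, 'body')
title = etree.SubElement(head, 'title')
title.text = 'Example Article'
etree.SubElement(head, 'link', {'href': 'css/default.css',
                                'rel': 'stylesheet',
                                'type': 'text/css'})

with open('example.xhtml', 'wb') as out:  # mirrors write_document
    out.write(etree.tostring(document, encoding='utf-8', pretty_print=True))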
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/__init__.py | Publisher.format_date_string | def format_date_string(self, date_tuple):
"""
Receives a date_tuple object, and outputs a string
for placement in the article content.
"""
months = ['', 'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
date_string = ''
if date_tuple.season:
return '{0}, {1}'.format(date_tuple.season, date_tuple.year)
else:
if not date_tuple.month and not date_tuple.day:
return '{0}'.format(date_tuple.year)
if date_tuple.month:
date_string += months[int(date_tuple.month)]
if date_tuple.day:
date_string += ' ' + date_tuple.day
return ', '.join([date_string, date_tuple.year]) | python | def format_date_string(self, date_tuple):
"""
Receives a date_tuple object, and outputs a string
for placement in the article content.
"""
months = ['', 'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
date_string = ''
if date_tuple.season:
return '{0}, {1}'.format(date_tuple.season, date_tuple.year)
else:
if not date_tuple.month and not date_tuple.day:
return '{0}'.format(date_tuple.year)
if date_tuple.month:
date_string += months[int(date_tuple.month)]
if date_tuple.day:
date_string += ' ' + date_tuple.day
return ', '.join([date_string, date_tuple.year]) | [
"def",
"format_date_string",
"(",
"self",
",",
"date_tuple",
")",
":",
"months",
"=",
"[",
"''",
",",
"'January'",
",",
"'February'",
",",
"'March'",
",",
"'April'",
",",
"'May'",
",",
"'June'",
",",
"'July'",
",",
"'August'",
",",
"'September'",
",",
"'October'",
",",
"'November'",
",",
"'December'",
"]",
"date_string",
"=",
"''",
"if",
"date_tuple",
".",
"season",
":",
"return",
"'{0}, {1}'",
".",
"format",
"(",
"date_tuple",
".",
"season",
",",
"date_tuple",
".",
"year",
")",
"else",
":",
"if",
"not",
"date_tuple",
".",
"month",
"and",
"not",
"date_tuple",
".",
"day",
":",
"return",
"'{0}'",
".",
"format",
"(",
"date_tuple",
".",
"year",
")",
"if",
"date_tuple",
".",
"month",
":",
"date_string",
"+=",
"months",
"[",
"int",
"(",
"date_tuple",
".",
"month",
")",
"]",
"if",
"date_tuple",
".",
"day",
":",
"date_string",
"+=",
"' '",
"+",
"date_tuple",
".",
"day",
"return",
"', '",
".",
"join",
"(",
"[",
"date_string",
",",
"date_tuple",
".",
"year",
"]",
")"
] | Receives a date_tuple object, and outputs a string
for placement in the article content. | [
"Receives",
"a",
"date_tuple",
"object",
"and",
"outputs",
"a",
"string",
"for",
"placement",
"in",
"the",
"article",
"content",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/__init__.py#L539-L556 |
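The three branches of format_date_string, illustrated with a namedtuple standing in for the package's date tuple type (the real type is defined elsewhere; the field names are inferred from the attribute accesses in the code above).

from collections import namedtuple

DateTuple = namedtuple('DateTuple', 'season year month day')  # stand-in only

dates = [
    DateTuple(season='Spring', year='2013', month=None, day=None),  # -> 'Spring, 2013'
    DateTuple(season=None, year='2013', month=None, day=None),      # -> '2013'
    DateTuple(season=None, year='2013', month='7', day='24'),       # -> 'July 24, 2013'
]
# Given a Publisher instance `pub`, pub.format_date_string(d) returns the commented strings.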
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/__init__.py | Publisher.has_out_of_flow_tables | def has_out_of_flow_tables(self):
"""
Returns True if the article has out-of-flow tables, indicates separate
tables document.
This method is used to indicate whether rendering this article's content
will result in the creation of out-of-flow HTML tables. This method has
a base class implementation representing a common logic; if an article
has a graphic(image) representation of a table then the HTML
representation will be placed out-of-flow if it exists, if there is no
graphic(image) represenation then the HTML representation will be placed
in-flow.
Returns
-------
bool
True if there are out-of-flow HTML tables, False otherwise
"""
if self.article.body is None:
return False
for table_wrap in self.article.body.findall('.//table-wrap'):
graphic = table_wrap.xpath('./graphic | ./alternatives/graphic')
table = table_wrap.xpath('./table | ./alternatives/table')
if graphic and table:
return True
return False | python | def has_out_of_flow_tables(self):
"""
Returns True if the article has out-of-flow tables, indicates separate
tables document.
This method is used to indicate whether rendering this article's content
will result in the creation of out-of-flow HTML tables. This method has
a base class implementation representing a common logic; if an article
has a graphic(image) representation of a table then the HTML
representation will be placed out-of-flow if it exists, if there is no
graphic(image) representation then the HTML representation will be placed
in-flow.
Returns
-------
bool
True if there are out-of-flow HTML tables, False otherwise
"""
if self.article.body is None:
return False
for table_wrap in self.article.body.findall('.//table-wrap'):
graphic = table_wrap.xpath('./graphic | ./alternatives/graphic')
table = table_wrap.xpath('./table | ./alternatives/table')
if graphic and table:
return True
return False | [
"def",
"has_out_of_flow_tables",
"(",
"self",
")",
":",
"if",
"self",
".",
"article",
".",
"body",
"is",
"None",
":",
"return",
"False",
"for",
"table_wrap",
"in",
"self",
".",
"article",
".",
"body",
".",
"findall",
"(",
"'.//table-wrap'",
")",
":",
"graphic",
"=",
"table_wrap",
".",
"xpath",
"(",
"'./graphic | ./alternatives/graphic'",
")",
"table",
"=",
"table_wrap",
".",
"xpath",
"(",
"'./table | ./alternatives/table'",
")",
"if",
"graphic",
"and",
"table",
":",
"return",
"True",
"return",
"False"
] | Returns True if the article has out-of-flow tables, indicates separate
tables document.
This method is used to indicate whether rendering this article's content
will result in the creation of out-of-flow HTML tables. This method has
a base class implementation representing a common logic; if an article
has a graphic(image) representation of a table then the HTML
representation will be placed out-of-flow if it exists, if there is no
graphic(image) representation then the HTML representation will be placed
in-flow.
Returns
-------
bool
True if there are out-of-flow HTML tables, False otherwise | [
"Returns",
"True",
"if",
"the",
"article",
"has",
"out",
"-",
"of",
"-",
"flow",
"tables",
"indicates",
"separate",
"tables",
"document",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/__init__.py#L686-L711 |
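A minimal JATS-style fragment of the case the method tests for: a table-wrap holding both a graphic and an HTML table is what pushes the table out of flow. The standalone lxml check below reproduces the XPath logic; the snippet is illustrative, not taken from a real article.

from lxml import etree

body = etree.XML(
    '<body>'
    '<table-wrap id="tbl1">'
    '<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="tbl1.tif"/>'
    '<table><tr><td>cell</td></tr></table>'
    '</table-wrap>'
    '</body>')

for table_wrap in body.findall('.//table-wrap'):
    graphic = table_wrap.xpath('./graphic | ./alternatives/graphic')
    table = table_wrap.xpath('./table | ./alternatives/table')
    print(bool(graphic and table))  # True -> the HTML table would be rendered out of flow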
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/navigation/__init__.py | Navigation.process | def process(self, article):
"""
Ingests an Article to create navigation structures and parse global
metadata.
"""
if self.article is not None and not self.collection:
log.warning('Could not process additional article. Navigation only \
handles one article unless collection mode is set.')
return False
if article.publisher is None:
log.error('''Navigation cannot be generated for an Article \
without a publisher!''')
return
self.article = article
self.article_doi = self.article.doi.split('/')[1]
self.all_dois.append(self.article.doi)
if self.collection:
pass
else:
self.title = self.article.publisher.nav_title()
for author in self.article.publisher.nav_contributors():
self.contributors.add(author)
#Analyze the structure of the article to create internal mapping
self.map_navigation() | python | def process(self, article):
"""
Ingests an Article to create navigation structures and parse global
metadata.
"""
if self.article is not None and not self.collection:
log.warning('Could not process additional article. Navigation only \
handles one article unless collection mode is set.')
return False
if article.publisher is None:
log.error('''Navigation cannot be generated for an Article \
without a publisher!''')
return
self.article = article
self.article_doi = self.article.doi.split('/')[1]
self.all_dois.append(self.article.doi)
if self.collection:
pass
else:
self.title = self.article.publisher.nav_title()
for author in self.article.publisher.nav_contributors():
self.contributors.add(author)
#Analyze the structure of the article to create internal mapping
self.map_navigation() | [
"def",
"process",
"(",
"self",
",",
"article",
")",
":",
"if",
"self",
".",
"article",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"collection",
":",
"log",
".",
"warning",
"(",
"'Could not process additional article. Navigation only \\\nhandles one article unless collection mode is set.'",
")",
"return",
"False",
"if",
"article",
".",
"publisher",
"is",
"None",
":",
"log",
".",
"error",
"(",
"'''Navigation cannot be generated for an Article \\\nwithout a publisher!'''",
")",
"return",
"self",
".",
"article",
"=",
"article",
"self",
".",
"article_doi",
"=",
"self",
".",
"article",
".",
"doi",
".",
"split",
"(",
"'/'",
")",
"[",
"1",
"]",
"self",
".",
"all_dois",
".",
"append",
"(",
"self",
".",
"article",
".",
"doi",
")",
"if",
"self",
".",
"collection",
":",
"pass",
"else",
":",
"self",
".",
"title",
"=",
"self",
".",
"article",
".",
"publisher",
".",
"nav_title",
"(",
")",
"for",
"author",
"in",
"self",
".",
"article",
".",
"publisher",
".",
"nav_contributors",
"(",
")",
":",
"self",
".",
"contributors",
".",
"add",
"(",
"author",
")",
"#Analyze the structure of the article to create internal mapping",
"self",
".",
"map_navigation",
"(",
")"
] | Ingests an Article to create navigation structures and parse global
metadata. | [
"Ingests",
"an",
"Article",
"to",
"create",
"navigation",
"structures",
"and",
"parse",
"global",
"metadata",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/navigation/__init__.py#L63-L88 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/navigation/__init__.py | Navigation.map_navigation | def map_navigation(self):
"""
This is a wrapper for depth-first recursive analysis of the article
"""
#All articles should have titles
title_id = 'titlepage-{0}'.format(self.article_doi)
title_label = self.article.publisher.nav_title()
title_source = 'main.{0}.xhtml#title'.format(self.article_doi)
title_navpoint = navpoint(title_id, title_label, self.play_order,
title_source, [])
self.nav.append(title_navpoint)
#When processing a collection of articles, we will want all subsequent
#navpoints for this article to be located under the title
if self.collection:
nav_insertion = title_navpoint.children
else:
nav_insertion = self.nav
#If the article has a body, we'll need to parse it for navigation
if self.article.body is not None:
#Here is where we invoke the recursive parsing!
for nav_pt in self.recursive_article_navmap(self.article.body):
nav_insertion.append(nav_pt)
#Add a navpoint to the references if appropriate
if self.article.root.xpath('./back/ref'):
ref_id = 'references-{0}'.format(self.article_doi)
ref_label = 'References'
ref_source = 'biblio.{0}.xhtml#references'.format(self.article_doi)
ref_navpoint = navpoint(ref_id, ref_label, self.play_order,
ref_source, [])
nav_insertion.append(ref_navpoint) | python | def map_navigation(self):
"""
This is a wrapper for depth-first recursive analysis of the article
"""
#All articles should have titles
title_id = 'titlepage-{0}'.format(self.article_doi)
title_label = self.article.publisher.nav_title()
title_source = 'main.{0}.xhtml#title'.format(self.article_doi)
title_navpoint = navpoint(title_id, title_label, self.play_order,
title_source, [])
self.nav.append(title_navpoint)
#When processing a collection of articles, we will want all subsequent
#navpoints for this article to be located under the title
if self.collection:
nav_insertion = title_navpoint.children
else:
nav_insertion = self.nav
#If the article has a body, we'll need to parse it for navigation
if self.article.body is not None:
#Here is where we invoke the recursive parsing!
for nav_pt in self.recursive_article_navmap(self.article.body):
nav_insertion.append(nav_pt)
#Add a navpoint to the references if appropriate
if self.article.root.xpath('./back/ref'):
ref_id = 'references-{0}'.format(self.article_doi)
ref_label = 'References'
ref_source = 'biblio.{0}.xhtml#references'.format(self.article_doi)
ref_navpoint = navpoint(ref_id, ref_label, self.play_order,
ref_source, [])
nav_insertion.append(ref_navpoint) | [
"def",
"map_navigation",
"(",
"self",
")",
":",
"#All articles should have titles",
"title_id",
"=",
"'titlepage-{0}'",
".",
"format",
"(",
"self",
".",
"article_doi",
")",
"title_label",
"=",
"self",
".",
"article",
".",
"publisher",
".",
"nav_title",
"(",
")",
"title_source",
"=",
"'main.{0}.xhtml#title'",
".",
"format",
"(",
"self",
".",
"article_doi",
")",
"title_navpoint",
"=",
"navpoint",
"(",
"title_id",
",",
"title_label",
",",
"self",
".",
"play_order",
",",
"title_source",
",",
"[",
"]",
")",
"self",
".",
"nav",
".",
"append",
"(",
"title_navpoint",
")",
"#When processing a collection of articles, we will want all subsequent",
"#navpoints for this article to be located under the title",
"if",
"self",
".",
"collection",
":",
"nav_insertion",
"=",
"title_navpoint",
".",
"children",
"else",
":",
"nav_insertion",
"=",
"self",
".",
"nav",
"#If the article has a body, we'll need to parse it for navigation",
"if",
"self",
".",
"article",
".",
"body",
"is",
"not",
"None",
":",
"#Here is where we invoke the recursive parsing!",
"for",
"nav_pt",
"in",
"self",
".",
"recursive_article_navmap",
"(",
"self",
".",
"article",
".",
"body",
")",
":",
"nav_insertion",
".",
"append",
"(",
"nav_pt",
")",
"#Add a navpoint to the references if appropriate",
"if",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./back/ref'",
")",
":",
"ref_id",
"=",
"'references-{0}'",
".",
"format",
"(",
"self",
".",
"article_doi",
")",
"ref_label",
"=",
"'References'",
"ref_source",
"=",
"'biblio.{0}.xhtml#references'",
".",
"format",
"(",
"self",
".",
"article_doi",
")",
"ref_navpoint",
"=",
"navpoint",
"(",
"ref_id",
",",
"ref_label",
",",
"self",
".",
"play_order",
",",
"ref_source",
",",
"[",
"]",
")",
"nav_insertion",
".",
"append",
"(",
"ref_navpoint",
")"
] | This is a wrapper for depth-first recursive analysis of the article | [
"This",
"is",
"a",
"wrapper",
"for",
"depth",
"-",
"first",
"recursive",
"analysis",
"of",
"the",
"article"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/navigation/__init__.py#L90-L121 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/navigation/__init__.py | Navigation.recursive_article_navmap | def recursive_article_navmap(self, src_element, depth=0, first=True):
"""
This function recursively traverses the content of an input article to
add the correct elements to the NCX file's navMap and Lists.
"""
if depth > self.nav_depth:
self.nav_depth = depth
navpoints = []
tagnames = ['sec', 'fig', 'table-wrap']
for child in src_element:
try:
tagname = child.tag
except AttributeError:
continue
else:
if tagname not in tagnames:
continue
#Safely handle missing id attributes
if 'id' not in child.attrib:
child.attrib['id'] = self.auto_id
#If in collection mode, we'll prepend the article DOI to avoid
#collisions
if self.collection:
child_id = '-'.join([self.article_doi,
child.attrib['id']])
else:
child_id = child.attrib['id']
#Attempt to infer the correct text as a label
#Skip the element if we cannot
child_title = child.find('title')
if child_title is None:
continue # If there is no immediate title, skip this element
label = element_methods.all_text(child_title)
if not label:
continue # If no text in the title, skip this element
source = 'main.{0}.xhtml#{1}'.format(self.article_doi,
child.attrib['id'])
if tagname == 'sec':
children = self.recursive_article_navmap(child, depth=depth + 1)
navpoints.append(navpoint(child_id,
label,
self.play_order,
source,
children))
#figs and table-wraps do not have children
elif tagname == 'fig': # Add navpoints to list_of_figures
self.figures_list.append(navpoint(child.attrib['id'],
label,
None,
source,
[]))
elif tagname == 'table-wrap': # Add navpoints to list_of_tables
self.tables_list.append(navpoint(child.attrib['id'],
label,
None,
source,
[]))
return navpoints | python | def recursive_article_navmap(self, src_element, depth=0, first=True):
"""
This function recursively traverses the content of an input article to
add the correct elements to the NCX file's navMap and Lists.
"""
if depth > self.nav_depth:
self.nav_depth = depth
navpoints = []
tagnames = ['sec', 'fig', 'table-wrap']
for child in src_element:
try:
tagname = child.tag
except AttributeError:
continue
else:
if tagname not in tagnames:
continue
#Safely handle missing id attributes
if 'id' not in child.attrib:
child.attrib['id'] = self.auto_id
#If in collection mode, we'll prepend the article DOI to avoid
#collisions
if self.collection:
child_id = '-'.join([self.article_doi,
child.attrib['id']])
else:
child_id = child.attrib['id']
#Attempt to infer the correct text as a label
#Skip the element if we cannot
child_title = child.find('title')
if child_title is None:
continue # If there is no immediate title, skip this element
label = element_methods.all_text(child_title)
if not label:
continue # If no text in the title, skip this element
source = 'main.{0}.xhtml#{1}'.format(self.article_doi,
child.attrib['id'])
if tagname == 'sec':
children = self.recursive_article_navmap(child, depth=depth + 1)
navpoints.append(navpoint(child_id,
label,
self.play_order,
source,
children))
#figs and table-wraps do not have children
elif tagname == 'fig': # Add navpoints to list_of_figures
self.figures_list.append(navpoint(child.attrib['id'],
label,
None,
source,
[]))
elif tagname == 'table-wrap': # Add navpoints to list_of_tables
self.tables_list.append(navpoint(child.attrib['id'],
label,
None,
source,
[]))
return navpoints | [
"def",
"recursive_article_navmap",
"(",
"self",
",",
"src_element",
",",
"depth",
"=",
"0",
",",
"first",
"=",
"True",
")",
":",
"if",
"depth",
">",
"self",
".",
"nav_depth",
":",
"self",
".",
"nav_depth",
"=",
"depth",
"navpoints",
"=",
"[",
"]",
"tagnames",
"=",
"[",
"'sec'",
",",
"'fig'",
",",
"'table-wrap'",
"]",
"for",
"child",
"in",
"src_element",
":",
"try",
":",
"tagname",
"=",
"child",
".",
"tag",
"except",
"AttributeError",
":",
"continue",
"else",
":",
"if",
"tagname",
"not",
"in",
"tagnames",
":",
"continue",
"#Safely handle missing id attributes",
"if",
"'id'",
"not",
"in",
"child",
".",
"attrib",
":",
"child",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"self",
".",
"auto_id",
"#If in collection mode, we'll prepend the article DOI to avoid",
"#collisions",
"if",
"self",
".",
"collection",
":",
"child_id",
"=",
"'-'",
".",
"join",
"(",
"[",
"self",
".",
"article_doi",
",",
"child",
".",
"attrib",
"[",
"'id'",
"]",
"]",
")",
"else",
":",
"child_id",
"=",
"child",
".",
"attrib",
"[",
"'id'",
"]",
"#Attempt to infer the correct text as a label",
"#Skip the element if we cannot",
"child_title",
"=",
"child",
".",
"find",
"(",
"'title'",
")",
"if",
"child_title",
"is",
"None",
":",
"continue",
"# If there is no immediate title, skip this element",
"label",
"=",
"element_methods",
".",
"all_text",
"(",
"child_title",
")",
"if",
"not",
"label",
":",
"continue",
"# If no text in the title, skip this element",
"source",
"=",
"'main.{0}.xhtml#{1}'",
".",
"format",
"(",
"self",
".",
"article_doi",
",",
"child",
".",
"attrib",
"[",
"'id'",
"]",
")",
"if",
"tagname",
"==",
"'sec'",
":",
"children",
"=",
"self",
".",
"recursive_article_navmap",
"(",
"child",
",",
"depth",
"=",
"depth",
"+",
"1",
")",
"navpoints",
".",
"append",
"(",
"navpoint",
"(",
"child_id",
",",
"label",
",",
"self",
".",
"play_order",
",",
"source",
",",
"children",
")",
")",
"#figs and table-wraps do not have children",
"elif",
"tagname",
"==",
"'fig'",
":",
"# Add navpoints to list_of_figures",
"self",
".",
"figures_list",
".",
"append",
"(",
"navpoint",
"(",
"child",
".",
"attrib",
"[",
"'id'",
"]",
",",
"label",
",",
"None",
",",
"source",
",",
"[",
"]",
")",
")",
"elif",
"tagname",
"==",
"'table-wrap'",
":",
"# Add navpoints to list_of_tables",
"self",
".",
"tables_list",
".",
"append",
"(",
"navpoint",
"(",
"child",
".",
"attrib",
"[",
"'id'",
"]",
",",
"label",
",",
"None",
",",
"source",
",",
"[",
"]",
")",
")",
"return",
"navpoints"
] | This function recursively traverses the content of an input article to
add the correct elements to the NCX file's navMap and Lists. | [
"This",
"function",
"recursively",
"traverses",
"the",
"content",
"of",
"an",
"input",
"article",
"to",
"add",
"the",
"correct",
"elements",
"to",
"the",
"NCX",
"file",
"s",
"navMap",
"and",
"Lists",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/navigation/__init__.py#L123-L183 |
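The navpoint container used by these navigation methods is not defined in this record; its field order can be inferred from the positional constructor calls above and the attribute reads in render_EPUB2 in the next record. A namedtuple stand-in with illustrative values:

from collections import namedtuple

# Inferred shape; the real definition lives elsewhere in the package.
navpoint = namedtuple('navpoint', 'id label playOrder source children')

section = navpoint('s1-journal.pbio.0000001',              # id (illustrative)
                   'Introduction',                         # label
                   '2',                                    # playOrder
                   'main.journal.pbio.0000001.xhtml#s1',   # source
                   [])                                     # children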
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/navigation/__init__.py | Navigation.render_EPUB2 | def render_EPUB2(self, location):
"""
Creates the NCX specified file for EPUB2
"""
def make_navlabel(text):
"""
Creates and returns a navLabel element with the supplied text.
"""
navlabel = etree.Element('navLabel')
navlabel_text = etree.SubElement(navlabel, 'text')
navlabel_text.text = text
return navlabel
def make_navMap(nav=None):
if nav is None:
nav_element = etree.Element('navMap')
for nav_point in self.nav:
nav_element.append(make_navMap(nav=nav_point))
else:
nav_element = etree.Element('navPoint')
nav_element.attrib['id'] = nav.id
nav_element.attrib['playOrder'] = nav.playOrder
nav_element.append(make_navlabel(nav.label))
content_element = etree.SubElement(nav_element, 'content')
content_element.attrib['src'] = nav.source
for child in nav.children:
nav_element.append(make_navMap(nav=child))
return nav_element
root = etree.XML('''\
<?xml version="1.0"?>\
<ncx version="2005-1" xmlns="http://www.daisy.org/z3986/2005/ncx/">\
<head>\
<meta name="dtb:uid" content="{uid}"/>\
<meta name="dtb:depth" content="{depth}"/>\
<meta name="dtb:totalPageCount" content="0"/>\
<meta name="dtb:maxPageNumber" content="0"/>\
<meta name="dtb:generator" content="OpenAccess_EPUB {version}"/>\
</head>\
</ncx>'''.format(**{'uid': ','.join(self.all_dois),
'depth': self.nav_depth,
'version': __version__}))
document = etree.ElementTree(root)
ncx = document.getroot()
#Create the docTitle element
doctitle = etree.SubElement(ncx, 'docTitle')
doctitle_text = etree.SubElement(doctitle, 'text')
doctitle_text.text = self.title
#Create the docAuthor elements
for contributor in self.contributors:
if contributor.role == 'author':
docauthor = etree.SubElement(ncx, 'docAuthor')
docauthor_text = etree.SubElement(docauthor, 'text')
docauthor_text.text = contributor.name
#Create the navMap element
ncx.append(make_navMap())
if self.figures_list:
navlist = etree.SubElement(ncx, 'navList')
navlist.append(make_navlabel('List of Figures'))
for nav_pt in self.figures_list:
navtarget = etree.SubElement(navlist, 'navTarget')
navtarget.attrib['id'] = nav_pt.id
navtarget.append(self.make_navlabel(nav_pt.label))
content = etree.SubElement(navtarget, 'content')
content.attrib['src'] = nav_pt.source
if self.tables_list:
navlist = etree.SubElement(ncx, 'navList')
navlist.append(make_navlabel('List of Tables'))
for nav_pt in self.tables_list:
navtarget = etree.SubElement(navlist, 'navTarget')
navtarget.attrib['id'] = nav_pt.id
navtarget.append(self.make_navlabel(nav_pt.label))
content = etree.SubElement(navtarget, 'content')
content.attrib['src'] = nav_pt.source
with open(os.path.join(location, 'EPUB', 'toc.ncx'), 'wb') as output:
output.write(etree.tostring(document, encoding='utf-8', pretty_print=True)) | python | def render_EPUB2(self, location):
"""
Creates the NCX specified file for EPUB2
"""
def make_navlabel(text):
"""
Creates and returns a navLabel element with the supplied text.
"""
navlabel = etree.Element('navLabel')
navlabel_text = etree.SubElement(navlabel, 'text')
navlabel_text.text = text
return navlabel
def make_navMap(nav=None):
if nav is None:
nav_element = etree.Element('navMap')
for nav_point in self.nav:
nav_element.append(make_navMap(nav=nav_point))
else:
nav_element = etree.Element('navPoint')
nav_element.attrib['id'] = nav.id
nav_element.attrib['playOrder'] = nav.playOrder
nav_element.append(make_navlabel(nav.label))
content_element = etree.SubElement(nav_element, 'content')
content_element.attrib['src'] = nav.source
for child in nav.children:
nav_element.append(make_navMap(nav=child))
return nav_element
root = etree.XML('''\
<?xml version="1.0"?>\
<ncx version="2005-1" xmlns="http://www.daisy.org/z3986/2005/ncx/">\
<head>\
<meta name="dtb:uid" content="{uid}"/>\
<meta name="dtb:depth" content="{depth}"/>\
<meta name="dtb:totalPageCount" content="0"/>\
<meta name="dtb:maxPageNumber" content="0"/>\
<meta name="dtb:generator" content="OpenAccess_EPUB {version}"/>\
</head>\
</ncx>'''.format(**{'uid': ','.join(self.all_dois),
'depth': self.nav_depth,
'version': __version__}))
document = etree.ElementTree(root)
ncx = document.getroot()
#Create the docTitle element
doctitle = etree.SubElement(ncx, 'docTitle')
doctitle_text = etree.SubElement(doctitle, 'text')
doctitle_text.text = self.title
#Create the docAuthor elements
for contributor in self.contributors:
if contributor.role == 'author':
docauthor = etree.SubElement(ncx, 'docAuthor')
docauthor_text = etree.SubElement(docauthor, 'text')
docauthor_text.text = contributor.name
#Create the navMap element
ncx.append(make_navMap())
if self.figures_list:
navlist = etree.SubElement(ncx, 'navList')
navlist.append(make_navlabel('List of Figures'))
for nav_pt in self.figures_list:
navtarget = etree.SubElement(navlist, 'navTarget')
navtarget.attrib['id'] = nav_pt.id
navtarget.append(self.make_navlabel(nav_pt.label))
content = etree.SubElement(navtarget, 'content')
content.attrib['src'] = nav_pt.source
if self.tables_list:
navlist = etree.SubElement(ncx, 'navList')
navlist.append(make_navlabel('List of Tables'))
for nav_pt in self.tables_list:
navtarget = etree.SubElement(navlist, 'navTarget')
navtarget.attrib['id'] = nav_pt.id
navtarget.append(self.make_navlabel(nav_pt.label))
content = etree.SubElement(navtarget, 'content')
content.attrib['src'] = nav_pt.source
with open(os.path.join(location, 'EPUB', 'toc.ncx'), 'wb') as output:
output.write(etree.tostring(document, encoding='utf-8', pretty_print=True)) | [
"def",
"render_EPUB2",
"(",
"self",
",",
"location",
")",
":",
"def",
"make_navlabel",
"(",
"text",
")",
":",
"\"\"\"\n Creates and returns a navLabel element with the supplied text.\n \"\"\"",
"navlabel",
"=",
"etree",
".",
"Element",
"(",
"'navLabel'",
")",
"navlabel_text",
"=",
"etree",
".",
"SubElement",
"(",
"navlabel",
",",
"'text'",
")",
"navlabel_text",
".",
"text",
"=",
"text",
"return",
"navlabel",
"def",
"make_navMap",
"(",
"nav",
"=",
"None",
")",
":",
"if",
"nav",
"is",
"None",
":",
"nav_element",
"=",
"etree",
".",
"Element",
"(",
"'navMap'",
")",
"for",
"nav_point",
"in",
"self",
".",
"nav",
":",
"nav_element",
".",
"append",
"(",
"make_navMap",
"(",
"nav",
"=",
"nav_point",
")",
")",
"else",
":",
"nav_element",
"=",
"etree",
".",
"Element",
"(",
"'navPoint'",
")",
"nav_element",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"nav",
".",
"id",
"nav_element",
".",
"attrib",
"[",
"'playOrder'",
"]",
"=",
"nav",
".",
"playOrder",
"nav_element",
".",
"append",
"(",
"make_navlabel",
"(",
"nav",
".",
"label",
")",
")",
"content_element",
"=",
"etree",
".",
"SubElement",
"(",
"nav_element",
",",
"'content'",
")",
"content_element",
".",
"attrib",
"[",
"'src'",
"]",
"=",
"nav",
".",
"source",
"for",
"child",
"in",
"nav",
".",
"children",
":",
"nav_element",
".",
"append",
"(",
"make_navMap",
"(",
"nav",
"=",
"child",
")",
")",
"return",
"nav_element",
"root",
"=",
"etree",
".",
"XML",
"(",
"'''\\\n<?xml version=\"1.0\"?>\\\n<ncx version=\"2005-1\" xmlns=\"http://www.daisy.org/z3986/2005/ncx/\">\\\n<head>\\\n<meta name=\"dtb:uid\" content=\"{uid}\"/>\\\n<meta name=\"dtb:depth\" content=\"{depth}\"/>\\\n<meta name=\"dtb:totalPageCount\" content=\"0\"/>\\\n<meta name=\"dtb:maxPageNumber\" content=\"0\"/>\\\n<meta name=\"dtb:generator\" content=\"OpenAccess_EPUB {version}\"/>\\\n</head>\\\n</ncx>'''",
".",
"format",
"(",
"*",
"*",
"{",
"'uid'",
":",
"','",
".",
"join",
"(",
"self",
".",
"all_dois",
")",
",",
"'depth'",
":",
"self",
".",
"nav_depth",
",",
"'version'",
":",
"__version__",
"}",
")",
")",
"document",
"=",
"etree",
".",
"ElementTree",
"(",
"root",
")",
"ncx",
"=",
"document",
".",
"getroot",
"(",
")",
"#Create the docTitle element",
"doctitle",
"=",
"etree",
".",
"SubElement",
"(",
"ncx",
",",
"'docTitle'",
")",
"doctitle_text",
"=",
"etree",
".",
"SubElement",
"(",
"doctitle",
",",
"'text'",
")",
"doctitle_text",
".",
"text",
"=",
"self",
".",
"title",
"#Create the docAuthor elements",
"for",
"contributor",
"in",
"self",
".",
"contributors",
":",
"if",
"contributor",
".",
"role",
"==",
"'author'",
":",
"docauthor",
"=",
"etree",
".",
"SubElement",
"(",
"ncx",
",",
"'docAuthor'",
")",
"docauthor_text",
"=",
"etree",
".",
"SubElement",
"(",
"docauthor",
",",
"'text'",
")",
"docauthor_text",
".",
"text",
"=",
"contributor",
".",
"name",
"#Create the navMap element",
"ncx",
".",
"append",
"(",
"make_navMap",
"(",
")",
")",
"if",
"self",
".",
"figures_list",
":",
"navlist",
"=",
"etree",
".",
"SubElement",
"(",
"ncx",
",",
"'navList'",
")",
"navlist",
".",
"append",
"(",
"make_navlabel",
"(",
"'List of Figures'",
")",
")",
"for",
"nav_pt",
"in",
"self",
".",
"figures_list",
":",
"navtarget",
"=",
"etree",
".",
"SubElement",
"(",
"navlist",
",",
"'navTarget'",
")",
"navtarget",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"nav_pt",
".",
"id",
"navtarget",
".",
"append",
"(",
"self",
".",
"make_navlabel",
"(",
"nav_pt",
".",
"label",
")",
")",
"content",
"=",
"etree",
".",
"SubElement",
"(",
"navtarget",
",",
"'content'",
")",
"content",
".",
"attrib",
"[",
"'src'",
"]",
"=",
"nav_pt",
".",
"source",
"if",
"self",
".",
"tables_list",
":",
"navlist",
"=",
"etree",
".",
"SubElement",
"(",
"ncx",
",",
"'navList'",
")",
"navlist",
".",
"append",
"(",
"make_navlabel",
"(",
"'List of Tables'",
")",
")",
"for",
"nav_pt",
"in",
"self",
".",
"tables_list",
":",
"navtarget",
"=",
"etree",
".",
"SubElement",
"(",
"navlist",
",",
"'navTarget'",
")",
"navtarget",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"nav_pt",
".",
"id",
"navtarget",
".",
"append",
"(",
"self",
".",
"make_navlabel",
"(",
"nav_pt",
".",
"label",
")",
")",
"content",
"=",
"etree",
".",
"SubElement",
"(",
"navtarget",
",",
"'content'",
")",
"content",
".",
"attrib",
"[",
"'src'",
"]",
"=",
"nav_pt",
".",
"source",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"location",
",",
"'EPUB'",
",",
"'toc.ncx'",
")",
",",
"'wb'",
")",
"as",
"output",
":",
"output",
".",
"write",
"(",
"etree",
".",
"tostring",
"(",
"document",
",",
"encoding",
"=",
"'utf-8'",
",",
"pretty_print",
"=",
"True",
")",
")"
] | Creates the NCX specified file for EPUB2 | [
"Creates",
"the",
"NCX",
"specified",
"file",
"for",
"EPUB2"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/navigation/__init__.py#L185-L266 |
MSchnei/pyprf_feature | pyprf_feature/simulation/pRF_functions.py | funcGauss1D | def funcGauss1D(x, mu, sig):
""" Create 1D Gaussian. Source:
http://mathworld.wolfram.com/GaussianFunction.html
"""
arrOut = np.exp(-np.power((x - mu)/sig, 2.)/2)
# normalize
# arrOut = arrOut/(np.sqrt(2.*np.pi)*sig)
# normalize (alternative)
arrOut = arrOut/np.sum(arrOut)
return arrOut | python | def funcGauss1D(x, mu, sig):
""" Create 1D Gaussian. Source:
http://mathworld.wolfram.com/GaussianFunction.html
"""
arrOut = np.exp(-np.power((x - mu)/sig, 2.)/2)
# normalize
# arrOut = arrOut/(np.sqrt(2.*np.pi)*sig)
# normalize (alternative)
arrOut = arrOut/np.sum(arrOut)
return arrOut | [
"def",
"funcGauss1D",
"(",
"x",
",",
"mu",
",",
"sig",
")",
":",
"arrOut",
"=",
"np",
".",
"exp",
"(",
"-",
"np",
".",
"power",
"(",
"(",
"x",
"-",
"mu",
")",
"/",
"sig",
",",
"2.",
")",
"/",
"2",
")",
"# normalize",
"# arrOut = arrOut/(np.sqrt(2.*np.pi)*sig)",
"# normalize (laternative)",
"arrOut",
"=",
"arrOut",
"/",
"np",
".",
"sum",
"(",
"arrOut",
")",
"return",
"arrOut"
] | Create 1D Gaussian. Source:
http://mathworld.wolfram.com/GaussianFunction.html | [
"Create",
"1D",
"Gaussian",
".",
"Source",
":",
"http",
":",
"//",
"mathworld",
".",
"wolfram",
".",
"com",
"/",
"GaussianFunction",
".",
"html"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_functions.py#L20-L29 |
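A quick numerical check of the funcGauss1D construction: the kernel peaks at mu and, after the normalisation step, sums to one. The two computation lines mirror the function body.

import numpy as np

x = np.linspace(-5.0, 5.0, 101)
g = np.exp(-np.power((x - 0.0) / 1.5, 2.) / 2)  # Gaussian with mu=0.0, sig=1.5
g = g / np.sum(g)                               # same normalisation as funcGauss1D
print(round(float(g.sum()), 6), float(x[np.argmax(g)]))  # 1.0 0.0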
MSchnei/pyprf_feature | pyprf_feature/simulation/pRF_functions.py | funcGauss2D | def funcGauss2D(varSizeX, varSizeY, varPosX, varPosY, varSd):
""" Create 2D Gaussian kernel. Source:
http://mathworld.wolfram.com/GaussianFunction.html
"""
varSizeX = int(varSizeX)
varSizeY = int(varSizeY)
# aryX and aryY are in reversed order, this seems to be necessary:
aryY, aryX = sp.mgrid[0:varSizeX,
0:varSizeY]
# The actual creation of the Gaussian array:
aryGauss = (
(
np.power((aryX - varPosX), 2.0) +
np.power((aryY - varPosY), 2.0)
) /
(2.0 * np.power(varSd, 2.0))
)
aryGauss = np.exp(-aryGauss)
# normalize
# aryGauss = aryGauss/(2*np.pi*np.power(varSd, 2))
return aryGauss | python | def funcGauss2D(varSizeX, varSizeY, varPosX, varPosY, varSd):
""" Create 2D Gaussian kernel. Source:
http://mathworld.wolfram.com/GaussianFunction.html
"""
varSizeX = int(varSizeX)
varSizeY = int(varSizeY)
# aryX and aryY are in reversed order, this seems to be necessary:
aryY, aryX = sp.mgrid[0:varSizeX,
0:varSizeY]
# The actual creation of the Gaussian array:
aryGauss = (
(
np.power((aryX - varPosX), 2.0) +
np.power((aryY - varPosY), 2.0)
) /
(2.0 * np.power(varSd, 2.0))
)
aryGauss = np.exp(-aryGauss)
# normalize
# aryGauss = aryGauss/(2*np.pi*np.power(varSd, 2))
return aryGauss | [
"def",
"funcGauss2D",
"(",
"varSizeX",
",",
"varSizeY",
",",
"varPosX",
",",
"varPosY",
",",
"varSd",
")",
":",
"varSizeX",
"=",
"int",
"(",
"varSizeX",
")",
"varSizeY",
"=",
"int",
"(",
"varSizeY",
")",
"# aryX and aryY are in reversed order, this seems to be necessary:",
"aryY",
",",
"aryX",
"=",
"sp",
".",
"mgrid",
"[",
"0",
":",
"varSizeX",
",",
"0",
":",
"varSizeY",
"]",
"# The actual creation of the Gaussian array:",
"aryGauss",
"=",
"(",
"(",
"np",
".",
"power",
"(",
"(",
"aryX",
"-",
"varPosX",
")",
",",
"2.0",
")",
"+",
"np",
".",
"power",
"(",
"(",
"aryY",
"-",
"varPosY",
")",
",",
"2.0",
")",
")",
"/",
"(",
"2.0",
"*",
"np",
".",
"power",
"(",
"varSd",
",",
"2.0",
")",
")",
")",
"aryGauss",
"=",
"np",
".",
"exp",
"(",
"-",
"aryGauss",
")",
"# normalize",
"# aryGauss = aryGauss/(2*np.pi*np.power(varSd, 2))",
"return",
"aryGauss"
] | Create 2D Gaussian kernel. Source:
http://mathworld.wolfram.com/GaussianFunction.html | [
"Create",
"2D",
"Gaussian",
"kernel",
".",
"Source",
":",
"http",
":",
"//",
"mathworld",
".",
"wolfram",
".",
"com",
"/",
"GaussianFunction",
".",
"html"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_functions.py#L33-L58 |
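Because of the reversed mgrid assignment noted in the comment, the first array index of the output runs over y and the second over x. A quick check (np.mgrid behaves the same as sp.mgrid here):

import numpy as np

aryY, aryX = np.mgrid[0:64, 0:64]                  # same ordering as in funcGauss2D
aryGauss = np.exp(-((aryX - 20) ** 2.0 + (aryY - 40) ** 2.0) / (2.0 * 5.0 ** 2.0))
print(np.unravel_index(np.argmax(aryGauss), aryGauss.shape))  # (40, 20): row = y, column = x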
MSchnei/pyprf_feature | pyprf_feature/simulation/pRF_functions.py | funcConvPar | def funcConvPar(aryDm,
vecHrf,
varNumVol):
"""
Function for convolution of pixel-wise 'design matrix' with HRF model.
"""
# In order to avoid an artefact at the end of the time series, we have to
# concatenate an empty array to both the design matrix and the HRF model
# before convolution.
aryDm = np.concatenate((aryDm, np.zeros((aryDm.shape[0], 100))), axis=1)
vecHrf = np.concatenate((vecHrf, np.zeros((100,))))
aryDmConv = np.empty((aryDm.shape[0], varNumVol))
for idx in range(0, aryDm.shape[0]):
vecDm = aryDm[idx, :]
# Convolve design matrix with HRF model:
aryDmConv[idx, :] = np.convolve(vecDm, vecHrf,
mode='full')[:varNumVol]
return aryDmConv | python | def funcConvPar(aryDm,
vecHrf,
varNumVol):
"""
Function for convolution of pixel-wise 'design matrix' with HRF model.
"""
# In order to avoid an artefact at the end of the time series, we have to
# concatenate an empty array to both the design matrix and the HRF model
# before convolution.
aryDm = np.concatenate((aryDm, np.zeros((aryDm.shape[0], 100))), axis=1)
vecHrf = np.concatenate((vecHrf, np.zeros((100,))))
aryDmConv = np.empty((aryDm.shape[0], varNumVol))
for idx in range(0, aryDm.shape[0]):
vecDm = aryDm[idx, :]
# Convolve design matrix with HRF model:
aryDmConv[idx, :] = np.convolve(vecDm, vecHrf,
mode='full')[:varNumVol]
return aryDmConv | [
"def",
"funcConvPar",
"(",
"aryDm",
",",
"vecHrf",
",",
"varNumVol",
")",
":",
"# In order to avoid an artefact at the end of the time series, we have to",
"# concatenate an empty array to both the design matrix and the HRF model",
"# before convolution.",
"aryDm",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryDm",
",",
"np",
".",
"zeros",
"(",
"(",
"aryDm",
".",
"shape",
"[",
"0",
"]",
",",
"100",
")",
")",
")",
",",
"axis",
"=",
"1",
")",
"vecHrf",
"=",
"np",
".",
"concatenate",
"(",
"(",
"vecHrf",
",",
"np",
".",
"zeros",
"(",
"(",
"100",
",",
")",
")",
")",
")",
"aryDmConv",
"=",
"np",
".",
"empty",
"(",
"(",
"aryDm",
".",
"shape",
"[",
"0",
"]",
",",
"varNumVol",
")",
")",
"for",
"idx",
"in",
"range",
"(",
"0",
",",
"aryDm",
".",
"shape",
"[",
"0",
"]",
")",
":",
"vecDm",
"=",
"aryDm",
"[",
"idx",
",",
":",
"]",
"# Convolve design matrix with HRF model:",
"aryDmConv",
"[",
"idx",
",",
":",
"]",
"=",
"np",
".",
"convolve",
"(",
"vecDm",
",",
"vecHrf",
",",
"mode",
"=",
"'full'",
")",
"[",
":",
"varNumVol",
"]",
"return",
"aryDmConv"
] | Function for convolution of pixel-wise 'design matrix' with HRF model. | [
"Function",
"for",
"convolution",
"of",
"pixel",
"-",
"wise",
"design",
"matrix",
"with",
"HRF",
"model",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_functions.py#L112-L131 |
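A single-pixel sketch of the padding-and-convolution step performed by funcConvPar; the HRF vector here is an arbitrary toy kernel, not the HRF model used elsewhere in the package.

import numpy as np

varNumVol = 40
aryDm = np.zeros((1, varNumVol))
aryDm[0, 10:15] = 1.0                                    # 5-volume boxcar for one pixel
vecHrf = np.array([0.0, 0.2, 0.6, 1.0, 0.8, 0.4, 0.1])   # toy HRF, stand-in only

# same zero-padding and convolution as in the function
aryDmPad = np.concatenate((aryDm, np.zeros((1, 100))), axis=1)
vecHrfPad = np.concatenate((vecHrf, np.zeros(100)))
conv = np.convolve(aryDmPad[0], vecHrfPad, mode='full')[:varNumVol]
print(conv.shape)                                        # (40,)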
MSchnei/pyprf_feature | pyprf_feature/simulation/pRF_functions.py | simulateAR1 | def simulateAR1(n,
beta,
sigma,
c,
burnin,
varNumCNR,
varNumTP,
):
"""
Simulates an AR(1) model using the parameters beta, c, and sigma.
Returns an array with length n
n := number of time points
beta := correlation weight
sigma := standard deviation of the noise, can be a vector
c := constant added to the noise, default 0
based on:
source1: https://github.com/ndronen/misc/blob/master/python/ar1.py
source2: http://stats.stackexchange.com/questions/22742/
problem-simulating-ar2-process
source3: https://kurtverstegen.wordpress.com/2013/12/07/simulation/
"""
# Output array with noise time courses
noise = np.empty((varNumCNR, varNumTP))
if burnin == 1:
burnin = 100
n = n + burnin
noiseTemp = c + sp.random.normal(0, 1, n)
sims = np.zeros(n)
sims[0] = noiseTemp[0]
for i in range(1, n):
sims[i] = beta*sims[i-1] + noiseTemp[i]
sims = sims[burnin:]
noise = sigma[:, np.newaxis]*sp.stats.mstats.zscore(sims)
return noise | python | def simulateAR1(n,
beta,
sigma,
c,
burnin,
varNumCNR,
varNumTP,
):
"""
Simulates an AR(1) model using the parameters beta, c, and sigma.
Returns an array with length n
n := number of time points
beta := correlation weight
sigma := standard deviation of the noise, can be a vector
c := constant added to the noise, default 0
based on:
source1: https://github.com/ndronen/misc/blob/master/python/ar1.py
source2: http://stats.stackexchange.com/questions/22742/
problem-simulating-ar2-process
source3: https://kurtverstegen.wordpress.com/2013/12/07/simulation/
"""
# Output array with noise time courses
noise = np.empty((varNumCNR, varNumTP))
if burnin == 1:
burnin = 100
n = n + burnin
noiseTemp = c + sp.random.normal(0, 1, n)
sims = np.zeros(n)
sims[0] = noiseTemp[0]
for i in range(1, n):
sims[i] = beta*sims[i-1] + noiseTemp[i]
sims = sims[burnin:]
noise = sigma[:, np.newaxis]*sp.stats.mstats.zscore(sims)
return noise | [
"def",
"simulateAR1",
"(",
"n",
",",
"beta",
",",
"sigma",
",",
"c",
",",
"burnin",
",",
"varNumCNR",
",",
"varNumTP",
",",
")",
":",
"# Output array with noise time courses",
"noise",
"=",
"np",
".",
"empty",
"(",
"(",
"varNumCNR",
",",
"varNumTP",
")",
")",
"if",
"burnin",
"==",
"1",
":",
"burnin",
"=",
"100",
"n",
"=",
"n",
"+",
"burnin",
"noiseTemp",
"=",
"c",
"+",
"sp",
".",
"random",
".",
"normal",
"(",
"0",
",",
"1",
",",
"n",
")",
"sims",
"=",
"np",
".",
"zeros",
"(",
"n",
")",
"sims",
"[",
"0",
"]",
"=",
"noiseTemp",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n",
")",
":",
"sims",
"[",
"i",
"]",
"=",
"beta",
"*",
"sims",
"[",
"i",
"-",
"1",
"]",
"+",
"noiseTemp",
"[",
"i",
"]",
"sims",
"=",
"sims",
"[",
"burnin",
":",
"]",
"noise",
"=",
"sigma",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"*",
"sp",
".",
"stats",
".",
"mstats",
".",
"zscore",
"(",
"sims",
")",
"return",
"noise"
] | Simulates an AR(1) model using the parameters beta, c, and sigma.
Returns an array with length n
n := number of time points
beta := correlation weight
sigma := standard deviation of the noise, can be a vector
c := constant added to the noise, default 0
based on:
source1: https://github.com/ndronen/misc/blob/master/python/ar1.py
source2: http://stats.stackexchange.com/questions/22742/
problem-simulating-ar2-process
source3: https://kurtverstegen.wordpress.com/2013/12/07/simulation/ | [
"Simulates",
"an",
"AR",
"(",
"1",
")",
"model",
"using",
"the",
"parameters",
"beta",
"c",
"and",
"sigma",
".",
"Returns",
"an",
"array",
"with",
"length",
"n"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_functions.py#L149-L185 |
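The loop above implements the AR(1) recurrence sims[i] = beta * sims[i-1] + noise[i]; after z-scoring, each row of the output is that series rescaled to one entry of sigma. A toy run mirroring the same logic:

import numpy as np
from scipy.stats import mstats

n, beta, burnin = 200, 0.5, 100
eps = np.random.normal(0, 1, n + burnin)
sims = np.zeros(n + burnin)
sims[0] = eps[0]
for i in range(1, n + burnin):
    sims[i] = beta * sims[i - 1] + eps[i]   # AR(1) recurrence
sims = sims[burnin:]                        # discard burn-in samples
sigma = np.array([0.5, 2.0])                # two noise levels
noise = sigma[:, np.newaxis] * mstats.zscore(sims)
print(noise.shape)                          # (2, 200)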
MSchnei/pyprf_feature | pyprf_feature/simulation/pRF_functions.py | funcNrlTcMotPred | def funcNrlTcMotPred(idxPrc,
varPixX,
varPixY,
NrlMdlChunk,
varNumTP,
aryBoxCar, # aryCond
path,
varNumNrlMdls,
varNumMtDrctn,
varPar,
queOut):
"""
Function for creating neural time course models.
This function should be used to create neural models if different
predictors for every motion direction are included.
"""
# # if hd5 method is used: open file for reading
# filename = 'aryBoxCar' + str(idxPrc) + '.hdf5'
# hdf5_path = os.path.join(path, filename)
# fileH = tables.openFile(hdf5_path, mode='r')
# Output array with pRF model time courses at all modelled standard
# deviations for current pixel position:
aryOut = np.empty((len(NrlMdlChunk), varNumTP, varNumMtDrctn),
dtype='float32')
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 1:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Number of pRF models to fit:
varNumLoops = varNumNrlMdls/varPar
# Vector with pRF values at which to give status feedback:
vecStatus = np.linspace(0,
varNumLoops,
num=(varStsStpSze+1),
endpoint=True)
vecStatus = np.ceil(vecStatus)
vecStatus = vecStatus.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatusPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatusPrc = np.ceil(vecStatusPrc)
vecStatusPrc = vecStatusPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through all Gauss parameters that are in this chunk
for idx, NrlMdlTrpl in enumerate(NrlMdlChunk):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Status indicator:
if varCntSts02 == vecStatus[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatusPrc[varCntSts01]) +
' % --- ' +
str(vecStatus[varCntSts01]) +
' loops out of ' +
str(varNumLoops))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# x pos of Gauss model: NrlMdlTrpl[0]
# y pos of Gauss model: NrlMdlTrpl[1]
# std of Gauss model: NrlMdlTrpl[2]
# index of tng crv model: NrlMdlTrpl[3]
varTmpX = int(np.around(NrlMdlTrpl[0], 0))
varTmpY = int(np.around(NrlMdlTrpl[1], 0))
# Create pRF model (2D):
aryGauss = funcGauss2D(varPixX,
varPixY,
varTmpX,
varTmpY,
NrlMdlTrpl[2])
# Multiply pixel-wise box car model with Gaussian pRF models:
aryNrlTcTmp = np.multiply(aryBoxCar, aryGauss[:, :, None, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# neural time course model (i.e. not yet scaled for the size of
# the pRF).
aryNrlTcTmp = np.sum(aryNrlTcTmp, axis=(0, 1))
# Normalise the nrl time course model to the size of the pRF. This
# gives us the ratio of 'activation' of the pRF at each time point,
# or, in other words, the neural time course model.
aryNrlTcTmp = np.divide(aryNrlTcTmp,
np.sum(aryGauss, axis=(0, 1)))
# Put model time courses into the function's output array:
aryOut[idx, :, :] = aryNrlTcTmp
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# Output list:
lstOut = [idxPrc,
aryOut,
]
queOut.put(lstOut) | python | def funcNrlTcMotPred(idxPrc,
varPixX,
varPixY,
NrlMdlChunk,
varNumTP,
aryBoxCar, # aryCond
path,
varNumNrlMdls,
varNumMtDrctn,
varPar,
queOut):
"""
Function for creating neural time course models.
This function should be used to create neural models if different
predictors for every motion direction are included.
"""
# # if hd5 method is used: open file for reading
# filename = 'aryBoxCar' + str(idxPrc) + '.hdf5'
# hdf5_path = os.path.join(path, filename)
# fileH = tables.openFile(hdf5_path, mode='r')
# Output array with pRF model time courses at all modelled standard
# deviations for current pixel position:
aryOut = np.empty((len(NrlMdlChunk), varNumTP, varNumMtDrctn),
dtype='float32')
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 1:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Number of pRF models to fit:
varNumLoops = varNumNrlMdls/varPar
# Vector with pRF values at which to give status feedback:
vecStatus = np.linspace(0,
varNumLoops,
num=(varStsStpSze+1),
endpoint=True)
vecStatus = np.ceil(vecStatus)
vecStatus = vecStatus.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatusPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatusPrc = np.ceil(vecStatusPrc)
vecStatusPrc = vecStatusPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through all Gauss parameters that are in this chunk
for idx, NrlMdlTrpl in enumerate(NrlMdlChunk):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Status indicator:
if varCntSts02 == vecStatus[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatusPrc[varCntSts01]) +
' % --- ' +
str(vecStatus[varCntSts01]) +
' loops out of ' +
str(varNumLoops))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# x pos of Gauss model: NrlMdlTrpl[0]
# y pos of Gauss model: NrlMdlTrpl[1]
# std of Gauss model: NrlMdlTrpl[2]
# index of tng crv model: NrlMdlTrpl[3]
varTmpX = int(np.around(NrlMdlTrpl[0], 0))
varTmpY = int(np.around(NrlMdlTrpl[1], 0))
# Create pRF model (2D):
aryGauss = funcGauss2D(varPixX,
varPixY,
varTmpX,
varTmpY,
NrlMdlTrpl[2])
# Multiply pixel-wise box car model with Gaussian pRF models:
aryNrlTcTmp = np.multiply(aryBoxCar, aryGauss[:, :, None, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# neural time course model (i.e. not yet scaled for the size of
# the pRF).
aryNrlTcTmp = np.sum(aryNrlTcTmp, axis=(0, 1))
# Normalise the nrl time course model to the size of the pRF. This
# gives us the ratio of 'activation' of the pRF at each time point,
# or, in other words, the neural time course model.
aryNrlTcTmp = np.divide(aryNrlTcTmp,
np.sum(aryGauss, axis=(0, 1)))
# Put model time courses into the function's output array:
aryOut[idx, :, :] = aryNrlTcTmp
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# Output list:
lstOut = [idxPrc,
aryOut,
]
queOut.put(lstOut) | [
"def",
"funcNrlTcMotPred",
"(",
"idxPrc",
",",
"varPixX",
",",
"varPixY",
",",
"NrlMdlChunk",
",",
"varNumTP",
",",
"aryBoxCar",
",",
"# aryCond",
"path",
",",
"varNumNrlMdls",
",",
"varNumMtDrctn",
",",
"varPar",
",",
"queOut",
")",
":",
"# # if hd5 method is used: open file for reading",
"# filename = 'aryBoxCar' + str(idxPrc) + '.hdf5'",
"# hdf5_path = os.path.join(path, filename)",
"# fileH = tables.openFile(hdf5_path, mode='r')",
"# Output array with pRF model time courses at all modelled standard",
"# deviations for current pixel position:",
"aryOut",
"=",
"np",
".",
"empty",
"(",
"(",
"len",
"(",
"NrlMdlChunk",
")",
",",
"varNumTP",
",",
"varNumMtDrctn",
")",
",",
"dtype",
"=",
"'float32'",
")",
"# Prepare status indicator if this is the first of the parallel processes:",
"if",
"idxPrc",
"==",
"1",
":",
"# We create a status indicator for the time consuming pRF model finding",
"# algorithm. Number of steps of the status indicator:",
"varStsStpSze",
"=",
"20",
"# Number of pRF models to fit:",
"varNumLoops",
"=",
"varNumNrlMdls",
"/",
"varPar",
"# Vector with pRF values at which to give status feedback:",
"vecStatus",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"varNumLoops",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatus",
"=",
"np",
".",
"ceil",
"(",
"vecStatus",
")",
"vecStatus",
"=",
"vecStatus",
".",
"astype",
"(",
"int",
")",
"# Vector with corresponding percentage values at which to give status",
"# feedback:",
"vecStatusPrc",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"100",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatusPrc",
"=",
"np",
".",
"ceil",
"(",
"vecStatusPrc",
")",
"vecStatusPrc",
"=",
"vecStatusPrc",
".",
"astype",
"(",
"int",
")",
"# Counter for status indicator:",
"varCntSts01",
"=",
"0",
"varCntSts02",
"=",
"0",
"# Loop through all Gauss parameters that are in this chunk",
"for",
"idx",
",",
"NrlMdlTrpl",
"in",
"enumerate",
"(",
"NrlMdlChunk",
")",
":",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"1",
":",
"# Status indicator:",
"if",
"varCntSts02",
"==",
"vecStatus",
"[",
"varCntSts01",
"]",
":",
"# Prepare status message:",
"strStsMsg",
"=",
"(",
"'---------Progress: '",
"+",
"str",
"(",
"vecStatusPrc",
"[",
"varCntSts01",
"]",
")",
"+",
"' % --- '",
"+",
"str",
"(",
"vecStatus",
"[",
"varCntSts01",
"]",
")",
"+",
"' loops out of '",
"+",
"str",
"(",
"varNumLoops",
")",
")",
"print",
"(",
"strStsMsg",
")",
"# Only increment counter if the last value has not been",
"# reached yet:",
"if",
"varCntSts01",
"<",
"varStsStpSze",
":",
"varCntSts01",
"=",
"varCntSts01",
"+",
"int",
"(",
"1",
")",
"# x pos of Gauss model: NrlMdlTrpl[0]",
"# y pos of Gauss model: NrlMdlTrpl[1]",
"# std of Gauss model: NrlMdlTrpl[2]",
"# index of tng crv model: NrlMdlTrpl[3]",
"varTmpX",
"=",
"int",
"(",
"np",
".",
"around",
"(",
"NrlMdlTrpl",
"[",
"0",
"]",
",",
"0",
")",
")",
"varTmpY",
"=",
"int",
"(",
"np",
".",
"around",
"(",
"NrlMdlTrpl",
"[",
"1",
"]",
",",
"0",
")",
")",
"# Create pRF model (2D):",
"aryGauss",
"=",
"funcGauss2D",
"(",
"varPixX",
",",
"varPixY",
",",
"varTmpX",
",",
"varTmpY",
",",
"NrlMdlTrpl",
"[",
"2",
"]",
")",
"# Multiply pixel-wise box car model with Gaussian pRF models:",
"aryNrlTcTmp",
"=",
"np",
".",
"multiply",
"(",
"aryBoxCar",
",",
"aryGauss",
"[",
":",
",",
":",
",",
"None",
",",
"None",
"]",
")",
"# Calculate sum across x- and y-dimensions - the 'area under the",
"# Gaussian surface'. This is essentially an unscaled version of the",
"# neural time course model (i.e. not yet scaled for the size of",
"# the pRF).",
"aryNrlTcTmp",
"=",
"np",
".",
"sum",
"(",
"aryNrlTcTmp",
",",
"axis",
"=",
"(",
"0",
",",
"1",
")",
")",
"# Normalise the nrl time course model to the size of the pRF. This",
"# gives us the ratio of 'activation' of the pRF at each time point,",
"# or, in other words, the neural time course model.",
"aryNrlTcTmp",
"=",
"np",
".",
"divide",
"(",
"aryNrlTcTmp",
",",
"np",
".",
"sum",
"(",
"aryGauss",
",",
"axis",
"=",
"(",
"0",
",",
"1",
")",
")",
")",
"# Put model time courses into the function's output array:",
"aryOut",
"[",
"idx",
",",
":",
",",
":",
"]",
"=",
"aryNrlTcTmp",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"1",
":",
"# Increment status indicator counter:",
"varCntSts02",
"=",
"varCntSts02",
"+",
"1",
"# Output list:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"aryOut",
",",
"]",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Function for creating neural time course models.
This function should be used to create neural models if different
predictors for every motion direction are included. | [
"Function",
"for",
"creating",
"neural",
"time",
"course",
"models",
".",
"This",
"function",
"should",
"be",
"used",
"to",
"create",
"neural",
"models",
"if",
"different",
"predictors",
"for",
"every",
"motion",
"direction",
"are",
"included",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_functions.py#L359-L485 |
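For illustration, the spatial-summation step at the heart of the function catalogued above can be sketched with a few lines of standalone NumPy. The grid size, the number of volumes and motion directions, and the gauss_2d helper below are placeholders invented for this example rather than values taken from this row.

import numpy as np

def gauss_2d(pix_x, pix_y, pos_x, pos_y, sd):
    # Isotropic 2D Gaussian over the pixel grid (stand-in for funcGauss2D).
    x, y = np.meshgrid(np.arange(pix_x), np.arange(pix_y), indexing='ij')
    return np.exp(-((x - pos_x) ** 2 + (y - pos_y) ** 2) / (2.0 * sd ** 2))

# Hypothetical pixel-wise box-car design: x by y by volumes by motion directions.
ary_box_car = np.random.rand(64, 64, 100, 8).astype('float32')
ary_gauss = gauss_2d(64, 64, pos_x=20.0, pos_y=32.0, sd=5.0)

# Weight every pixel time course by the Gaussian pRF, sum over space, and
# normalise by the area under the Gaussian -> one neural time course model
# per motion direction (volumes by directions).
ary_nrl_tc = np.multiply(ary_box_car, ary_gauss[:, :, None, None])
ary_nrl_tc = np.sum(ary_nrl_tc, axis=(0, 1))
ary_nrl_tc = np.divide(ary_nrl_tc, np.sum(ary_gauss, axis=(0, 1)))
print(ary_nrl_tc.shape)  # (100, 8)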
MSchnei/pyprf_feature | pyprf_feature/simulation/pRF_functions.py | funcFindPrf | def funcFindPrf(idxPrc,
aryFuncChnk,
aryPrfTc,
aryMdls,
queOut):
"""
Function for finding best pRF model for voxel time course.
This function should be used if there is only one predictor.
"""
# Number of voxels to be fitted in this chunk:
varNumVoxChnk = aryFuncChnk.shape[0]
# Number of volumes:
varNumVol = aryFuncChnk.shape[1]
# Vectors for pRF finding results [number-of-voxels times one]:
vecBstXpos = np.zeros(varNumVoxChnk)
vecBstYpos = np.zeros(varNumVoxChnk)
vecBstSd = np.zeros(varNumVoxChnk)
# vecBstR2 = np.zeros(varNumVoxChnk)
# Vector for best R-square value. For each model fit, the R-square value is
# compared to this, and updated if it is lower than the best-fitting
# solution so far. We initialise with an arbitrary, high value
vecBstRes = np.add(np.zeros(varNumVoxChnk),
100000.0)
# We reshape the voxel time courses, so that time goes down the column,
# i.e. from top to bottom.
aryFuncChnk = aryFuncChnk.T
# Constant term for the model:
vecConst = np.ones((varNumVol), dtype=np.float32)
# Change type to float 32:
aryFuncChnk = aryFuncChnk.astype(np.float32)
aryPrfTc = aryPrfTc.astype(np.float32)
# Number of pRF models to fit:
varNumMdls = len(aryMdls)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through pRF models:
for idxMdls in range(0, varNumMdls):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# Current pRF time course model:
vecMdlTc = aryPrfTc[idxMdls, :].flatten()
# We create a design matrix including the current pRF time
# course model, and a constant term:
aryDsgn = np.vstack([vecMdlTc,
vecConst]).T
# Calculation of the ratio of the explained variance (R square)
# for the current model for all voxel time courses.
# print('------------np.linalg.lstsq on pRF: ' +
# str(idxX) +
# 'x ' +
# str(idxY) +
# 'y ' +
# str(idxSd) +
# 'z --- START')
# varTmeTmp01 = time.time()
# Change type to float32:
# aryDsgn = aryDsgn.astype(np.float32)
# Calculate the least-squares solution for all voxels:
vecTmpRes = np.linalg.lstsq(aryDsgn, aryFuncChnk)[1]
# varTmeTmp02 = time.time()
# varTmeTmp03 = np.around((varTmeTmp02 - varTmeTmp01),
# decimals=2)
# print('------------np.linalg.lstsq on pRF: ' +
# str(idxX) +
# 'x ' +
# str(idxY) +
# 'y ' +
# str(idxSd) +
# 'z --- DONE elapsed time: ' +
# str(varTmeTmp03) +
# 's')
# Check whether current residuals are lower than previously
# calculated ones:
vecLgcTmpRes = np.less(vecTmpRes, vecBstRes)
# Replace best x and y position values, and SD values.
vecBstXpos[vecLgcTmpRes] = aryMdls[idxMdls][0]
vecBstYpos[vecLgcTmpRes] = aryMdls[idxMdls][1]
vecBstSd[vecLgcTmpRes] = aryMdls[idxMdls][2]
# Replace best residual values:
vecBstRes[vecLgcTmpRes] = vecTmpRes[vecLgcTmpRes]
# varTmeTmp04 = time.time()
# varTmeTmp05 = np.around((varTmeTmp04 - varTmeTmp02),
# decimals=2)
# print('------------selection of best-fitting pRF model: ' +
# str(idxX) +
# 'x ' +
# str(idxY) +
# 'y ' +
# str(idxSd) +
# 'z --- elapsed time: ' +
# str(varTmeTmp05) +
# 's')
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# After finding the best fitting model for each voxel, we still have to
# calculate the coefficient of determination (R-squared) for each voxel. We
# start by calculating the total sum of squares (i.e. the deviation of the
# data from the mean). The mean of each time course:
vecFuncMean = np.mean(aryFuncChnk, axis=0)
# Deviation from the mean for each datapoint:
vecFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(vecFuncDev,
2.0),
axis=0)
# Coefficient of determination:
vecBstR2 = np.subtract(1.0,
np.divide(vecBstRes,
vecSsTot))
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2]
queOut.put(lstOut) | python | def funcFindPrf(idxPrc,
aryFuncChnk,
aryPrfTc,
aryMdls,
queOut):
"""
Function for finding best pRF model for voxel time course.
This function should be used if there is only one predictor.
"""
# Number of voxels to be fitted in this chunk:
varNumVoxChnk = aryFuncChnk.shape[0]
# Number of volumes:
varNumVol = aryFuncChnk.shape[1]
# Vectors for pRF finding results [number-of-voxels times one]:
vecBstXpos = np.zeros(varNumVoxChnk)
vecBstYpos = np.zeros(varNumVoxChnk)
vecBstSd = np.zeros(varNumVoxChnk)
# vecBstR2 = np.zeros(varNumVoxChnk)
# Vector for best R-square value. For each model fit, the R-square value is
# compared to this, and updated if it is lower than the best-fitting
# solution so far. We initialise with an arbitrary, high value
vecBstRes = np.add(np.zeros(varNumVoxChnk),
100000.0)
# We reshape the voxel time courses, so that time goes down the column,
# i.e. from top to bottom.
aryFuncChnk = aryFuncChnk.T
# Constant term for the model:
vecConst = np.ones((varNumVol), dtype=np.float32)
# Change type to float 32:
aryFuncChnk = aryFuncChnk.astype(np.float32)
aryPrfTc = aryPrfTc.astype(np.float32)
# Number of pRF models to fit:
varNumMdls = len(aryMdls)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through pRF models:
for idxMdls in range(0, varNumMdls):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# Current pRF time course model:
vecMdlTc = aryPrfTc[idxMdls, :].flatten()
# We create a design matrix including the current pRF time
# course model, and a constant term:
aryDsgn = np.vstack([vecMdlTc,
vecConst]).T
# Calculation of the ratio of the explained variance (R square)
# for the current model for all voxel time courses.
# print('------------np.linalg.lstsq on pRF: ' +
# str(idxX) +
# 'x ' +
# str(idxY) +
# 'y ' +
# str(idxSd) +
# 'z --- START')
# varTmeTmp01 = time.time()
# Change type to float32:
# aryDsgn = aryDsgn.astype(np.float32)
# Calculate the least-squares solution for all voxels:
vecTmpRes = np.linalg.lstsq(aryDsgn, aryFuncChnk)[1]
# varTmeTmp02 = time.time()
# varTmeTmp03 = np.around((varTmeTmp02 - varTmeTmp01),
# decimals=2)
# print('------------np.linalg.lstsq on pRF: ' +
# str(idxX) +
# 'x ' +
# str(idxY) +
# 'y ' +
# str(idxSd) +
# 'z --- DONE elapsed time: ' +
# str(varTmeTmp03) +
# 's')
# Check whether current residuals are lower than previously
# calculated ones:
vecLgcTmpRes = np.less(vecTmpRes, vecBstRes)
# Replace best x and y position values, and SD values.
vecBstXpos[vecLgcTmpRes] = aryMdls[idxMdls][0]
vecBstYpos[vecLgcTmpRes] = aryMdls[idxMdls][1]
vecBstSd[vecLgcTmpRes] = aryMdls[idxMdls][2]
# Replace best residual values:
vecBstRes[vecLgcTmpRes] = vecTmpRes[vecLgcTmpRes]
# varTmeTmp04 = time.time()
# varTmeTmp05 = np.around((varTmeTmp04 - varTmeTmp02),
# decimals=2)
# print('------------selection of best-fitting pRF model: ' +
# str(idxX) +
# 'x ' +
# str(idxY) +
# 'y ' +
# str(idxSd) +
# 'z --- elapsed time: ' +
# str(varTmeTmp05) +
# 's')
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# After finding the best fitting model for each voxel, we still have to
# calculate the coefficient of determination (R-squared) for each voxel. We
# start by calculating the total sum of squares (i.e. the deviation of the
# data from the mean). The mean of each time course:
vecFuncMean = np.mean(aryFuncChnk, axis=0)
# Deviation from the mean for each datapoint:
vecFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(vecFuncDev,
2.0),
axis=0)
# Coefficient of determination:
vecBstR2 = np.subtract(1.0,
np.divide(vecBstRes,
vecSsTot))
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2]
queOut.put(lstOut) | [
"def",
"funcFindPrf",
"(",
"idxPrc",
",",
"aryFuncChnk",
",",
"aryPrfTc",
",",
"aryMdls",
",",
"queOut",
")",
":",
"# Number of voxels to be fitted in this chunk:",
"varNumVoxChnk",
"=",
"aryFuncChnk",
".",
"shape",
"[",
"0",
"]",
"# Number of volumes:",
"varNumVol",
"=",
"aryFuncChnk",
".",
"shape",
"[",
"1",
"]",
"# Vectors for pRF finding results [number-of-voxels times one]:",
"vecBstXpos",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
"vecBstYpos",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
"vecBstSd",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
"# vecBstR2 = np.zeros(varNumVoxChnk)",
"# Vector for best R-square value. For each model fit, the R-square value is",
"# compared to this, and updated if it is lower than the best-fitting",
"# solution so far. We initialise with an arbitrary, high value",
"vecBstRes",
"=",
"np",
".",
"add",
"(",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
",",
"100000.0",
")",
"# We reshape the voxel time courses, so that time goes down the column,",
"# i.e. from top to bottom.",
"aryFuncChnk",
"=",
"aryFuncChnk",
".",
"T",
"# Constant term for the model:",
"vecConst",
"=",
"np",
".",
"ones",
"(",
"(",
"varNumVol",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# Change type to float 32:",
"aryFuncChnk",
"=",
"aryFuncChnk",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"aryPrfTc",
"=",
"aryPrfTc",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# Number of pRF models to fit:",
"varNumMdls",
"=",
"len",
"(",
"aryMdls",
")",
"# Prepare status indicator if this is the first of the parallel processes:",
"if",
"idxPrc",
"==",
"0",
":",
"# We create a status indicator for the time consuming pRF model finding",
"# algorithm. Number of steps of the status indicator:",
"varStsStpSze",
"=",
"20",
"# Vector with pRF values at which to give status feedback:",
"vecStatPrf",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"varNumMdls",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrf",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrf",
")",
"vecStatPrf",
"=",
"vecStatPrf",
".",
"astype",
"(",
"int",
")",
"# Vector with corresponding percentage values at which to give status",
"# feedback:",
"vecStatPrc",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"100",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrc",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrc",
")",
"vecStatPrc",
"=",
"vecStatPrc",
".",
"astype",
"(",
"int",
")",
"# Counter for status indicator:",
"varCntSts01",
"=",
"0",
"varCntSts02",
"=",
"0",
"# Loop through pRF models:",
"for",
"idxMdls",
"in",
"range",
"(",
"0",
",",
"varNumMdls",
")",
":",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Status indicator:",
"if",
"varCntSts02",
"==",
"vecStatPrf",
"[",
"varCntSts01",
"]",
":",
"# Prepare status message:",
"strStsMsg",
"=",
"(",
"'---------Progress: '",
"+",
"str",
"(",
"vecStatPrc",
"[",
"varCntSts01",
"]",
")",
"+",
"' % --- '",
"+",
"str",
"(",
"vecStatPrf",
"[",
"varCntSts01",
"]",
")",
"+",
"' pRF models out of '",
"+",
"str",
"(",
"varNumMdls",
")",
")",
"print",
"(",
"strStsMsg",
")",
"# Only increment counter if the last value has not been",
"# reached yet:",
"if",
"varCntSts01",
"<",
"varStsStpSze",
":",
"varCntSts01",
"=",
"varCntSts01",
"+",
"int",
"(",
"1",
")",
"# Current pRF time course model:",
"vecMdlTc",
"=",
"aryPrfTc",
"[",
"idxMdls",
",",
":",
"]",
".",
"flatten",
"(",
")",
"# We create a design matrix including the current pRF time",
"# course model, and a constant term:",
"aryDsgn",
"=",
"np",
".",
"vstack",
"(",
"[",
"vecMdlTc",
",",
"vecConst",
"]",
")",
".",
"T",
"# Calculation of the ratio of the explained variance (R square)",
"# for the current model for all voxel time courses.",
"# print('------------np.linalg.lstsq on pRF: ' +",
"# str(idxX) +",
"# 'x ' +",
"# str(idxY) +",
"# 'y ' +",
"# str(idxSd) +",
"# 'z --- START')",
"# varTmeTmp01 = time.time()",
"# Change type to float32:",
"# aryDsgn = aryDsgn.astype(np.float32)",
"# Calculate the least-squares solution for all voxels:",
"vecTmpRes",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryDsgn",
",",
"aryFuncChnk",
")",
"[",
"1",
"]",
"# varTmeTmp02 = time.time()",
"# varTmeTmp03 = np.around((varTmeTmp02 - varTmeTmp01),",
"# decimals=2)",
"# print('------------np.linalg.lstsq on pRF: ' +",
"# str(idxX) +",
"# 'x ' +",
"# str(idxY) +",
"# 'y ' +",
"# str(idxSd) +",
"# 'z --- DONE elapsed time: ' +",
"# str(varTmeTmp03) +",
"# 's')",
"# Check whether current residuals are lower than previously",
"# calculated ones:",
"vecLgcTmpRes",
"=",
"np",
".",
"less",
"(",
"vecTmpRes",
",",
"vecBstRes",
")",
"# Replace best x and y position values, and SD values.",
"vecBstXpos",
"[",
"vecLgcTmpRes",
"]",
"=",
"aryMdls",
"[",
"idxMdls",
"]",
"[",
"0",
"]",
"vecBstYpos",
"[",
"vecLgcTmpRes",
"]",
"=",
"aryMdls",
"[",
"idxMdls",
"]",
"[",
"1",
"]",
"vecBstSd",
"[",
"vecLgcTmpRes",
"]",
"=",
"aryMdls",
"[",
"idxMdls",
"]",
"[",
"2",
"]",
"# Replace best residual values:",
"vecBstRes",
"[",
"vecLgcTmpRes",
"]",
"=",
"vecTmpRes",
"[",
"vecLgcTmpRes",
"]",
"# varTmeTmp04 = time.time()",
"# varTmeTmp05 = np.around((varTmeTmp04 - varTmeTmp02),",
"# decimals=2)",
"# print('------------selection of best-fitting pRF model: ' +",
"# str(idxX) +",
"# 'x ' +",
"# str(idxY) +",
"# 'y ' +",
"# str(idxSd) +",
"# 'z --- elapsed time: ' +",
"# str(varTmeTmp05) +",
"# 's')",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Increment status indicator counter:",
"varCntSts02",
"=",
"varCntSts02",
"+",
"1",
"# After finding the best fitting model for each voxel, we still have to",
"# calculate the coefficient of determination (R-squared) for each voxel. We",
"# start by calculating the total sum of squares (i.e. the deviation of the",
"# data from the mean). The mean of each time course:",
"vecFuncMean",
"=",
"np",
".",
"mean",
"(",
"aryFuncChnk",
",",
"axis",
"=",
"0",
")",
"# Deviation from the mean for each datapoint:",
"vecFuncDev",
"=",
"np",
".",
"subtract",
"(",
"aryFuncChnk",
",",
"vecFuncMean",
"[",
"None",
",",
":",
"]",
")",
"# Sum of squares:",
"vecSsTot",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"power",
"(",
"vecFuncDev",
",",
"2.0",
")",
",",
"axis",
"=",
"0",
")",
"# Coefficient of determination:",
"vecBstR2",
"=",
"np",
".",
"subtract",
"(",
"1.0",
",",
"np",
".",
"divide",
"(",
"vecBstRes",
",",
"vecSsTot",
")",
")",
"# Output list:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"vecBstXpos",
",",
"vecBstYpos",
",",
"vecBstSd",
",",
"vecBstR2",
"]",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Function for finding best pRF model for voxel time course.
This function should be used if there is only one predictor. | [
"Function",
"for",
"finding",
"best",
"pRF",
"model",
"for",
"voxel",
"time",
"course",
".",
"This",
"function",
"should",
"be",
"used",
"if",
"there",
"is",
"only",
"one",
"predictor",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_functions.py#L627-L817 |
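To make the fitting loop documented above concrete, the sketch below fits a single pRF model time course (plus a constant term) to a batch of voxel time courses with an ordinary least-squares solve and turns the residuals into R-squared values; the array sizes and random data are invented for the example.

import numpy as np

num_vol, num_vox = 100, 50
ary_func = np.random.rand(num_vol, num_vox).astype(np.float32)   # time goes down the columns
vec_mdl_tc = np.random.rand(num_vol).astype(np.float32)          # one candidate pRF time course

# Design matrix: model time course plus a constant term.
ary_dsgn = np.vstack([vec_mdl_tc, np.ones(num_vol, dtype=np.float32)]).T

# Residual sum of squares of the fit, one value per voxel; in the function
# above this is compared against the best residual found so far to keep the
# winning x-position, y-position and SD parameters.
vec_res = np.linalg.lstsq(ary_dsgn, ary_func, rcond=None)[1]

# Coefficient of determination per voxel.
vec_ss_tot = np.sum((ary_func - np.mean(ary_func, axis=0)) ** 2, axis=0)
vec_r2 = 1.0 - vec_res / vec_ss_tot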
MSchnei/pyprf_feature | pyprf_feature/simulation/pRF_functions.py | funcFindPrfMltpPrdXVal | def funcFindPrfMltpPrdXVal(idxPrc,
aryFuncChnkTrn,
aryFuncChnkTst,
aryPrfMdlsTrnConv,
aryPrfMdlsTstConv,
aryMdls,
queOut):
"""
Function for finding best pRF model for voxel time course.
This function should be used if there are several predictors.
"""
# Number of voxels to be fitted in this chunk:
varNumVoxChnk = aryFuncChnkTrn.shape[0]
# Number of volumes:
varNumVolTrn = aryFuncChnkTrn.shape[2]
varNumVolTst = aryFuncChnkTst.shape[2]
# get number of cross validations
varNumXval = aryPrfMdlsTrnConv.shape[2]
# Vectors for pRF finding results [number-of-voxels times one]:
vecBstXpos = np.zeros(varNumVoxChnk)
vecBstYpos = np.zeros(varNumVoxChnk)
vecBstSd = np.zeros(varNumVoxChnk)
# vecBstR2 = np.zeros(varNumVoxChnk)
# Vector for temporary residuals values that are obtained during
# the different loops of cross validation
vecTmpResXVal = np.empty((varNumVoxChnk, varNumXval), dtype='float32')
# Vector for best residual values.
vecBstRes = np.add(np.zeros(varNumVoxChnk),
100000.0)
# Constant term for the model:
vecConstTrn = np.ones((varNumVolTrn), dtype=np.float32)
vecConstTst = np.ones((varNumVolTst), dtype=np.float32)
# Change type to float 32:
aryPrfMdlsTrnConv = aryPrfMdlsTrnConv.astype(np.float32)
aryPrfMdlsTstConv = aryPrfMdlsTstConv.astype(np.float32)
# Number of pRF models to fit:
varNumMdls = len(aryMdls)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through pRF models:
for idxMdls in range(0, varNumMdls):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# Loop through different cross validations
for idxXval in range(0, varNumXval):
# Current pRF time course model:
vecMdlTrn = aryPrfMdlsTrnConv[idxMdls, :, idxXval, :]
vecMdlTst = aryPrfMdlsTstConv[idxMdls, :, idxXval, :]
# We create a design matrix including the current pRF time
# course model, and a constant term:
aryDsgnTrn = np.vstack([vecMdlTrn,
vecConstTrn]).T
aryDsgnTst = np.vstack([vecMdlTst,
vecConstTst]).T
# Calculate the least-squares solution for all voxels
# and get parameter estimates from the training fit
aryTmpPrmEst = np.linalg.lstsq(aryDsgnTrn,
aryFuncChnkTrn[:, idxXval, :].T)[0]
# calculate predicted model fit based on training data
aryTmpMdlTc = np.dot(aryDsgnTst, aryTmpPrmEst)
# calculate residual sum of squares between test data and
# predicted model fit based on training data
vecTmpResXVal[:, idxXval] = np.sum(
(np.subtract(aryFuncChnkTst[:, idxXval, :].T,
aryTmpMdlTc))**2, axis=0)
vecTmpRes = np.mean(vecTmpResXVal, axis=1)
# Check whether current residuals are lower than previously
# calculated ones:
vecLgcTmpRes = np.less(vecTmpRes, vecBstRes)
# Replace best x and y position values, and SD values.
vecBstXpos[vecLgcTmpRes] = aryMdls[idxMdls][0]
vecBstYpos[vecLgcTmpRes] = aryMdls[idxMdls][1]
vecBstSd[vecLgcTmpRes] = aryMdls[idxMdls][2]
# Replace best residual values:
vecBstRes[vecLgcTmpRes] = vecTmpRes[vecLgcTmpRes]
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
]
queOut.put(lstOut) | python | def funcFindPrfMltpPrdXVal(idxPrc,
aryFuncChnkTrn,
aryFuncChnkTst,
aryPrfMdlsTrnConv,
aryPrfMdlsTstConv,
aryMdls,
queOut):
"""
Function for finding best pRF model for voxel time course.
This function should be used if there are several predictors.
"""
# Number of voxels to be fitted in this chunk:
varNumVoxChnk = aryFuncChnkTrn.shape[0]
# Number of volumes:
varNumVolTrn = aryFuncChnkTrn.shape[2]
varNumVolTst = aryFuncChnkTst.shape[2]
# get number of cross validations
varNumXval = aryPrfMdlsTrnConv.shape[2]
# Vectors for pRF finding results [number-of-voxels times one]:
vecBstXpos = np.zeros(varNumVoxChnk)
vecBstYpos = np.zeros(varNumVoxChnk)
vecBstSd = np.zeros(varNumVoxChnk)
# vecBstR2 = np.zeros(varNumVoxChnk)
# Vector for temporary residuals values that are obtained during
# the different loops of cross validation
vecTmpResXVal = np.empty((varNumVoxChnk, varNumXval), dtype='float32')
# Vector for best residual values.
vecBstRes = np.add(np.zeros(varNumVoxChnk),
100000.0)
# Constant term for the model:
vecConstTrn = np.ones((varNumVolTrn), dtype=np.float32)
vecConstTst = np.ones((varNumVolTst), dtype=np.float32)
# Change type to float 32:
aryPrfMdlsTrnConv = aryPrfMdlsTrnConv.astype(np.float32)
aryPrfMdlsTstConv = aryPrfMdlsTstConv.astype(np.float32)
# Number of pRF models to fit:
varNumMdls = len(aryMdls)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through pRF models:
for idxMdls in range(0, varNumMdls):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# Loop through different cross validations
for idxXval in range(0, varNumXval):
# Current pRF time course model:
vecMdlTrn = aryPrfMdlsTrnConv[idxMdls, :, idxXval, :]
vecMdlTst = aryPrfMdlsTstConv[idxMdls, :, idxXval, :]
# We create a design matrix including the current pRF time
# course model, and a constant term:
aryDsgnTrn = np.vstack([vecMdlTrn,
vecConstTrn]).T
aryDsgnTst = np.vstack([vecMdlTst,
vecConstTst]).T
# Calculate the least-squares solution for all voxels
# and get parameter estimates from the training fit
aryTmpPrmEst = np.linalg.lstsq(aryDsgnTrn,
aryFuncChnkTrn[:, idxXval, :].T)[0]
# calculate predicted model fit based on training data
aryTmpMdlTc = np.dot(aryDsgnTst, aryTmpPrmEst)
# calculate residual sum of squares between test data and
# predicted model fit based on training data
vecTmpResXVal[:, idxXval] = np.sum(
(np.subtract(aryFuncChnkTst[:, idxXval, :].T,
aryTmpMdlTc))**2, axis=0)
vecTmpRes = np.mean(vecTmpResXVal, axis=1)
# Check whether current residuals are lower than previously
# calculated ones:
vecLgcTmpRes = np.less(vecTmpRes, vecBstRes)
# Replace best x and y position values, and SD values.
vecBstXpos[vecLgcTmpRes] = aryMdls[idxMdls][0]
vecBstYpos[vecLgcTmpRes] = aryMdls[idxMdls][1]
vecBstSd[vecLgcTmpRes] = aryMdls[idxMdls][2]
# Replace best residual values:
vecBstRes[vecLgcTmpRes] = vecTmpRes[vecLgcTmpRes]
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
]
queOut.put(lstOut) | [
"def",
"funcFindPrfMltpPrdXVal",
"(",
"idxPrc",
",",
"aryFuncChnkTrn",
",",
"aryFuncChnkTst",
",",
"aryPrfMdlsTrnConv",
",",
"aryPrfMdlsTstConv",
",",
"aryMdls",
",",
"queOut",
")",
":",
"# Number of voxels to be fitted in this chunk:",
"varNumVoxChnk",
"=",
"aryFuncChnkTrn",
".",
"shape",
"[",
"0",
"]",
"# Number of volumes:",
"varNumVolTrn",
"=",
"aryFuncChnkTrn",
".",
"shape",
"[",
"2",
"]",
"varNumVolTst",
"=",
"aryFuncChnkTst",
".",
"shape",
"[",
"2",
"]",
"# get number of cross validations",
"varNumXval",
"=",
"aryPrfMdlsTrnConv",
".",
"shape",
"[",
"2",
"]",
"# Vectors for pRF finding results [number-of-voxels times one]:",
"vecBstXpos",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
"vecBstYpos",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
"vecBstSd",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
"# vecBstR2 = np.zeros(varNumVoxChnk)",
"# Vector for temporary residuals values that are obtained during",
"# the different loops of cross validation",
"vecTmpResXVal",
"=",
"np",
".",
"empty",
"(",
"(",
"varNumVoxChnk",
",",
"varNumXval",
")",
",",
"dtype",
"=",
"'float32'",
")",
"# Vector for best residual values.",
"vecBstRes",
"=",
"np",
".",
"add",
"(",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
",",
"100000.0",
")",
"# Constant term for the model:",
"vecConstTrn",
"=",
"np",
".",
"ones",
"(",
"(",
"varNumVolTrn",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"vecConstTst",
"=",
"np",
".",
"ones",
"(",
"(",
"varNumVolTst",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# Change type to float 32:",
"aryPrfMdlsTrnConv",
"=",
"aryPrfMdlsTrnConv",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"aryPrfMdlsTstConv",
"=",
"aryPrfMdlsTstConv",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# Number of pRF models to fit:",
"varNumMdls",
"=",
"len",
"(",
"aryMdls",
")",
"# Prepare status indicator if this is the first of the parallel processes:",
"if",
"idxPrc",
"==",
"0",
":",
"# We create a status indicator for the time consuming pRF model finding",
"# algorithm. Number of steps of the status indicator:",
"varStsStpSze",
"=",
"20",
"# Vector with pRF values at which to give status feedback:",
"vecStatPrf",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"varNumMdls",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrf",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrf",
")",
"vecStatPrf",
"=",
"vecStatPrf",
".",
"astype",
"(",
"int",
")",
"# Vector with corresponding percentage values at which to give status",
"# feedback:",
"vecStatPrc",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"100",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrc",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrc",
")",
"vecStatPrc",
"=",
"vecStatPrc",
".",
"astype",
"(",
"int",
")",
"# Counter for status indicator:",
"varCntSts01",
"=",
"0",
"varCntSts02",
"=",
"0",
"# Loop through pRF models:",
"for",
"idxMdls",
"in",
"range",
"(",
"0",
",",
"varNumMdls",
")",
":",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Status indicator:",
"if",
"varCntSts02",
"==",
"vecStatPrf",
"[",
"varCntSts01",
"]",
":",
"# Prepare status message:",
"strStsMsg",
"=",
"(",
"'---------Progress: '",
"+",
"str",
"(",
"vecStatPrc",
"[",
"varCntSts01",
"]",
")",
"+",
"' % --- '",
"+",
"str",
"(",
"vecStatPrf",
"[",
"varCntSts01",
"]",
")",
"+",
"' pRF models out of '",
"+",
"str",
"(",
"varNumMdls",
")",
")",
"print",
"(",
"strStsMsg",
")",
"# Only increment counter if the last value has not been",
"# reached yet:",
"if",
"varCntSts01",
"<",
"varStsStpSze",
":",
"varCntSts01",
"=",
"varCntSts01",
"+",
"int",
"(",
"1",
")",
"# Loop through different cross validations",
"for",
"idxXval",
"in",
"range",
"(",
"0",
",",
"varNumXval",
")",
":",
"# Current pRF time course model:",
"vecMdlTrn",
"=",
"aryPrfMdlsTrnConv",
"[",
"idxMdls",
",",
":",
",",
"idxXval",
",",
":",
"]",
"vecMdlTst",
"=",
"aryPrfMdlsTstConv",
"[",
"idxMdls",
",",
":",
",",
"idxXval",
",",
":",
"]",
"# We create a design matrix including the current pRF time",
"# course model, and a constant term:",
"aryDsgnTrn",
"=",
"np",
".",
"vstack",
"(",
"[",
"vecMdlTrn",
",",
"vecConstTrn",
"]",
")",
".",
"T",
"aryDsgnTst",
"=",
"np",
".",
"vstack",
"(",
"[",
"vecMdlTst",
",",
"vecConstTst",
"]",
")",
".",
"T",
"# Calculate the least-squares solution for all voxels",
"# and get parameter estimates from the training fit",
"aryTmpPrmEst",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryDsgnTrn",
",",
"aryFuncChnkTrn",
"[",
":",
",",
"idxXval",
",",
":",
"]",
".",
"T",
")",
"[",
"0",
"]",
"# calculate predicted model fit based on training data",
"aryTmpMdlTc",
"=",
"np",
".",
"dot",
"(",
"aryDsgnTst",
",",
"aryTmpPrmEst",
")",
"# calculate residual sum of squares between test data and",
"# predicted model fit based on training data",
"vecTmpResXVal",
"[",
":",
",",
"idxXval",
"]",
"=",
"np",
".",
"sum",
"(",
"(",
"np",
".",
"subtract",
"(",
"aryFuncChnkTst",
"[",
":",
",",
"idxXval",
",",
":",
"]",
".",
"T",
",",
"aryTmpMdlTc",
")",
")",
"**",
"2",
",",
"axis",
"=",
"0",
")",
"vecTmpRes",
"=",
"np",
".",
"mean",
"(",
"vecTmpResXVal",
",",
"axis",
"=",
"1",
")",
"# Check whether current residuals are lower than previously",
"# calculated ones:",
"vecLgcTmpRes",
"=",
"np",
".",
"less",
"(",
"vecTmpRes",
",",
"vecBstRes",
")",
"# Replace best x and y position values, and SD values.",
"vecBstXpos",
"[",
"vecLgcTmpRes",
"]",
"=",
"aryMdls",
"[",
"idxMdls",
"]",
"[",
"0",
"]",
"vecBstYpos",
"[",
"vecLgcTmpRes",
"]",
"=",
"aryMdls",
"[",
"idxMdls",
"]",
"[",
"1",
"]",
"vecBstSd",
"[",
"vecLgcTmpRes",
"]",
"=",
"aryMdls",
"[",
"idxMdls",
"]",
"[",
"2",
"]",
"# Replace best residual values:",
"vecBstRes",
"[",
"vecLgcTmpRes",
"]",
"=",
"vecTmpRes",
"[",
"vecLgcTmpRes",
"]",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Increment status indicator counter:",
"varCntSts02",
"=",
"varCntSts02",
"+",
"1",
"# Output list:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"vecBstXpos",
",",
"vecBstYpos",
",",
"vecBstSd",
",",
"]",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Function for finding best pRF model for voxel time course.
This function should be used if there are several predictors. | [
"Function",
"for",
"finding",
"best",
"pRF",
"model",
"for",
"voxel",
"time",
"course",
".",
"This",
"function",
"should",
"be",
"used",
"if",
"there",
"are",
"several",
"predictors",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_functions.py#L1015-L1168 |
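The cross-validated variant above ranks models by out-of-sample error rather than by R-squared; one fold of that scheme reduces to the following sketch, with the train/test split sizes and data invented for the example.

import numpy as np

# One hypothetical fold: 80 training volumes, 20 held-out test volumes.
trn_dsgn = np.vstack([np.random.rand(80), np.ones(80)]).T   # model time course + constant
tst_dsgn = np.vstack([np.random.rand(20), np.ones(20)]).T
trn_data = np.random.rand(80, 50)                           # volumes by voxels
tst_data = np.random.rand(20, 50)

# Fit on the training volumes only ...
prm_est = np.linalg.lstsq(trn_dsgn, trn_data, rcond=None)[0]

# ... then predict the held-out volumes and score by residual sum of squares.
prediction = np.dot(tst_dsgn, prm_est)
vec_res = np.sum((tst_data - prediction) ** 2, axis=0)

# Averaging vec_res across folds gives the value used to select the best model.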
pmacosta/pcsv | pcsv/concatenate.py | concatenate | def concatenate(
fname1,
fname2,
dfilter1=None,
dfilter2=None,
has_header1=True,
has_header2=True,
frow1=0,
frow2=0,
ofname=None,
ocols=None,
):
r"""
Concatenate two comma-separated values file.
Data rows from the second file are appended at the end of the data rows
from the first file
:param fname1: Name of the first comma-separated values file, the file
whose data appears first in the output file
:type fname1: FileNameExists_
:param fname2: Name of the second comma-separated values file, the file
whose data appears last in the output file
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the first file. If None no
data filtering is done on the file
:type dfilter1: :ref:`CsvDataFilter` or None
:param dfilter2: Row and/or column filter for the second file. If None no
data filtering is done on the file
:type dfilter2: :ref:`CsvDataFilter` or None
:param has_header1: Flag that indicates whether the first comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the second comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header2: boolean
:param frow1: First comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer of float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Second comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer of float) in at
least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the data from the first and second files.
If None the first file is replaced "in place"
:type ofname: FileName_ or None
:param ocols: Column names of the output comma-separated values file.
If None the column names in the first file are used if
**has_header1** is True or the column names in the second
files are used if **has_header1** is False and
**has_header2** is True, otherwise no header is used
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.concatenate.concatenate
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Files have different number of columns)
* RuntimeError (Invalid column specification)
* RuntimeError (Number of columns in data files and output columns are
different)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
# pylint: disable=R0913,R0914
iro = pexdoc.exh.addex(RuntimeError, "Files have different number of columns")
iom = pexdoc.exh.addex(
RuntimeError, "Number of columns in data files and output columns are different"
)
# Read and validate file 1
obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate file 2
obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
# Create new header
if (ocols is None) and has_header1:
ocols = [obj1.header()] if obj1.cfilter is None else [obj1.cfilter]
elif (ocols is None) and has_header2:
ocols = [obj2.header()] if obj2.cfilter is None else [obj2.cfilter]
elif ocols is None:
ocols = []
else:
iom((obj1.cfilter is not None) and (len(obj1.cfilter) != len(ocols)))
ocols = [ocols]
# Miscellaneous data validation
iro(_C(obj1.cfilter, obj2.cfilter) and (len(obj1.cfilter) != len(obj2.cfilter)))
# Write final output
data = ocols + obj1.data(filtered=True) + obj2.data(filtered=True)
write(fname=ofname, data=data, append=False) | python | def concatenate(
fname1,
fname2,
dfilter1=None,
dfilter2=None,
has_header1=True,
has_header2=True,
frow1=0,
frow2=0,
ofname=None,
ocols=None,
):
r"""
Concatenate two comma-separated values file.
Data rows from the second file are appended at the end of the data rows
from the first file
:param fname1: Name of the first comma-separated values file, the file
whose data appears first in the output file
:type fname1: FileNameExists_
:param fname2: Name of the second comma-separated values file, the file
whose data appears last in the output file
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the first file. If None no
data filtering is done on the file
:type dfilter1: :ref:`CsvDataFilter` or None
:param dfilter2: Row and/or column filter for the second file. If None no
data filtering is done on the file
:type dfilter2: :ref:`CsvDataFilter` or None
:param has_header1: Flag that indicates whether the first comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the second comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header2: boolean
:param frow1: First comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer of float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Second comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer of float) in at
least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the data from the first and second files.
If None the first file is replaced "in place"
:type ofname: FileName_ or None
:param ocols: Column names of the output comma-separated values file.
If None the column names in the first file are used if
**has_header1** is True or the column names in the second
files are used if **has_header1** is False and
**has_header2** is True, otherwise no header is used
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.concatenate.concatenate
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Files have different number of columns)
* RuntimeError (Invalid column specification)
* RuntimeError (Number of columns in data files and output columns are
different)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
# pylint: disable=R0913,R0914
iro = pexdoc.exh.addex(RuntimeError, "Files have different number of columns")
iom = pexdoc.exh.addex(
RuntimeError, "Number of columns in data files and output columns are different"
)
# Read and validate file 1
obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate file 2
obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
# Create new header
if (ocols is None) and has_header1:
ocols = [obj1.header()] if obj1.cfilter is None else [obj1.cfilter]
elif (ocols is None) and has_header2:
ocols = [obj2.header()] if obj2.cfilter is None else [obj2.cfilter]
elif ocols is None:
ocols = []
else:
iom((obj1.cfilter is not None) and (len(obj1.cfilter) != len(ocols)))
ocols = [ocols]
# Miscellaneous data validation
iro(_C(obj1.cfilter, obj2.cfilter) and (len(obj1.cfilter) != len(obj2.cfilter)))
# Write final output
data = ocols + obj1.data(filtered=True) + obj2.data(filtered=True)
write(fname=ofname, data=data, append=False) | [
"def",
"concatenate",
"(",
"fname1",
",",
"fname2",
",",
"dfilter1",
"=",
"None",
",",
"dfilter2",
"=",
"None",
",",
"has_header1",
"=",
"True",
",",
"has_header2",
"=",
"True",
",",
"frow1",
"=",
"0",
",",
"frow2",
"=",
"0",
",",
"ofname",
"=",
"None",
",",
"ocols",
"=",
"None",
",",
")",
":",
"# pylint: disable=R0913,R0914",
"iro",
"=",
"pexdoc",
".",
"exh",
".",
"addex",
"(",
"RuntimeError",
",",
"\"Files have different number of columns\"",
")",
"iom",
"=",
"pexdoc",
".",
"exh",
".",
"addex",
"(",
"RuntimeError",
",",
"\"Number of columns in data files and output columns are different\"",
")",
"# Read and validate file 1",
"obj1",
"=",
"CsvFile",
"(",
"fname",
"=",
"fname1",
",",
"dfilter",
"=",
"dfilter1",
",",
"has_header",
"=",
"has_header1",
",",
"frow",
"=",
"frow1",
")",
"# Read and validate file 2",
"obj2",
"=",
"CsvFile",
"(",
"fname",
"=",
"fname2",
",",
"dfilter",
"=",
"dfilter2",
",",
"has_header",
"=",
"has_header2",
",",
"frow",
"=",
"frow2",
")",
"# Assign output data structure",
"ofname",
"=",
"fname1",
"if",
"ofname",
"is",
"None",
"else",
"ofname",
"# Create new header",
"if",
"(",
"ocols",
"is",
"None",
")",
"and",
"has_header1",
":",
"ocols",
"=",
"[",
"obj1",
".",
"header",
"(",
")",
"]",
"if",
"obj1",
".",
"cfilter",
"is",
"None",
"else",
"[",
"obj1",
".",
"cfilter",
"]",
"elif",
"(",
"ocols",
"is",
"None",
")",
"and",
"has_header2",
":",
"ocols",
"=",
"[",
"obj2",
".",
"header",
"(",
")",
"]",
"if",
"obj2",
".",
"cfilter",
"is",
"None",
"else",
"[",
"obj2",
".",
"cfilter",
"]",
"elif",
"ocols",
"is",
"None",
":",
"ocols",
"=",
"[",
"]",
"else",
":",
"iom",
"(",
"(",
"obj1",
".",
"cfilter",
"is",
"not",
"None",
")",
"and",
"(",
"len",
"(",
"obj1",
".",
"cfilter",
")",
"!=",
"len",
"(",
"ocols",
")",
")",
")",
"ocols",
"=",
"[",
"ocols",
"]",
"# Miscellaneous data validation",
"iro",
"(",
"_C",
"(",
"obj1",
".",
"cfilter",
",",
"obj2",
".",
"cfilter",
")",
"and",
"(",
"len",
"(",
"obj1",
".",
"cfilter",
")",
"!=",
"len",
"(",
"obj2",
".",
"cfilter",
")",
")",
")",
"# Write final output",
"data",
"=",
"ocols",
"+",
"obj1",
".",
"data",
"(",
"filtered",
"=",
"True",
")",
"+",
"obj2",
".",
"data",
"(",
"filtered",
"=",
"True",
")",
"write",
"(",
"fname",
"=",
"ofname",
",",
"data",
"=",
"data",
",",
"append",
"=",
"False",
")"
] | r"""
Concatenate two comma-separated values file.
Data rows from the second file are appended at the end of the data rows
from the first file
:param fname1: Name of the first comma-separated values file, the file
whose data appears first in the output file
:type fname1: FileNameExists_
:param fname2: Name of the second comma-separated values file, the file
whose data appears last in the output file
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the first file. If None no
data filtering is done on the file
:type dfilter1: :ref:`CsvDataFilter` or None
:param dfilter2: Row and/or column filter for the second file. If None no
data filtering is done on the file
:type dfilter2: :ref:`CsvDataFilter` or None
:param has_header1: Flag that indicates whether the first comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the second comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header2: boolean
:param frow1: First comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer of float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Second comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer of float) in at
least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the data from the first and second files.
If None the first file is replaced "in place"
:type ofname: FileName_ or None
:param ocols: Column names of the output comma-separated values file.
If None the column names in the first file are used if
**has_header1** is True or the column names in the second
files are used if **has_header1** is False and
**has_header2** is True, otherwise no header is used
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.concatenate.concatenate
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Files have different number of columns)
* RuntimeError (Invalid column specification)
* RuntimeError (Number of columns in data files and output columns are
different)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]] | [
"r",
"Concatenate",
"two",
"comma",
"-",
"separated",
"values",
"file",
"."
] | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/concatenate.py#L46-L179 |
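Going by the signature and docstring recorded above, a typical call would look roughly like the snippet below. It assumes pcsv is installed, that both CSV files exist with the same number of columns, and that importing the function from pcsv.concatenate (the module path in this row's URL) is the intended entry point.

from pcsv.concatenate import concatenate

# Append the data rows of file2.csv to those of file1.csv and write the
# result to combined.csv instead of replacing file1.csv "in place".
concatenate('file1.csv', 'file2.csv',
            has_header1=True, has_header2=True,
            ofname='combined.csv')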
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/images.py | move_images_to_cache | def move_images_to_cache(source, destination):
"""
Handles the movement of images to the cache. Must be helpful if it finds
that the folder for this article already exists.
"""
if os.path.isdir(destination):
log.debug('Cached images for this article already exist')
return
else:
log.debug('Cache location: {0}'.format(destination))
try:
shutil.copytree(source, destination)
except:
log.exception('Images could not be moved to cache')
else:
log.info('Moved images to cache'.format(destination)) | python | def move_images_to_cache(source, destination):
"""
Handles the movement of images to the cache. Must be helpful if it finds
that the folder for this article already exists.
"""
if os.path.isdir(destination):
log.debug('Cached images for this article already exist')
return
else:
log.debug('Cache location: {0}'.format(destination))
try:
shutil.copytree(source, destination)
except:
log.exception('Images could not be moved to cache')
else:
log.info('Moved images to cache'.format(destination)) | [
"def",
"move_images_to_cache",
"(",
"source",
",",
"destination",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"destination",
")",
":",
"log",
".",
"debug",
"(",
"'Cached images for this article already exist'",
")",
"return",
"else",
":",
"log",
".",
"debug",
"(",
"'Cache location: {0}'",
".",
"format",
"(",
"destination",
")",
")",
"try",
":",
"shutil",
".",
"copytree",
"(",
"source",
",",
"destination",
")",
"except",
":",
"log",
".",
"exception",
"(",
"'Images could not be moved to cache'",
")",
"else",
":",
"log",
".",
"info",
"(",
"'Moved images to cache'",
".",
"format",
"(",
"destination",
")",
")"
] | Handles the movement of images to the cache. Must be helpful if it finds
that the folder for this article already exists. | [
"Handles",
"the",
"movement",
"of",
"images",
"to",
"the",
"cache",
".",
"Must",
"be",
"helpful",
"if",
"it",
"finds",
"that",
"the",
"folder",
"for",
"this",
"article",
"already",
"exists",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L19-L34 |
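The caching behaviour documented above (copy the article's images into the cache unless a cached copy already exists) boils down to the following standalone pattern; the paths shown are placeholders.

import os
import shutil

def store_in_cache(img_dir, article_cache):
    # Leave an existing cached copy untouched; otherwise copy the whole tree.
    if os.path.isdir(article_cache):
        return
    shutil.copytree(img_dir, article_cache)

# store_in_cache('output/EPUB/images-journal.pone.0012345',
#                '/home/user/img_cache/10.1371/journal.pone.0012345')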
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/images.py | explicit_images | def explicit_images(images, image_destination, rootname, config):
"""
The method used to handle an explicitly defined image directory by the
user as a parsed argument.
"""
log.info('Explicit image directory specified: {0}'.format(images))
if '*' in images:
images = images.replace('*', rootname)
log.debug('Wildcard expansion for image directory: {0}'.format(images))
try:
shutil.copytree(images, image_destination)
except:
#The following is basically a recipe for log.exception() but with a
#CRITICAL level if the execution should be killed immediately
#log.critical('Unable to copy from indicated directory', exc_info=True)
log.exception('Unable to copy from indicated directory')
return False
else:
return True | python | def explicit_images(images, image_destination, rootname, config):
"""
The method used to handle an explicitly defined image directory by the
user as a parsed argument.
"""
log.info('Explicit image directory specified: {0}'.format(images))
if '*' in images:
images = images.replace('*', rootname)
log.debug('Wildcard expansion for image directory: {0}'.format(images))
try:
shutil.copytree(images, image_destination)
except:
#The following is basically a recipe for log.exception() but with a
#CRITICAL level if the execution should be killed immediately
#log.critical('Unable to copy from indicated directory', exc_info=True)
log.exception('Unable to copy from indicated directory')
return False
else:
return True | [
"def",
"explicit_images",
"(",
"images",
",",
"image_destination",
",",
"rootname",
",",
"config",
")",
":",
"log",
".",
"info",
"(",
"'Explicit image directory specified: {0}'",
".",
"format",
"(",
"images",
")",
")",
"if",
"'*'",
"in",
"images",
":",
"images",
"=",
"images",
".",
"replace",
"(",
"'*'",
",",
"rootname",
")",
"log",
".",
"debug",
"(",
"'Wildcard expansion for image directory: {0}'",
".",
"format",
"(",
"images",
")",
")",
"try",
":",
"shutil",
".",
"copytree",
"(",
"images",
",",
"image_destination",
")",
"except",
":",
"#The following is basically a recipe for log.exception() but with a",
"#CRITICAL level if the execution should be killed immediately",
"#log.critical('Unable to copy from indicated directory', exc_info=True)",
"log",
".",
"exception",
"(",
"'Unable to copy from indicated directory'",
")",
"return",
"False",
"else",
":",
"return",
"True"
] | The method used to handle an explicitly defined image directory by the
user as a parsed argument. | [
"The",
"method",
"used",
"to",
"handle",
"an",
"explicitly",
"defined",
"image",
"directory",
"by",
"the",
"user",
"as",
"a",
"parsed",
"argument",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L37-L55 |
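The wildcard handling documented above, which replaces '*' in a user-supplied image directory with the root name of the input file before copying the tree, can be shown in isolation as follows; the paths and root name are placeholders.

import os
import shutil

def copy_explicit_images(images, destination, rootname):
    if '*' in images:
        images = images.replace('*', rootname)   # e.g. './figs-*' -> './figs-article1'
    try:
        shutil.copytree(images, destination)
    except (OSError, shutil.Error):
        return False
    return True

# copy_explicit_images('./figs-*', 'output/EPUB/images-article1', 'article1')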
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/images.py | input_relative_images | def input_relative_images(input_path, image_destination, rootname, config):
"""
The method used to handle Input-Relative image inclusion.
"""
log.debug('Looking for input relative images')
input_dirname = os.path.dirname(input_path)
for path in config.input_relative_images:
if '*' in path:
path = path.replace('*', rootname)
log.debug('Wildcard expansion for image directory: {0}'.format(path))
images = os.path.normpath(os.path.join(input_dirname, path))
if os.path.isdir(images):
log.info('Input-Relative image directory found: {0}'.format(images))
shutil.copytree(images, image_destination)
return True
return False | python | def input_relative_images(input_path, image_destination, rootname, config):
"""
The method used to handle Input-Relative image inclusion.
"""
log.debug('Looking for input relative images')
input_dirname = os.path.dirname(input_path)
for path in config.input_relative_images:
if '*' in path:
path = path.replace('*', rootname)
log.debug('Wildcard expansion for image directory: {0}'.format(path))
images = os.path.normpath(os.path.join(input_dirname, path))
if os.path.isdir(images):
log.info('Input-Relative image directory found: {0}'.format(images))
shutil.copytree(images, image_destination)
return True
return False | [
"def",
"input_relative_images",
"(",
"input_path",
",",
"image_destination",
",",
"rootname",
",",
"config",
")",
":",
"log",
".",
"debug",
"(",
"'Looking for input relative images'",
")",
"input_dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"input_path",
")",
"for",
"path",
"in",
"config",
".",
"input_relative_images",
":",
"if",
"'*'",
"in",
"path",
":",
"path",
"=",
"path",
".",
"replace",
"(",
"'*'",
",",
"rootname",
")",
"log",
".",
"debug",
"(",
"'Wildcard expansion for image directory: {0}'",
".",
"format",
"(",
"path",
")",
")",
"images",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"input_dirname",
",",
"path",
")",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"images",
")",
":",
"log",
".",
"info",
"(",
"'Input-Relative image directory found: {0}'",
".",
"format",
"(",
"images",
")",
")",
"shutil",
".",
"copytree",
"(",
"images",
",",
"image_destination",
")",
"return",
"True",
"return",
"False"
] | The method used to handle Input-Relative image inclusion. | [
"The",
"method",
"used",
"to",
"handle",
"Input",
"-",
"Relative",
"image",
"inclusion",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L58-L73 |
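Input-relative lookup, as documented above, tries a list of candidate directories relative to the input file and copies the first one that exists; a minimal sketch follows, where the candidate list is an assumption standing in for config.input_relative_images.

import os
import shutil

def find_input_relative_images(input_path, destination, rootname,
                               candidates=('images-*', 'images', 'figures')):
    input_dirname = os.path.dirname(input_path)
    for path in candidates:
        if '*' in path:
            path = path.replace('*', rootname)
        images = os.path.normpath(os.path.join(input_dirname, path))
        if os.path.isdir(images):
            shutil.copytree(images, destination)
            return True
    return False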
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/images.py | image_cache | def image_cache(article_cache, img_dir):
"""
The method to be used by get_images() for copying images out of the cache.
"""
log.debug('Looking for image directory in the cache')
if os.path.isdir(article_cache):
log.info('Cached image directory found: {0}'.format(article_cache))
shutil.copytree(article_cache, img_dir)
return True
return False | python | def image_cache(article_cache, img_dir):
"""
The method to be used by get_images() for copying images out of the cache.
"""
log.debug('Looking for image directory in the cache')
if os.path.isdir(article_cache):
log.info('Cached image directory found: {0}'.format(article_cache))
shutil.copytree(article_cache, img_dir)
return True
return False | [
"def",
"image_cache",
"(",
"article_cache",
",",
"img_dir",
")",
":",
"log",
".",
"debug",
"(",
"'Looking for image directory in the cache'",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"article_cache",
")",
":",
"log",
".",
"info",
"(",
"'Cached image directory found: {0}'",
".",
"format",
"(",
"article_cache",
")",
")",
"shutil",
".",
"copytree",
"(",
"article_cache",
",",
"img_dir",
")",
"return",
"True",
"return",
"False"
] | The method to be used by get_images() for copying images out of the cache. | [
"The",
"method",
"to",
"be",
"used",
"by",
"get_images",
"()",
"for",
"copying",
"images",
"out",
"of",
"the",
"cache",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L76-L85 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/images.py | get_images | def get_images(output_directory, explicit, input_path, config, parsed_article):
"""
Main logic controller for the placement of images into the output directory
Controlling logic for placement of the appropriate imager files into the
EPUB directory. This function interacts with interface arguments as well as
the local installation config.py file. These may change behavior of this
function in terms of how it looks for images relative to the input, where it
finds explicit images, whether it will attempt to download images, and
whether successfully downloaded images will be stored in the cache.
Parameters
----------
output_directory : str
The directory path where the EPUB is being constructed/output
explicit : str
A directory path to a user specified directory of images. Allows *
wildcard expansion.
input_path : str
The absolute path to the input XML file.
config : config module
The imported configuration module
parsed_article : openaccess_epub.article.Article object
The Article instance for the article being converted to EPUB
"""
#Split the DOI
journal_doi, article_doi = parsed_article.doi.split('/')
log.debug('journal-doi : {0}'.format(journal_doi))
log.debug('article-doi : {0}'.format(article_doi))
#Get the rootname for wildcard expansion
rootname = utils.file_root_name(input_path)
#Specify where to place the images in the output
img_dir = os.path.join(output_directory,
'EPUB',
'images-{0}'.format(article_doi))
log.info('Using {0} as image directory target'.format(img_dir))
#Construct path to cache for article
article_cache = os.path.join(config.image_cache, journal_doi, article_doi)
#Use manual image directory, explicit images
if explicit:
success = explicit_images(explicit, img_dir, rootname, config)
if success and config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
#Explicit images prevents all other image methods
return success
#Input-Relative import, looks for any one of the listed options
if config.use_input_relative_images:
#Prevents other image methods only if successful
if input_relative_images(input_path, img_dir, rootname, config):
if config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return True
#Use cache for article if it exists
if config.use_image_cache:
#Prevents other image methods only if successful
if image_cache(article_cache, img_dir):
return True
#Download images from Internet
if config.use_image_fetching:
os.mkdir(img_dir)
if journal_doi == '10.3389':
fetch_frontiers_images(article_doi, img_dir)
if config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return True
elif journal_doi == '10.1371':
success = fetch_plos_images(article_doi, img_dir, parsed_article)
if success and config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return success
else:
log.error('Fetching images for this publisher is not supported!')
return False
return False | python | def get_images(output_directory, explicit, input_path, config, parsed_article):
"""
Main logic controller for the placement of images into the output directory
Controlling logic for placement of the appropriate image files into the
EPUB directory. This function interacts with interface arguments as well as
the local installation config.py file. These may change behavior of this
function in terms of how it looks for images relative to the input, where it
finds explicit images, whether it will attempt to download images, and
whether successfully downloaded images will be stored in the cache.
Parameters
----------
output_directory : str
The directory path where the EPUB is being constructed/output
explicit : str
A directory path to a user specified directory of images. Allows *
wildcard expansion.
input_path : str
The absolute path to the input XML file.
config : config module
The imported configuration module
parsed_article : openaccess_epub.article.Article object
The Article instance for the article being converted to EPUB
"""
#Split the DOI
journal_doi, article_doi = parsed_article.doi.split('/')
log.debug('journal-doi : {0}'.format(journal_doi))
log.debug('article-doi : {0}'.format(article_doi))
#Get the rootname for wildcard expansion
rootname = utils.file_root_name(input_path)
#Specify where to place the images in the output
img_dir = os.path.join(output_directory,
'EPUB',
'images-{0}'.format(article_doi))
log.info('Using {0} as image directory target'.format(img_dir))
#Construct path to cache for article
article_cache = os.path.join(config.image_cache, journal_doi, article_doi)
#Use manual image directory, explicit images
if explicit:
success = explicit_images(explicit, img_dir, rootname, config)
if success and config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
#Explicit images prevents all other image methods
return success
#Input-Relative import, looks for any one of the listed options
if config.use_input_relative_images:
#Prevents other image methods only if successful
if input_relative_images(input_path, img_dir, rootname, config):
if config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return True
#Use cache for article if it exists
if config.use_image_cache:
#Prevents other image methods only if successful
if image_cache(article_cache, img_dir):
return True
#Download images from Internet
if config.use_image_fetching:
os.mkdir(img_dir)
if journal_doi == '10.3389':
fetch_frontiers_images(article_doi, img_dir)
if config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return True
elif journal_doi == '10.1371':
success = fetch_plos_images(article_doi, img_dir, parsed_article)
if success and config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return success
else:
log.error('Fetching images for this publisher is not supported!')
return False
return False | [
"def",
"get_images",
"(",
"output_directory",
",",
"explicit",
",",
"input_path",
",",
"config",
",",
"parsed_article",
")",
":",
"#Split the DOI",
"journal_doi",
",",
"article_doi",
"=",
"parsed_article",
".",
"doi",
".",
"split",
"(",
"'/'",
")",
"log",
".",
"debug",
"(",
"'journal-doi : {0}'",
".",
"format",
"(",
"journal_doi",
")",
")",
"log",
".",
"debug",
"(",
"'article-doi : {0}'",
".",
"format",
"(",
"article_doi",
")",
")",
"#Get the rootname for wildcard expansion",
"rootname",
"=",
"utils",
".",
"file_root_name",
"(",
"input_path",
")",
"#Specify where to place the images in the output",
"img_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"'EPUB'",
",",
"'images-{0}'",
".",
"format",
"(",
"article_doi",
")",
")",
"log",
".",
"info",
"(",
"'Using {0} as image directory target'",
".",
"format",
"(",
"img_dir",
")",
")",
"#Construct path to cache for article",
"article_cache",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"image_cache",
",",
"journal_doi",
",",
"article_doi",
")",
"#Use manual image directory, explicit images",
"if",
"explicit",
":",
"success",
"=",
"explicit_images",
"(",
"explicit",
",",
"img_dir",
",",
"rootname",
",",
"config",
")",
"if",
"success",
"and",
"config",
".",
"use_image_cache",
":",
"move_images_to_cache",
"(",
"img_dir",
",",
"article_cache",
")",
"#Explicit images prevents all other image methods",
"return",
"success",
"#Input-Relative import, looks for any one of the listed options",
"if",
"config",
".",
"use_input_relative_images",
":",
"#Prevents other image methods only if successful",
"if",
"input_relative_images",
"(",
"input_path",
",",
"img_dir",
",",
"rootname",
",",
"config",
")",
":",
"if",
"config",
".",
"use_image_cache",
":",
"move_images_to_cache",
"(",
"img_dir",
",",
"article_cache",
")",
"return",
"True",
"#Use cache for article if it exists",
"if",
"config",
".",
"use_image_cache",
":",
"#Prevents other image methods only if successful",
"if",
"image_cache",
"(",
"article_cache",
",",
"img_dir",
")",
":",
"return",
"True",
"#Download images from Internet",
"if",
"config",
".",
"use_image_fetching",
":",
"os",
".",
"mkdir",
"(",
"img_dir",
")",
"if",
"journal_doi",
"==",
"'10.3389'",
":",
"fetch_frontiers_images",
"(",
"article_doi",
",",
"img_dir",
")",
"if",
"config",
".",
"use_image_cache",
":",
"move_images_to_cache",
"(",
"img_dir",
",",
"article_cache",
")",
"return",
"True",
"elif",
"journal_doi",
"==",
"'10.1371'",
":",
"success",
"=",
"fetch_plos_images",
"(",
"article_doi",
",",
"img_dir",
",",
"parsed_article",
")",
"if",
"success",
"and",
"config",
".",
"use_image_cache",
":",
"move_images_to_cache",
"(",
"img_dir",
",",
"article_cache",
")",
"return",
"success",
"else",
":",
"log",
".",
"error",
"(",
"'Fetching images for this publisher is not supported!'",
")",
"return",
"False",
"return",
"False"
] | Main logic controller for the placement of images into the output directory
Controlling logic for placement of the appropriate image files into the
EPUB directory. This function interacts with interface arguments as well as
the local installation config.py file. These may change behavior of this
function in terms of how it looks for images relative to the input, where it
finds explicit images, whether it will attempt to download images, and
whether successfully downloaded images will be stored in the cache.
Parameters
----------
output_directory : str
The directory path where the EPUB is being constructed/output
explicit : str
A directory path to a user specified directory of images. Allows *
wildcard expansion.
input_path : str
The absolute path to the input XML file.
config : config module
The imported configuration module
parsed_article : openaccess_epub.article.Article object
The Article instance for the article being converted to EPUB | [
"Main",
"logic",
"controller",
"for",
"the",
"placement",
"of",
"images",
"into",
"the",
"output",
"directory"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L88-L168 |
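A hedged usage sketch for the get_images controller recorded above; the import path, the config attribute values, and the stand-in article object are assumptions made only to show the call shape, not verified against the installed package.

from types import SimpleNamespace

from openaccess_epub.utils.images import get_images  # assumed import path

# hypothetical configuration covering the attributes the controller reads
config = SimpleNamespace(
    use_input_relative_images=True,
    input_relative_images=['images-*', 'images'],   # hypothetical candidates
    use_image_cache=False,
    image_cache='/tmp/oae-image-cache',             # hypothetical cache root
    use_image_fetching=False,
)
article = SimpleNamespace(doi='10.1371/journal.pone.0000000')  # placeholder DOI

found = get_images(output_directory='/tmp/out',
                   explicit=None,
                   input_path='/data/journal.pone.0000000.xml',
                   config=config,
                   parsed_article=article)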
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/images.py | make_image_cache | def make_image_cache(img_cache):
"""
Initiates the image cache if it does not exist
"""
log.info('Initiating the image cache at {0}'.format(img_cache))
if not os.path.isdir(img_cache):
utils.mkdir_p(img_cache)
utils.mkdir_p(os.path.join(img_cache, '10.1371'))
utils.mkdir_p(os.path.join(img_cache, '10.3389')) | python | def make_image_cache(img_cache):
"""
Initiates the image cache if it does not exist
"""
log.info('Initiating the image cache at {0}'.format(img_cache))
if not os.path.isdir(img_cache):
utils.mkdir_p(img_cache)
utils.mkdir_p(os.path.join(img_cache, '10.1371'))
utils.mkdir_p(os.path.join(img_cache, '10.3389')) | [
"def",
"make_image_cache",
"(",
"img_cache",
")",
":",
"log",
".",
"info",
"(",
"'Initiating the image cache at {0}'",
".",
"format",
"(",
"img_cache",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"img_cache",
")",
":",
"utils",
".",
"mkdir_p",
"(",
"img_cache",
")",
"utils",
".",
"mkdir_p",
"(",
"os",
".",
"path",
".",
"join",
"(",
"img_cache",
",",
"'10.1371'",
")",
")",
"utils",
".",
"mkdir_p",
"(",
"os",
".",
"path",
".",
"join",
"(",
"img_cache",
",",
"'10.3389'",
")",
")"
] | Initiates the image cache if it does not exist | [
"Initiates",
"the",
"image",
"cache",
"if",
"it",
"does",
"not",
"exist"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L171-L179 |
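The cache initialisation above creates one subdirectory per supported publisher DOI prefix; an equivalent stdlib-only sketch, where the cache root is a placeholder and os.makedirs with exist_ok stands in for the library's mkdir_p helper.

import os

def init_image_cache(cache_root):
    # one subdirectory per supported publisher DOI prefix
    for prefix in ('10.1371', '10.3389'):
        os.makedirs(os.path.join(cache_root, prefix), exist_ok=True)

# init_image_cache('/tmp/oae-image-cache')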
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/images.py | fetch_frontiers_images | def fetch_frontiers_images(doi, output_dir):
"""
Fetch the images from Frontiers' website. This method may fail to properly
locate all the images and should be avoided if the files can be accessed
locally. Downloading the images to an appropriate directory in the cache,
or to a directory specified by passed argument are the preferred means to
access images.
"""
log.info('Fetching Frontiers images')
log.warning('This method may fail to locate all images.')
def download_image(fetch, img_file):
try:
image = urllib.request.urlopen(fetch)
except urllib.error.HTTPError as e:
if e.code == 503: # Server overloaded
time.sleep(1) # Wait one second
try:
image = urllib.request.urlopen(fetch)
except:
return None
elif e.code == 500:
print('urllib.error.HTTPError {0}'.format(e.code))
return None
else:
with open(img_file, 'wb') as outimage:
outimage.write(image.read())
return True
def check_equation_completion(images):
"""
In some cases, equations images are not exposed in the fulltext (hidden
behind a rasterized table). This attempts to look for gaps and fix them
"""
log.info('Checking for complete equations')
files = os.listdir(output_dir)
inline_equations = []
for e in files:
if e[0] == 'i':
inline_equations.append(e)
missing = []
highest = 0
if inline_equations:
inline_equations.sort()
highest = int(inline_equations[-1][1:4])
i = 1
while i < highest:
name = 'i{0}.gif'.format(str(i).zfill(3))
if name not in inline_equations:
missing.append(name)
i += 1
get = images[0][:-8]
for m in missing:
loc = os.path.join(output_dir, m)
download_image(get + m, loc)
print('Downloaded image {0}'.format(loc))
#It is possible that we need to go further than the highest
highest += 1
name = 'i{0}.gif'.format(str(highest).zfill(3))
loc = os.path.join(output_dir, name)
while download_image(get + name, loc):
print('Downloaded image {0}'.format(loc))
highest += 1
name = 'i{0}.gif'.format(str(highest).zfill(3))
print('Processing images for {0}...'.format(doi))
#We use the DOI of the article to locate the page.
doistr = 'http://dx.doi.org/{0}'.format(doi)
logging.debug('Accessing DOI address-{0}'.format(doistr))
page = urllib.request.urlopen(doistr)
if page.geturl()[-8:] == 'abstract':
full = page.geturl()[:-8] + 'full'
elif page.geturl()[-4:] == 'full':
full = page.geturl()
print(full)
page = urllib.request.urlopen(full)
with open('temp', 'w') as temp:
temp.write(page.read())
images = []
with open('temp', 'r') as temp:
for l in temp.readlines():
images += re.findall('<a href="(?P<href>http://\w{7}.\w{3}.\w{3}.rackcdn.com/\d{5}/f\w{4}-\d{2}-\d{5}-HTML/image_m/f\w{4}-\d{2}-\d{5}-\D{1,2}\d{3}.\D{3})', l)
images += re.findall('<a href="(?P<href>http://\w{7}.\w{3}.\w{3}.rackcdn.com/\d{5}/f\w{4}-\d{2}-\d{5}-r2/image_m/f\w{4}-\d{2}-\d{5}-\D{1,2}\d{3}.\D{3})', l)
images += re.findall('<img src="(?P<src>http://\w{7}.\w{3}.\w{3}.rackcdn.com/\d{5}/f\w{4}-\d{2}-\d{5}-HTML/image_n/f\w{4}-\d{2}-\d{5}-\D{1,2}\d{3}.\D{3})', l)
os.remove('temp')
for i in images:
loc = os.path.join(output_dir, i.split('-')[-1])
download_image(i, loc)
print('Downloaded image {0}'.format(loc))
if images:
check_equation_completion(images)
print("Done downloading images") | python | def fetch_frontiers_images(doi, output_dir):
"""
Fetch the images from Frontiers' website. This method may fail to properly
locate all the images and should be avoided if the files can be accessed
locally. Downloading the images to an appropriate directory in the cache,
or to a directory specified by passed argument are the preferred means to
access images.
"""
log.info('Fetching Frontiers images')
log.warning('This method may fail to locate all images.')
def download_image(fetch, img_file):
try:
image = urllib.request.urlopen(fetch)
except urllib.error.HTTPError as e:
if e.code == 503: # Server overloaded
time.sleep(1) # Wait one second
try:
image = urllib.request.urlopen(fetch)
except:
return None
elif e.code == 500:
print('urllib.error.HTTPError {0}'.format(e.code))
return None
else:
with open(img_file, 'wb') as outimage:
outimage.write(image.read())
return True
def check_equation_completion(images):
"""
In some cases, equations images are not exposed in the fulltext (hidden
behind a rasterized table). This attempts to look for gaps and fix them
"""
log.info('Checking for complete equations')
files = os.listdir(output_dir)
inline_equations = []
for e in files:
if e[0] == 'i':
inline_equations.append(e)
missing = []
highest = 0
if inline_equations:
inline_equations.sort()
highest = int(inline_equations[-1][1:4])
i = 1
while i < highest:
name = 'i{0}.gif'.format(str(i).zfill(3))
if name not in inline_equations:
missing.append(name)
i += 1
get = images[0][:-8]
for m in missing:
loc = os.path.join(output_dir, m)
download_image(get + m, loc)
print('Downloaded image {0}'.format(loc))
#It is possible that we need to go further than the highest
highest += 1
name = 'i{0}.gif'.format(str(highest).zfill(3))
loc = os.path.join(output_dir, name)
while download_image(get + name, loc):
print('Downloaded image {0}'.format(loc))
highest += 1
name = 'i{0}.gif'.format(str(highest).zfill(3))
print('Processing images for {0}...'.format(doi))
#We use the DOI of the article to locate the page.
doistr = 'http://dx.doi.org/{0}'.format(doi)
logging.debug('Accessing DOI address-{0}'.format(doistr))
page = urllib.request.urlopen(doistr)
if page.geturl()[-8:] == 'abstract':
full = page.geturl()[:-8] + 'full'
elif page.geturl()[-4:] == 'full':
full = page.geturl()
print(full)
page = urllib.request.urlopen(full)
with open('temp', 'w') as temp:
temp.write(page.read())
images = []
with open('temp', 'r') as temp:
for l in temp.readlines():
images += re.findall('<a href="(?P<href>http://\w{7}.\w{3}.\w{3}.rackcdn.com/\d{5}/f\w{4}-\d{2}-\d{5}-HTML/image_m/f\w{4}-\d{2}-\d{5}-\D{1,2}\d{3}.\D{3})', l)
images += re.findall('<a href="(?P<href>http://\w{7}.\w{3}.\w{3}.rackcdn.com/\d{5}/f\w{4}-\d{2}-\d{5}-r2/image_m/f\w{4}-\d{2}-\d{5}-\D{1,2}\d{3}.\D{3})', l)
images += re.findall('<img src="(?P<src>http://\w{7}.\w{3}.\w{3}.rackcdn.com/\d{5}/f\w{4}-\d{2}-\d{5}-HTML/image_n/f\w{4}-\d{2}-\d{5}-\D{1,2}\d{3}.\D{3})', l)
os.remove('temp')
for i in images:
loc = os.path.join(output_dir, i.split('-')[-1])
download_image(i, loc)
print('Downloaded image {0}'.format(loc))
if images:
check_equation_completion(images)
print("Done downloading images") | [
"def",
"fetch_frontiers_images",
"(",
"doi",
",",
"output_dir",
")",
":",
"log",
".",
"info",
"(",
"'Fetching Frontiers images'",
")",
"log",
".",
"warning",
"(",
"'This method may fail to locate all images.'",
")",
"def",
"download_image",
"(",
"fetch",
",",
"img_file",
")",
":",
"try",
":",
"image",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"fetch",
")",
"except",
"urllib",
".",
"error",
".",
"HTTPError",
"as",
"e",
":",
"if",
"e",
".",
"code",
"==",
"503",
":",
"# Server overloaded",
"time",
".",
"sleep",
"(",
"1",
")",
"# Wait one second",
"try",
":",
"image",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"fetch",
")",
"except",
":",
"return",
"None",
"elif",
"e",
".",
"code",
"==",
"500",
":",
"print",
"(",
"'urllib.error.HTTPError {0}'",
".",
"format",
"(",
"e",
".",
"code",
")",
")",
"return",
"None",
"else",
":",
"with",
"open",
"(",
"img_file",
",",
"'wb'",
")",
"as",
"outimage",
":",
"outimage",
".",
"write",
"(",
"image",
".",
"read",
"(",
")",
")",
"return",
"True",
"def",
"check_equation_completion",
"(",
"images",
")",
":",
"\"\"\"\n In some cases, equations images are not exposed in the fulltext (hidden\n behind a rasterized table). This attempts to look for gaps and fix them\n \"\"\"",
"log",
".",
"info",
"(",
"'Checking for complete equations'",
")",
"files",
"=",
"os",
".",
"listdir",
"(",
"output_dir",
")",
"inline_equations",
"=",
"[",
"]",
"for",
"e",
"in",
"files",
":",
"if",
"e",
"[",
"0",
"]",
"==",
"'i'",
":",
"inline_equations",
".",
"append",
"(",
"e",
")",
"missing",
"=",
"[",
"]",
"highest",
"=",
"0",
"if",
"inline_equations",
":",
"inline_equations",
".",
"sort",
"(",
")",
"highest",
"=",
"int",
"(",
"inline_equations",
"[",
"-",
"1",
"]",
"[",
"1",
":",
"4",
"]",
")",
"i",
"=",
"1",
"while",
"i",
"<",
"highest",
":",
"name",
"=",
"'i{0}.gif'",
".",
"format",
"(",
"str",
"(",
"i",
")",
".",
"zfill",
"(",
"3",
")",
")",
"if",
"name",
"not",
"in",
"inline_equations",
":",
"missing",
".",
"append",
"(",
"name",
")",
"i",
"+=",
"1",
"get",
"=",
"images",
"[",
"0",
"]",
"[",
":",
"-",
"8",
"]",
"for",
"m",
"in",
"missing",
":",
"loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"m",
")",
"download_image",
"(",
"get",
"+",
"m",
",",
"loc",
")",
"print",
"(",
"'Downloaded image {0}'",
".",
"format",
"(",
"loc",
")",
")",
"#It is possible that we need to go further than the highest",
"highest",
"+=",
"1",
"name",
"=",
"'i{0}.gif'",
".",
"format",
"(",
"str",
"(",
"highest",
")",
".",
"zfill",
"(",
"3",
")",
")",
"loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"name",
")",
"while",
"download_image",
"(",
"get",
"+",
"name",
",",
"loc",
")",
":",
"print",
"(",
"'Downloaded image {0}'",
".",
"format",
"(",
"loc",
")",
")",
"highest",
"+=",
"1",
"name",
"=",
"'i{0}.gif'",
".",
"format",
"(",
"str",
"(",
"highest",
")",
".",
"zfill",
"(",
"3",
")",
")",
"print",
"(",
"'Processing images for {0}...'",
".",
"format",
"(",
"doi",
")",
")",
"#We use the DOI of the article to locate the page.",
"doistr",
"=",
"'http://dx.doi.org/{0}'",
".",
"format",
"(",
"doi",
")",
"logging",
".",
"debug",
"(",
"'Accessing DOI address-{0}'",
".",
"format",
"(",
"doistr",
")",
")",
"page",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"doistr",
")",
"if",
"page",
".",
"geturl",
"(",
")",
"[",
"-",
"8",
":",
"]",
"==",
"'abstract'",
":",
"full",
"=",
"page",
".",
"geturl",
"(",
")",
"[",
":",
"-",
"8",
"]",
"+",
"'full'",
"elif",
"page",
".",
"geturl",
"(",
")",
"[",
"-",
"4",
":",
"]",
"==",
"'full'",
":",
"full",
"=",
"page",
".",
"geturl",
"(",
")",
"print",
"(",
"full",
")",
"page",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"full",
")",
"with",
"open",
"(",
"'temp'",
",",
"'w'",
")",
"as",
"temp",
":",
"temp",
".",
"write",
"(",
"page",
".",
"read",
"(",
")",
")",
"images",
"=",
"[",
"]",
"with",
"open",
"(",
"'temp'",
",",
"'r'",
")",
"as",
"temp",
":",
"for",
"l",
"in",
"temp",
".",
"readlines",
"(",
")",
":",
"images",
"+=",
"re",
".",
"findall",
"(",
"'<a href=\"(?P<href>http://\\w{7}.\\w{3}.\\w{3}.rackcdn.com/\\d{5}/f\\w{4}-\\d{2}-\\d{5}-HTML/image_m/f\\w{4}-\\d{2}-\\d{5}-\\D{1,2}\\d{3}.\\D{3})'",
",",
"l",
")",
"images",
"+=",
"re",
".",
"findall",
"(",
"'<a href=\"(?P<href>http://\\w{7}.\\w{3}.\\w{3}.rackcdn.com/\\d{5}/f\\w{4}-\\d{2}-\\d{5}-r2/image_m/f\\w{4}-\\d{2}-\\d{5}-\\D{1,2}\\d{3}.\\D{3})'",
",",
"l",
")",
"images",
"+=",
"re",
".",
"findall",
"(",
"'<img src=\"(?P<src>http://\\w{7}.\\w{3}.\\w{3}.rackcdn.com/\\d{5}/f\\w{4}-\\d{2}-\\d{5}-HTML/image_n/f\\w{4}-\\d{2}-\\d{5}-\\D{1,2}\\d{3}.\\D{3})'",
",",
"l",
")",
"os",
".",
"remove",
"(",
"'temp'",
")",
"for",
"i",
"in",
"images",
":",
"loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"i",
".",
"split",
"(",
"'-'",
")",
"[",
"-",
"1",
"]",
")",
"download_image",
"(",
"i",
",",
"loc",
")",
"print",
"(",
"'Downloaded image {0}'",
".",
"format",
"(",
"loc",
")",
")",
"if",
"images",
":",
"check_equation_completion",
"(",
"images",
")",
"print",
"(",
"\"Done downloading images\"",
")"
] | Fetch the images from Frontiers' website. This method may fail to properly
locate all the images and should be avoided if the files can be accessed
locally. Downloading the images to an appropriate directory in the cache,
or to a directory specified by passed argument are the preferred means to
access images. | [
"Fetch",
"the",
"images",
"from",
"Frontiers",
"website",
".",
"This",
"method",
"may",
"fail",
"to",
"properly",
"locate",
"all",
"the",
"images",
"and",
"should",
"be",
"avoided",
"if",
"the",
"files",
"can",
"be",
"accessed",
"locally",
".",
"Downloading",
"the",
"images",
"to",
"an",
"appropriate",
"directory",
"in",
"the",
"cache",
"or",
"to",
"a",
"directory",
"specified",
"by",
"passed",
"argument",
"are",
"the",
"preferred",
"means",
"to",
"access",
"images",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L182-L273 |
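The downloader above retries once after an HTTP 503 before giving up; a compact standalone sketch of that retry pattern, with a placeholder URL and output path.

import time
import urllib.error
import urllib.request

def download_with_one_retry(url, path):
    # try once, wait a second and retry on a 503, give up on any other HTTP error
    for attempt in range(2):
        try:
            data = urllib.request.urlopen(url).read()
        except urllib.error.HTTPError as exc:
            if exc.code == 503 and attempt == 0:
                time.sleep(1)
                continue
            return False
        with open(path, 'wb') as handle:
            handle.write(data)
        return True
    return False

# download_with_one_retry('http://example.org/f0001-01-12345-g001.jpg', '/tmp/g001.jpg')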
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/images.py | fetch_plos_images | def fetch_plos_images(article_doi, output_dir, document):
"""
Fetch the images for a PLoS article from the internet.
PLoS images are known through the inspection of <graphic> and
<inline-graphic> elements. The information in these tags are then parsed
into appropriate URLs for downloading.
"""
log.info('Processing images for {0}...'.format(article_doi))
#A dict of URLs for PLoS subjournals
journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
'pcbi': 'http://www.ploscompbiol.org/article/{0}',
'ppat': 'http://www.plospathogens.org/article/{0}',
'pntd': 'http://www.plosntds.org/article/{0}',
'pmed': 'http://www.plosmedicine.org/article/{0}',
'pbio': 'http://www.plosbiology.org/article/{0}',
'pone': 'http://www.plosone.org/article/{0}',
'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
#Identify subjournal name for base URL
subjournal_name = article_doi.split('.')[1]
base_url = journal_urls[subjournal_name]
#Acquire <graphic> and <inline-graphic> xml elements
graphics = document.document.getroot().findall('.//graphic')
graphics += document.document.getroot().findall('.//inline-graphic')
#Begin to download
log.info('Downloading images, this may take some time...')
for graphic in graphics:
nsmap = document.document.getroot().nsmap
xlink_href = graphic.attrib['{' + nsmap['xlink'] + '}' + 'href']
#Equations are handled a bit differently than the others
#Here we decide that an image name starting with "e" is an equation
if xlink_href.split('.')[-1].startswith('e'):
resource = 'fetchObject.action?uri=' + xlink_href + '&representation=PNG'
else:
resource = xlink_href + '/largerimage'
full_url = base_url.format(resource)
try:
image = urllib.request.urlopen(full_url)
except urllib.error.HTTPError as e:
if e.code == 503: # Server overload error
time.sleep(1) # Wait a second
try:
image = urllib.request.urlopen(full_url)
except:
return False # Happened twice, give up
else:
log.error('urllib.error.HTTPError {0}'.format(e.code))
return False
else:
img_name = xlink_href.split('.')[-1] + '.png'
img_path = os.path.join(output_dir, img_name)
with open(img_path, 'wb') as output:
output.write(image.read())
log.info('Downloaded image {0}'.format(img_name))
log.info('Done downloading images')
return True | python | def fetch_plos_images(article_doi, output_dir, document):
"""
Fetch the images for a PLoS article from the internet.
PLoS images are known through the inspection of <graphic> and
<inline-graphic> elements. The information in these tags are then parsed
into appropriate URLs for downloading.
"""
log.info('Processing images for {0}...'.format(article_doi))
#A dict of URLs for PLoS subjournals
journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
'pcbi': 'http://www.ploscompbiol.org/article/{0}',
'ppat': 'http://www.plospathogens.org/article/{0}',
'pntd': 'http://www.plosntds.org/article/{0}',
'pmed': 'http://www.plosmedicine.org/article/{0}',
'pbio': 'http://www.plosbiology.org/article/{0}',
'pone': 'http://www.plosone.org/article/{0}',
'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
#Identify subjournal name for base URL
subjournal_name = article_doi.split('.')[1]
base_url = journal_urls[subjournal_name]
#Acquire <graphic> and <inline-graphic> xml elements
graphics = document.document.getroot().findall('.//graphic')
graphics += document.document.getroot().findall('.//inline-graphic')
#Begin to download
log.info('Downloading images, this may take some time...')
for graphic in graphics:
nsmap = document.document.getroot().nsmap
xlink_href = graphic.attrib['{' + nsmap['xlink'] + '}' + 'href']
#Equations are handled a bit differently than the others
#Here we decide that an image name starting with "e" is an equation
if xlink_href.split('.')[-1].startswith('e'):
resource = 'fetchObject.action?uri=' + xlink_href + '&representation=PNG'
else:
resource = xlink_href + '/largerimage'
full_url = base_url.format(resource)
try:
image = urllib.request.urlopen(full_url)
except urllib.error.HTTPError as e:
if e.code == 503: # Server overload error
time.sleep(1) # Wait a second
try:
image = urllib.request.urlopen(full_url)
except:
return False # Happened twice, give up
else:
log.error('urllib.error.HTTPError {0}'.format(e.code))
return False
else:
img_name = xlink_href.split('.')[-1] + '.png'
img_path = os.path.join(output_dir, img_name)
with open(img_path, 'wb') as output:
output.write(image.read())
log.info('Downloaded image {0}'.format(img_name))
log.info('Done downloading images')
return True | [
"def",
"fetch_plos_images",
"(",
"article_doi",
",",
"output_dir",
",",
"document",
")",
":",
"log",
".",
"info",
"(",
"'Processing images for {0}...'",
".",
"format",
"(",
"article_doi",
")",
")",
"#A dict of URLs for PLoS subjournals",
"journal_urls",
"=",
"{",
"'pgen'",
":",
"'http://www.plosgenetics.org/article/{0}'",
",",
"'pcbi'",
":",
"'http://www.ploscompbiol.org/article/{0}'",
",",
"'ppat'",
":",
"'http://www.plospathogens.org/article/{0}'",
",",
"'pntd'",
":",
"'http://www.plosntds.org/article/{0}'",
",",
"'pmed'",
":",
"'http://www.plosmedicine.org/article/{0}'",
",",
"'pbio'",
":",
"'http://www.plosbiology.org/article/{0}'",
",",
"'pone'",
":",
"'http://www.plosone.org/article/{0}'",
",",
"'pctr'",
":",
"'http://clinicaltrials.ploshubs.org/article/{0}'",
"}",
"#Identify subjournal name for base URL",
"subjournal_name",
"=",
"article_doi",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"base_url",
"=",
"journal_urls",
"[",
"subjournal_name",
"]",
"#Acquire <graphic> and <inline-graphic> xml elements",
"graphics",
"=",
"document",
".",
"document",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//graphic'",
")",
"graphics",
"+=",
"document",
".",
"document",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//inline-graphic'",
")",
"#Begin to download",
"log",
".",
"info",
"(",
"'Downloading images, this may take some time...'",
")",
"for",
"graphic",
"in",
"graphics",
":",
"nsmap",
"=",
"document",
".",
"document",
".",
"getroot",
"(",
")",
".",
"nsmap",
"xlink_href",
"=",
"graphic",
".",
"attrib",
"[",
"'{'",
"+",
"nsmap",
"[",
"'xlink'",
"]",
"+",
"'}'",
"+",
"'href'",
"]",
"#Equations are handled a bit differently than the others",
"#Here we decide that an image name starting with \"e\" is an equation",
"if",
"xlink_href",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
".",
"startswith",
"(",
"'e'",
")",
":",
"resource",
"=",
"'fetchObject.action?uri='",
"+",
"xlink_href",
"+",
"'&representation=PNG'",
"else",
":",
"resource",
"=",
"xlink_href",
"+",
"'/largerimage'",
"full_url",
"=",
"base_url",
".",
"format",
"(",
"resource",
")",
"try",
":",
"image",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"full_url",
")",
"except",
"urllib",
".",
"error",
".",
"HTTPError",
"as",
"e",
":",
"if",
"e",
".",
"code",
"==",
"503",
":",
"# Server overload error",
"time",
".",
"sleep",
"(",
"1",
")",
"# Wait a second",
"try",
":",
"image",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"full_url",
")",
"except",
":",
"return",
"False",
"# Happened twice, give up",
"else",
":",
"log",
".",
"error",
"(",
"'urllib.error.HTTPError {0}'",
".",
"format",
"(",
"e",
".",
"code",
")",
")",
"return",
"False",
"else",
":",
"img_name",
"=",
"xlink_href",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"+",
"'.png'",
"img_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"img_name",
")",
"with",
"open",
"(",
"img_path",
",",
"'wb'",
")",
"as",
"output",
":",
"output",
".",
"write",
"(",
"image",
".",
"read",
"(",
")",
")",
"log",
".",
"info",
"(",
"'Downloaded image {0}'",
".",
"format",
"(",
"img_name",
")",
")",
"log",
".",
"info",
"(",
"'Done downloading images'",
")",
"return",
"True"
] | Fetch the images for a PLoS article from the internet.
PLoS images are known through the inspection of <graphic> and
<inline-graphic> elements. The information in these tags are then parsed
into appropriate URLs for downloading. | [
"Fetch",
"the",
"images",
"for",
"a",
"PLoS",
"article",
"from",
"the",
"internet",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L276-L336 |
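A small sketch of the DOI-to-URL mapping the function above relies on: the subjournal key is the second dot-separated token of the article DOI and selects a base URL template. Only two entries of the mapping are repeated here, and the example DOI and resource string are placeholders.

def plos_article_url(article_doi, resource):
    # e.g. article_doi 'journal.pone.0012345' -> subjournal key 'pone'
    journal_urls = {
        'pone': 'http://www.plosone.org/article/{0}',
        'pbio': 'http://www.plosbiology.org/article/{0}',
    }
    subjournal = article_doi.split('.')[1]
    return journal_urls[subjournal].format(resource)

# plos_article_url('journal.pone.0012345', 'journal.pone.0012345.g001/largerimage')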
MSchnei/pyprf_feature | pyprf_feature/analysis/save_fit_tc_nii.py | save_tc_to_nii | def save_tc_to_nii(strCsvCnfg, lgcTest=False, lstRat=None, lgcMdlRsp=False,
strPathHrf=None, lgcSaveRam=False):
"""
Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None:
Path to npy file with custom hrf parameters. If None, default
parameters were used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any arguments but, instead, saves nii files
to disk.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# if fitting was done with custom hrf, make sure to retrieve results with
# '_hrf' appendix
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
# If suppressive surround flag is on, make sure to retrieve results with
# '_supsur' appendix
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
cfg.strPathMdl = cfg.strPathMdl + '_supsur'
# Append 1.0 as the first entry, which is the key for fitting without
# surround (only centre)
lstRat.insert(0, 1.0)
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Load beta parameters estimates, aka weights for time courses
lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
aryBetas = load_res_prm(lstPathBeta, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathBeta[0]), errorMsg
# Load ratio image, if fitting was obtained with suppressive surround
if lstRat is not None:
lstPathRatio = [cfg.strPathOut + '_Ratios.nii.gz']
aryRatio = load_res_prm(lstPathRatio,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathRatio[0]), errorMsg
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
aryBetas = aryBetas[aryLgcVar, :]
if lstRat is not None:
aryRatio = aryRatio[aryLgcVar, :]
# Get array with model parameters that were fitted on a grid
# [x positions, y positions, sigmas]
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2,
cfg.varExtYmin, cfg.varExtYmax,
cfg.varNumPrfSizes, cfg.varPrfStdMin,
cfg.varPrfStdMax, kwUnt='deg',
kwCrd=cfg.strKwCrd)
# Load logical for parameter exclusion in unstimulated area
lgcMdlInc = np.load(cfg.strPathMdl + '_lgcMdlInc.npy')
# Apply logical
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Get corresponding pRF model time courses
aryPrfTc = np.load(cfg.strPathMdl + '.npy')
# The model time courses will be preprocessed such that they are smoothed
# (temporally) with same factor as the data and that they will be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
if lgcMdlRsp:
aryMdlRsp = np.load(cfg.strPathMdl + '_mdlRsp.npy')
# %% Derive fitted time course models for all voxels
# Initialize array that will collect the fitted time courses
aryFitTc = np.zeros((aryFunc.shape), dtype=np.float32)
# If desired, initialize array that will collect model responses underlying
# the fitted time course
if lgcMdlRsp:
if lstRat is not None:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0], aryMdlRsp.shape[1],
aryMdlRsp.shape[3]),
dtype=np.float32)
else:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0],
aryMdlRsp.shape[1]), dtype=np.float32)
# create vector that allows to check whether every voxel is visited
# exactly once
vecVxlTst = np.zeros(aryIntGssPrm.shape[0])
# Find unique rows of fitted model parameters
aryUnqRows, aryUnqInd = fnd_unq_rws(aryIntGssPrm, return_index=False,
return_inverse=True)
# Loop over all best-fitting model parameter combinations found
print('---Assign models to voxels')
for indRow, vecPrm in enumerate(aryUnqRows):
# Get logical for voxels for which this prm combi was the best
lgcVxl = [aryUnqInd == indRow][0]
if np.all(np.invert(lgcVxl)):
print('---No voxel found')
# Mark those voxels that were visited
vecVxlTst[lgcVxl] += 1
# Get logical index for the model number
# This can only be 1 index, so we directly get 1st entry of array
lgcMdl = np.where(np.isclose(aryMdlParams, vecPrm,
atol=0.01).all(axis=1))[0][0]
# Tell user if no model was found
if lgcMdl is None:
print('---No model found')
# Get model time courses
aryMdlTc = aryPrfTc[lgcMdl, ...]
# Get beta parameter estimates
aryWeights = aryBetas[lgcVxl, :]
# If fitting was done with surround suppression, find ratios for voxels
# and the indices of these ratios in lstRat
if lstRat is not None:
aryVxlRatio = aryRatio[lgcVxl, :]
indRat = [ind for ind, rat1 in enumerate(lstRat) for rat2 in
aryVxlRatio[:, 0] if np.isclose(rat1, rat2)]
indVxl = range(len(indRat))
# Combine model time courses and weights to yield fitted time course
if lstRat is not None:
aryFitTcTmp = np.tensordot(aryWeights, aryMdlTc, axes=([1], [0]))
aryFitTc[lgcVxl, :] = aryFitTcTmp[indVxl, indRat, :]
else:
aryFitTc[lgcVxl, :] = np.dot(aryWeights, aryMdlTc)
# If desired by user, also save the model responses per voxels
if lgcMdlRsp:
# If desired also save the model responses that won
if lstRat is not None:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :, indRat, :]
else:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :]
# check that every voxel was visited exactly once
errMsg = 'At least one voxel visited more than once for tc recreation'
assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg
# %% Export preprocessed voxel time courses as nii
# List with name suffices of output images:
lstNiiNames = ['_EmpTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFunc as a single 4D nii file
print('---Save empirical time courses')
export_nii(aryFunc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_EmpTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFunc, axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut)
# %% Export fitted time courses and, if desired, model responses as nii
# List with name suffices of output images:
lstNiiNames = ['_FitTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFitTc as a single 4D nii file
print('---Save fitted time courses')
export_nii(aryFitTc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcMdlRsp:
# Create full path name
strNpyName = cfg.strPathOut + '_FitMdlRsp' + '.npy'
# Save aryFitMdlRsp as npy file
print('---Save fitted model responses')
np.save(strNpyName, aryFitMdlRsp)
print('------Done.')
# Save the mask so we know which voxels these parameters belonged to
strNpyMskName = cfg.strPathOut + '_FitMdlRsp_Mask' + '.npy'
aryLgcMsk[aryLgcMsk] = aryLgcVar
print('---Save mask for fitted model responses')
np.save(strNpyMskName, aryLgcMsk)
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_FitTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFitTc,
axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut) | python | def save_tc_to_nii(strCsvCnfg, lgcTest=False, lstRat=None, lgcMdlRsp=False,
strPathHrf=None, lgcSaveRam=False):
"""
Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None:
Path to npy file with custom hrf parameters. If None, default
parameters were used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any arguments but, instead, saves nii files
to disk.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# if fitting was done with custom hrf, make sure to retrieve results with
# '_hrf' appendix
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
# If suppressive surround flag is on, make sure to retrieve results with
# '_supsur' appendix
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
cfg.strPathMdl = cfg.strPathMdl + '_supsur'
# Append 1.0 as the first entry, which is the key for fitting without
# surround (only centre)
lstRat.insert(0, 1.0)
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Load beta parameters estimates, aka weights for time courses
lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
aryBetas = load_res_prm(lstPathBeta, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathBeta[0]), errorMsg
# Load ratio image, if fitting was obtained with suppressive surround
if lstRat is not None:
lstPathRatio = [cfg.strPathOut + '_Ratios.nii.gz']
aryRatio = load_res_prm(lstPathRatio,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathRatio[0]), errorMsg
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
aryBetas = aryBetas[aryLgcVar, :]
if lstRat is not None:
aryRatio = aryRatio[aryLgcVar, :]
# Get array with model parameters that were fitted on a grid
# [x positions, y positions, sigmas]
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2,
cfg.varExtYmin, cfg.varExtYmax,
cfg.varNumPrfSizes, cfg.varPrfStdMin,
cfg.varPrfStdMax, kwUnt='deg',
kwCrd=cfg.strKwCrd)
# Load logical for parameter exclusion in unstimulated area
lgcMdlInc = np.load(cfg.strPathMdl + '_lgcMdlInc.npy')
# Apply logical
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Get corresponding pRF model time courses
aryPrfTc = np.load(cfg.strPathMdl + '.npy')
# The model time courses will be preprocessed such that they are smoothed
# (temporally) with same factor as the data and that they will be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
if lgcMdlRsp:
aryMdlRsp = np.load(cfg.strPathMdl + '_mdlRsp.npy')
# %% Derive fitted time course models for all voxels
# Initialize array that will collect the fitted time courses
aryFitTc = np.zeros((aryFunc.shape), dtype=np.float32)
# If desired, initialize array that will collect model responses underlying
# the fitted time course
if lgcMdlRsp:
if lstRat is not None:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0], aryMdlRsp.shape[1],
aryMdlRsp.shape[3]),
dtype=np.float32)
else:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0],
aryMdlRsp.shape[1]), dtype=np.float32)
# create vector that allows to check whether every voxel is visited
# exactly once
vecVxlTst = np.zeros(aryIntGssPrm.shape[0])
# Find unique rows of fitted model parameters
aryUnqRows, aryUnqInd = fnd_unq_rws(aryIntGssPrm, return_index=False,
return_inverse=True)
# Loop over all best-fitting model parameter combinations found
print('---Assign models to voxels')
for indRow, vecPrm in enumerate(aryUnqRows):
# Get logical for voxels for which this prm combi was the best
lgcVxl = [aryUnqInd == indRow][0]
if np.all(np.invert(lgcVxl)):
print('---No voxel found')
# Mark those voxels that were visited
vecVxlTst[lgcVxl] += 1
# Get logical index for the model number
# This can only be 1 index, so we directly get 1st entry of array
lgcMdl = np.where(np.isclose(aryMdlParams, vecPrm,
atol=0.01).all(axis=1))[0][0]
# Tell user if no model was found
if lgcMdl is None:
print('---No model found')
# Get model time courses
aryMdlTc = aryPrfTc[lgcMdl, ...]
# Get beta parameter estimates
aryWeights = aryBetas[lgcVxl, :]
# If fitting was done with surround suppression, find ratios for voxels
# and the indices of these ratios in lstRat
if lstRat is not None:
aryVxlRatio = aryRatio[lgcVxl, :]
indRat = [ind for ind, rat1 in enumerate(lstRat) for rat2 in
aryVxlRatio[:, 0] if np.isclose(rat1, rat2)]
indVxl = range(len(indRat))
# Combine model time courses and weights to yield fitted time course
if lstRat is not None:
aryFitTcTmp = np.tensordot(aryWeights, aryMdlTc, axes=([1], [0]))
aryFitTc[lgcVxl, :] = aryFitTcTmp[indVxl, indRat, :]
else:
aryFitTc[lgcVxl, :] = np.dot(aryWeights, aryMdlTc)
# If desired by user, also save the model responses per voxels
if lgcMdlRsp:
# If desired also save the model responses that won
if lstRat is not None:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :, indRat, :]
else:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :]
# check that every voxel was visited exactly once
errMsg = 'At least one voxel visited more than once for tc recreation'
assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg
# %% Export preprocessed voxel time courses as nii
# List with name suffices of output images:
lstNiiNames = ['_EmpTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFunc as a single 4D nii file
print('---Save empirical time courses')
export_nii(aryFunc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_EmpTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFunc, axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut)
# %% Export fitted time courses and, if desired, model responses as nii
# List with name suffices of output images:
lstNiiNames = ['_FitTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFitTc as a single 4D nii file
print('---Save fitted time courses')
export_nii(aryFitTc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcMdlRsp:
# Create full path name
strNpyName = cfg.strPathOut + '_FitMdlRsp' + '.npy'
# Save aryFitMdlRsp as npy file
print('---Save fitted model responses')
np.save(strNpyName, aryFitMdlRsp)
print('------Done.')
# Save the mask so we know which voxels these parameters belonged to
strNpyMskName = cfg.strPathOut + '_FitMdlRsp_Mask' + '.npy'
aryLgcMsk[aryLgcMsk] = aryLgcVar
print('---Save mask for fitted model responses')
np.save(strNpyMskName, aryLgcMsk)
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_FitTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFitTc,
axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut) | [
"def",
"save_tc_to_nii",
"(",
"strCsvCnfg",
",",
"lgcTest",
"=",
"False",
",",
"lstRat",
"=",
"None",
",",
"lgcMdlRsp",
"=",
"False",
",",
"strPathHrf",
"=",
"None",
",",
"lgcSaveRam",
"=",
"False",
")",
":",
"# %% Load configuration settings that were used for fitting",
"# Load config parameters from csv file into dictionary:",
"dicCnfg",
"=",
"load_config",
"(",
"strCsvCnfg",
",",
"lgcTest",
"=",
"lgcTest",
")",
"# Load config parameters from dictionary into namespace:",
"cfg",
"=",
"cls_set_config",
"(",
"dicCnfg",
")",
"# if fitting was done with custom hrf, make sure to retrieve results with",
"# '_hrf' appendix",
"if",
"strPathHrf",
"is",
"not",
"None",
":",
"cfg",
".",
"strPathOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_hrf'",
"# If suppressive surround flag is on, make sure to retrieve results with",
"# '_supsur' appendix",
"if",
"lstRat",
"is",
"not",
"None",
":",
"cfg",
".",
"strPathOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_supsur'",
"cfg",
".",
"strPathMdl",
"=",
"cfg",
".",
"strPathMdl",
"+",
"'_supsur'",
"# Append 1.0 as the first entry, which is the key for fitting without",
"# surround (only centre)",
"lstRat",
".",
"insert",
"(",
"0",
",",
"1.0",
")",
"# %% Load previous pRF fitting results",
"# Derive paths to the x, y, sigma winner parameters from pyprf_feature",
"lstWnrPrm",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_x_pos.nii.gz'",
",",
"cfg",
".",
"strPathOut",
"+",
"'_y_pos.nii.gz'",
",",
"cfg",
".",
"strPathOut",
"+",
"'_SD.nii.gz'",
"]",
"# Check if fitting has been performed, i.e. whether parameter files exist",
"# Throw error message if they do not exist.",
"errorMsg",
"=",
"'Files that should have resulted from fitting do not exist. \\\n \\nPlease perform pRF fitting first, calling e.g.: \\\n \\npyprf_feature -config /path/to/my_config_file.csv'",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"0",
"]",
")",
",",
"errorMsg",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"1",
"]",
")",
",",
"errorMsg",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"2",
"]",
")",
",",
"errorMsg",
"# Load the x, y, sigma winner parameters from pyprf_feature",
"aryIntGssPrm",
"=",
"load_res_prm",
"(",
"lstWnrPrm",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Load beta parameters estimates, aka weights for time courses",
"lstPathBeta",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_Betas.nii.gz'",
"]",
"aryBetas",
"=",
"load_res_prm",
"(",
"lstPathBeta",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstPathBeta",
"[",
"0",
"]",
")",
",",
"errorMsg",
"# Load ratio image, if fitting was obtained with suppressive surround",
"if",
"lstRat",
"is",
"not",
"None",
":",
"lstPathRatio",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_Ratios.nii.gz'",
"]",
"aryRatio",
"=",
"load_res_prm",
"(",
"lstPathRatio",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstPathRatio",
"[",
"0",
"]",
")",
",",
"errorMsg",
"# Some voxels were excluded because they did not have sufficient mean",
"# and/or variance - exclude their initial parameters, too",
"# Get inclusion mask and nii header",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"hdrMsk",
",",
"aryAff",
",",
"aryFunc",
",",
"tplNiiShp",
"=",
"prep_func",
"(",
"cfg",
".",
"strPathNiiMask",
",",
"cfg",
".",
"lstPathNiiFunc",
",",
"varAvgThr",
"=",
"-",
"100",
")",
"# Apply inclusion mask",
"aryIntGssPrm",
"=",
"aryIntGssPrm",
"[",
"aryLgcVar",
",",
":",
"]",
"aryBetas",
"=",
"aryBetas",
"[",
"aryLgcVar",
",",
":",
"]",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryRatio",
"=",
"aryRatio",
"[",
"aryLgcVar",
",",
":",
"]",
"# Get array with model parameters that were fitted on a grid",
"# [x positions, y positions, sigmas]",
"aryMdlParams",
"=",
"crt_mdl_prms",
"(",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"cfg",
".",
"varNum1",
",",
"cfg",
".",
"varExtXmin",
",",
"cfg",
".",
"varExtXmax",
",",
"cfg",
".",
"varNum2",
",",
"cfg",
".",
"varExtYmin",
",",
"cfg",
".",
"varExtYmax",
",",
"cfg",
".",
"varNumPrfSizes",
",",
"cfg",
".",
"varPrfStdMin",
",",
"cfg",
".",
"varPrfStdMax",
",",
"kwUnt",
"=",
"'deg'",
",",
"kwCrd",
"=",
"cfg",
".",
"strKwCrd",
")",
"# Load logical for parameter exclusion in unstimulated area",
"lgcMdlInc",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strPathMdl",
"+",
"'_lgcMdlInc.npy'",
")",
"# Apply logical",
"aryMdlParams",
"=",
"aryMdlParams",
"[",
"lgcMdlInc",
",",
":",
"]",
"# Get corresponding pRF model time courses",
"aryPrfTc",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strPathMdl",
"+",
"'.npy'",
")",
"# The model time courses will be preprocessed such that they are smoothed",
"# (temporally) with same factor as the data and that they will be z-scored:",
"aryPrfTc",
"=",
"prep_models",
"(",
"aryPrfTc",
",",
"varSdSmthTmp",
"=",
"cfg",
".",
"varSdSmthTmp",
")",
"if",
"lgcMdlRsp",
":",
"aryMdlRsp",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strPathMdl",
"+",
"'_mdlRsp.npy'",
")",
"# %% Derive fitted time course models for all voxels",
"# Initialize array that will collect the fitted time courses",
"aryFitTc",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryFunc",
".",
"shape",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# If desired, initiliaze array that will collect model responses underlying",
"# the fitted time course",
"if",
"lgcMdlRsp",
":",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryFitMdlRsp",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryIntGssPrm",
".",
"shape",
"[",
"0",
"]",
",",
"aryMdlRsp",
".",
"shape",
"[",
"1",
"]",
",",
"aryMdlRsp",
".",
"shape",
"[",
"3",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"else",
":",
"aryFitMdlRsp",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryIntGssPrm",
".",
"shape",
"[",
"0",
"]",
",",
"aryMdlRsp",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# create vector that allows to check whether every voxel is visited",
"# exactly once",
"vecVxlTst",
"=",
"np",
".",
"zeros",
"(",
"aryIntGssPrm",
".",
"shape",
"[",
"0",
"]",
")",
"# Find unique rows of fitted model parameters",
"aryUnqRows",
",",
"aryUnqInd",
"=",
"fnd_unq_rws",
"(",
"aryIntGssPrm",
",",
"return_index",
"=",
"False",
",",
"return_inverse",
"=",
"True",
")",
"# Loop over all best-fitting model parameter combinations found",
"print",
"(",
"'---Assign models to voxels'",
")",
"for",
"indRow",
",",
"vecPrm",
"in",
"enumerate",
"(",
"aryUnqRows",
")",
":",
"# Get logical for voxels for which this prm combi was the best",
"lgcVxl",
"=",
"[",
"aryUnqInd",
"==",
"indRow",
"]",
"[",
"0",
"]",
"if",
"np",
".",
"all",
"(",
"np",
".",
"invert",
"(",
"lgcVxl",
")",
")",
":",
"print",
"(",
"'---No voxel found'",
")",
"# Mark those voxels that were visited",
"vecVxlTst",
"[",
"lgcVxl",
"]",
"+=",
"1",
"# Get logical index for the model number",
"# This can only be 1 index, so we directly get 1st entry of array",
"lgcMdl",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isclose",
"(",
"aryMdlParams",
",",
"vecPrm",
",",
"atol",
"=",
"0.01",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Tell user if no model was found",
"if",
"lgcMdl",
"is",
"None",
":",
"print",
"(",
"'---No model found'",
")",
"# Get model time courses",
"aryMdlTc",
"=",
"aryPrfTc",
"[",
"lgcMdl",
",",
"...",
"]",
"# Get beta parameter estimates",
"aryWeights",
"=",
"aryBetas",
"[",
"lgcVxl",
",",
":",
"]",
"# If fitting was done with surround suppression, find ratios for voxels",
"# and the indices of these ratios in lstRat",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryVxlRatio",
"=",
"aryRatio",
"[",
"lgcVxl",
",",
":",
"]",
"indRat",
"=",
"[",
"ind",
"for",
"ind",
",",
"rat1",
"in",
"enumerate",
"(",
"lstRat",
")",
"for",
"rat2",
"in",
"aryVxlRatio",
"[",
":",
",",
"0",
"]",
"if",
"np",
".",
"isclose",
"(",
"rat1",
",",
"rat2",
")",
"]",
"indVxl",
"=",
"range",
"(",
"len",
"(",
"indRat",
")",
")",
"# Combine model time courses and weights to yield fitted time course",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryFitTcTmp",
"=",
"np",
".",
"tensordot",
"(",
"aryWeights",
",",
"aryMdlTc",
",",
"axes",
"=",
"(",
"[",
"1",
"]",
",",
"[",
"0",
"]",
")",
")",
"aryFitTc",
"[",
"lgcVxl",
",",
":",
"]",
"=",
"aryFitTcTmp",
"[",
"indVxl",
",",
"indRat",
",",
":",
"]",
"else",
":",
"aryFitTc",
"[",
"lgcVxl",
",",
":",
"]",
"=",
"np",
".",
"dot",
"(",
"aryWeights",
",",
"aryMdlTc",
")",
"# If desired by user, also save the model responses per voxels",
"if",
"lgcMdlRsp",
":",
"# If desired also save the model responses that won",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryFitMdlRsp",
"[",
"lgcVxl",
",",
":",
"]",
"=",
"aryMdlRsp",
"[",
"lgcMdl",
",",
":",
",",
"indRat",
",",
":",
"]",
"else",
":",
"aryFitMdlRsp",
"[",
"lgcVxl",
",",
":",
"]",
"=",
"aryMdlRsp",
"[",
"lgcMdl",
",",
":",
"]",
"# check that every voxel was visited exactly once",
"errMsg",
"=",
"'At least one voxel visited more than once for tc recreation'",
"assert",
"len",
"(",
"vecVxlTst",
")",
"==",
"np",
".",
"sum",
"(",
"vecVxlTst",
")",
",",
"errMsg",
"# %% Export preprocessed voxel time courses as nii",
"# List with name suffices of output images:",
"lstNiiNames",
"=",
"[",
"'_EmpTc'",
"]",
"# Create full path names from nii file names and output path",
"lstNiiNames",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"strNii",
"+",
"'.nii.gz'",
"for",
"strNii",
"in",
"lstNiiNames",
"]",
"# export aryFunc as a single 4D nii file",
"print",
"(",
"'---Save empirical time courses'",
")",
"export_nii",
"(",
"aryFunc",
",",
"lstNiiNames",
",",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"tplNiiShp",
",",
"aryAff",
",",
"hdrMsk",
",",
"outFormat",
"=",
"'4D'",
")",
"print",
"(",
"'------Done.'",
")",
"# If desired by user, also save RAM-saving version of nii",
"if",
"lgcSaveRam",
":",
"strPthRamOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_EmpTc_saveRAM'",
"+",
"'.nii.gz'",
"imgNii",
"=",
"nb",
".",
"Nifti1Image",
"(",
"np",
".",
"expand_dims",
"(",
"np",
".",
"expand_dims",
"(",
"aryFunc",
",",
"axis",
"=",
"1",
")",
",",
"axis",
"=",
"1",
")",
",",
"affine",
"=",
"np",
".",
"eye",
"(",
"4",
")",
")",
"nb",
".",
"save",
"(",
"imgNii",
",",
"strPthRamOut",
")",
"# %% Export fitted time courses and, if desired, model responses as nii",
"# List with name suffices of output images:",
"lstNiiNames",
"=",
"[",
"'_FitTc'",
"]",
"# Create full path names from nii file names and output path",
"lstNiiNames",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"strNii",
"+",
"'.nii.gz'",
"for",
"strNii",
"in",
"lstNiiNames",
"]",
"# export aryFitTc as a single 4D nii file",
"print",
"(",
"'---Save fitted time courses'",
")",
"export_nii",
"(",
"aryFitTc",
",",
"lstNiiNames",
",",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"tplNiiShp",
",",
"aryAff",
",",
"hdrMsk",
",",
"outFormat",
"=",
"'4D'",
")",
"print",
"(",
"'------Done.'",
")",
"if",
"lgcMdlRsp",
":",
"# Create full path name",
"strNpyName",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_FitMdlRsp'",
"+",
"'.npy'",
"# Save aryFitMdlRsp as npy file",
"print",
"(",
"'---Save fitted model responses'",
")",
"np",
".",
"save",
"(",
"strNpyName",
",",
"aryFitMdlRsp",
")",
"print",
"(",
"'------Done.'",
")",
"# Save the mask so we know which voxels these parameters belonged to",
"strNpyMskName",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_FitMdlRsp_Mask'",
"+",
"'.npy'",
"aryLgcMsk",
"[",
"aryLgcMsk",
"]",
"=",
"aryLgcVar",
"print",
"(",
"'---Save mask for fitted model responses'",
")",
"np",
".",
"save",
"(",
"strNpyMskName",
",",
"aryLgcMsk",
")",
"print",
"(",
"'------Done.'",
")",
"# If desired by user, also save RAM-saving version of nii",
"if",
"lgcSaveRam",
":",
"strPthRamOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_FitTc_saveRAM'",
"+",
"'.nii.gz'",
"imgNii",
"=",
"nb",
".",
"Nifti1Image",
"(",
"np",
".",
"expand_dims",
"(",
"np",
".",
"expand_dims",
"(",
"aryFitTc",
",",
"axis",
"=",
"1",
")",
",",
"axis",
"=",
"1",
")",
",",
"affine",
"=",
"np",
".",
"eye",
"(",
"4",
")",
")",
"nb",
".",
"save",
"(",
"imgNii",
",",
"strPthRamOut",
")"
] | Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters are used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any arguments but, instead, saves nii files
to disk. | [
"Save",
"empirical",
"and",
"fitted",
"time",
"courses",
"to",
"nii",
"file",
"format",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/save_fit_tc_nii.py#L39-L292 |
MSchnei/pyprf_feature | pyprf_feature/analysis/pyprf_sim.py | pyprf_sim | def pyprf_sim(strPrior, strStmApr, lgcNoise=False, lgcRtnNrl=True,
lstRat=None, lgcTest=False):
"""
Simulate pRF response given pRF parameters and stimulus apertures.
Parameters
----------
strPrior : str
Absolute file path of config file used for pRF fitting.
strStmApr : str
Absolute file path to stimulus aperture used in in-silico experiment.
lgcNoise : boolean
Should noise be added to the simulated pRF time course. By default, no
noise is added.
lgcRtnNrl : boolean
Should neural time course, unconvolved with hrf, be returned as well?
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcTest : boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
Notes
-----
[1] This function does not return any arguments but, instead, saves nii
files to disk.
[2] strStmApr should be a path to a npy file that contains a 3D numpy
array. This array consists of binary images in boolean form that
represent the stimulus aperture. Images are stacked along the last axis.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strPrior, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# If suppressive surround flag is on, make sure to retrieve results from
# that fitting
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Also load suppressive surround params if suppressive surround flag was on
if lstRat is not None:
# Load beta parameters estimates, aka weights, this is later needed to
# scale responses of the center wrt to the surround
lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
aryBetas = load_res_prm(lstPathBeta,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Load ratio of prf sizes
lstPathRat = [cfg.strPathOut + '_Ratios.nii.gz']
aryRat = load_res_prm(lstPathRat, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, _, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=100.)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
if lstRat is not None:
aryBetas = aryBetas[aryLgcVar, :]
aryRat = aryRat[aryLgcVar]
# %% Load stimulus aperture and create model responses to stimuli
# Load stimulus aperture
aryStmApr = np.load(strStmApr)
# Which dimensions does the representation have in pixel space?
tplStmApr = aryStmApr.shape[:2]
# Convert winner parameters from degrees of visual angle to pixel
vecIntX, vecIntY, vecIntSd = rmp_deg_pixel_xys(aryIntGssPrm[:, 0],
aryIntGssPrm[:, 1],
aryIntGssPrm[:, 2],
tplStmApr,
cfg.varExtXmin,
cfg.varExtXmax,
cfg.varExtYmin,
cfg.varExtYmax)
aryIntGssPrmPxl = np.column_stack((vecIntX, vecIntY, vecIntSd))
# Create 2D Gauss model responses to spatial conditions.
print('---Create 2D Gauss model responses to spatial conditions')
aryMdlRsp = crt_mdl_rsp(aryStmApr, tplStmApr, aryIntGssPrmPxl, cfg.varPar)
# If supsur flag was provided, also create responses with supsur params
# and combine positive center response with negative surround response
if lstRat is not None:
aryIntGssPrmPxlSur = np.copy(aryIntGssPrmPxl)
# Adjust pRF sizes using the ratio of pRF sizes
aryIntGssPrmPxlSur[:, 2] = np.multiply(aryIntGssPrmPxlSur[:, 2],
aryRat)
aryMdlRspSur = crt_mdl_rsp(aryStmApr, tplStmApr, aryIntGssPrmPxlSur,
cfg.varPar)
# Now the responses of the center and the surround need to be combined
# in a meaningful way. One way this could be done is to take the ratio
# of gain parameters that were found when fitting (i.e. betas)
varGainRat = np.divide(aryBetas[:, 0], aryBetas[:, 1])
aryMdlRsp = np.subtract(aryMdlRsp,
np.multiply(varGainRat, aryMdlRspSur))
# %% Convolve time courses with hrf function
# First temporally upsample the model response
aryMdlRspUps = np.repeat(aryMdlRsp, cfg.varTmpOvsmpl, axis=-1)
# Convolve with hrf function
arySimRsp = crt_prf_tc(aryMdlRspUps, aryMdlRsp.shape[-1], cfg.varTr,
cfg.varTmpOvsmpl, 1, tplStmApr, cfg.varPar)
# Squeeze simulated response. This step is necessary because crt_prf_tc is,
# in principle, capable of convolving with derivatives of the canonical function
if arySimRsp.shape[1] > 1:
print('***WARNING: pyprf_sim expects 1 hrf function, currently***')
arySimRsp = np.squeeze(arySimRsp)
# Save memory by deleting upsampled time course
del(aryMdlRspUps)
# %% Add auto-correlated noise
if lgcNoise:
print('***Adding noise feature not yet implemented***')
# %% Export simulated prf, and if desired neural, time courses as nii
# List with name suffixes of output images:
lstNiiNames = ['_SimPrfTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
print('---Save simulated pRF time courses')
export_nii(arySimRsp, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcRtnNrl:
# List with name suffixes of output images:
lstNiiNames = ['_SimNrlTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
print('---Save simulated neural time courses')
export_nii(aryMdlRsp, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.') | python | def pyprf_sim(strPrior, strStmApr, lgcNoise=False, lgcRtnNrl=True,
lstRat=None, lgcTest=False):
"""
Simulate pRF response given pRF parameters and stimulus apertures.
Parameters
----------
strPrior : str
Absolute file path of config file used for pRF fitting.
strStmApr : str
Absolute file path to stimulus aperture used in in-silico experiment.
lgcNoise : boolean
Should noise be added to the simulated pRF time course. By default, no
noise is added.
lgcRtnNrl : boolean
Should neural time course, unconvolved with hrf, be returned as well?
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcTest : boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
Notes
-----
[1] This function does not return any arguments but, instead, saves nii
files to disk.
[2] strStmApr should be a path to a npy file that contains a 3D numpy
array. This array consists of binary images in boolean form that
represent the stimulus aperture. Images are stacked along the last axis.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strPrior, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# If suppressive surround flag is on, make sure to retrieve results from
# that fitting
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Also load suppressive surround params if suppressive surround flag was on
if lstRat is not None:
# Load beta parameters estimates, aka weights, this is later needed to
# scale responses of the center wrt to the surround
lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
aryBetas = load_res_prm(lstPathBeta,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Load ratio of prf sizes
lstPathRat = [cfg.strPathOut + '_Ratios.nii.gz']
aryRat = load_res_prm(lstPathRat, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, _, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=100.)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
if lstRat is not None:
aryBetas = aryBetas[aryLgcVar, :]
aryRat = aryRat[aryLgcVar]
# %% Load stimulus aperture and create model responses to stimuli
# Load stimulus aperture
aryStmApr = np.load(strStmApr)
# Which dimensions does the representation have in pixel space?
tplStmApr = aryStmApr.shape[:2]
# Convert winner parameters from degrees of visual angle to pixel
vecIntX, vecIntY, vecIntSd = rmp_deg_pixel_xys(aryIntGssPrm[:, 0],
aryIntGssPrm[:, 1],
aryIntGssPrm[:, 2],
tplStmApr,
cfg.varExtXmin,
cfg.varExtXmax,
cfg.varExtYmin,
cfg.varExtYmax)
aryIntGssPrmPxl = np.column_stack((vecIntX, vecIntY, vecIntSd))
# Create 2D Gauss model responses to spatial conditions.
print('---Create 2D Gauss model responses to spatial conditions')
aryMdlRsp = crt_mdl_rsp(aryStmApr, tplStmApr, aryIntGssPrmPxl, cfg.varPar)
# If supsur flag was provided, also create responses with supsur params
# and combine positive center response with negative surround response
if lstRat is not None:
aryIntGssPrmPxlSur = np.copy(aryIntGssPrmPxl)
# Adjust pRF sizes using the ratio of pRF sizes
aryIntGssPrmPxlSur[:, 2] = np.multiply(aryIntGssPrmPxlSur[:, 2],
aryRat)
aryMdlRspSur = crt_mdl_rsp(aryStmApr, tplStmApr, aryIntGssPrmPxlSur,
cfg.varPar)
# Now the responses of the center and the surround need to be combined
# in a meaningful way. One way this could be done is to take the ratio
# of gain parameters that were found when fitting (i.e. betas)
varGainRat = np.divide(aryBetas[:, 0], aryBetas[:, 1])
aryMdlRsp = np.subtract(aryMdlRsp,
np.multiply(varGainRat, aryMdlRspSur))
# %% Convolve time courses with hrf function
# First temporally upsample the model response
aryMdlRspUps = np.repeat(aryMdlRsp, cfg.varTmpOvsmpl, axis=-1)
# Convolve with hrf function
arySimRsp = crt_prf_tc(aryMdlRspUps, aryMdlRsp.shape[-1], cfg.varTr,
cfg.varTmpOvsmpl, 1, tplStmApr, cfg.varPar)
# Squeeze simulated response. This step is necessary because crt_prf_tc is,
# in principle, capable of convolving with derivatives of the canonical function
if arySimRsp.shape[1] > 1:
print('***WARNING: pyprf_sim expects 1 hrf function, currently***')
arySimRsp = np.squeeze(arySimRsp)
# Save memory by deleting upsampled time course
del(aryMdlRspUps)
# %% Add auto-correlated noise
if lgcNoise:
print('***Adding noise feature not yet implemented***')
# %% Export simulated prf, and if desired neural, time courses as nii
# List with name suffixes of output images:
lstNiiNames = ['_SimPrfTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
print('---Save simulated pRF time courses')
export_nii(arySimRsp, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcRtnNrl:
# List with name suffixes of output images:
lstNiiNames = ['_SimNrlTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
print('---Save simulated neural time courses')
export_nii(aryMdlRsp, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.') | [
"def",
"pyprf_sim",
"(",
"strPrior",
",",
"strStmApr",
",",
"lgcNoise",
"=",
"False",
",",
"lgcRtnNrl",
"=",
"True",
",",
"lstRat",
"=",
"None",
",",
"lgcTest",
"=",
"False",
")",
":",
"# %% Load configuration settings that were used for fitting",
"# Load config parameters from csv file into dictionary:",
"dicCnfg",
"=",
"load_config",
"(",
"strPrior",
",",
"lgcTest",
"=",
"lgcTest",
")",
"# Load config parameters from dictionary into namespace:",
"cfg",
"=",
"cls_set_config",
"(",
"dicCnfg",
")",
"# If suppressive surround flag is on, make sure to retrieve results from",
"# that fitting",
"if",
"lstRat",
"is",
"not",
"None",
":",
"cfg",
".",
"strPathOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_supsur'",
"# %% Load previous pRF fitting results",
"# Derive paths to the x, y, sigma winner parameters from pyprf_feature",
"lstWnrPrm",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_x_pos.nii.gz'",
",",
"cfg",
".",
"strPathOut",
"+",
"'_y_pos.nii.gz'",
",",
"cfg",
".",
"strPathOut",
"+",
"'_SD.nii.gz'",
"]",
"# Check if fitting has been performed, i.e. whether parameter files exist",
"# Throw error message if they do not exist.",
"errorMsg",
"=",
"'Files that should have resulted from fitting do not exist. \\\n \\nPlease perform pRF fitting first, calling e.g.: \\\n \\npyprf_feature -config /path/to/my_config_file.csv'",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"0",
"]",
")",
",",
"errorMsg",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"1",
"]",
")",
",",
"errorMsg",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"2",
"]",
")",
",",
"errorMsg",
"# Load the x, y, sigma winner parameters from pyprf_feature",
"aryIntGssPrm",
"=",
"load_res_prm",
"(",
"lstWnrPrm",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Also load suppresive surround params if suppressive surround flag was on",
"if",
"lstRat",
"is",
"not",
"None",
":",
"# Load beta parameters estimates, aka weights, this is later needed to",
"# scale responses of the center wrt to the surround",
"lstPathBeta",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_Betas.nii.gz'",
"]",
"aryBetas",
"=",
"load_res_prm",
"(",
"lstPathBeta",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Load ratio of prf sizes",
"lstPathRat",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_Ratios.nii.gz'",
"]",
"aryRat",
"=",
"load_res_prm",
"(",
"lstPathRat",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Some voxels were excluded because they did not have sufficient mean",
"# and/or variance - exclude their initial parameters, too",
"# Get inclusion mask and nii header",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"hdrMsk",
",",
"aryAff",
",",
"_",
",",
"tplNiiShp",
"=",
"prep_func",
"(",
"cfg",
".",
"strPathNiiMask",
",",
"cfg",
".",
"lstPathNiiFunc",
",",
"varAvgThr",
"=",
"100.",
")",
"# Apply inclusion mask",
"aryIntGssPrm",
"=",
"aryIntGssPrm",
"[",
"aryLgcVar",
",",
":",
"]",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryBetas",
"=",
"aryBetas",
"[",
"aryLgcVar",
",",
":",
"]",
"aryRat",
"=",
"aryRat",
"[",
"aryLgcVar",
"]",
"# %% Load stimulus aperture and create model responses to stimuli",
"# Load stimulus aperture",
"aryStmApr",
"=",
"np",
".",
"load",
"(",
"strStmApr",
")",
"# Which dimensions does the representation have in pixel space?",
"tplStmApr",
"=",
"aryStmApr",
".",
"shape",
"[",
":",
"2",
"]",
"# Convert winner parameters from degrees of visual angle to pixel",
"vecIntX",
",",
"vecIntY",
",",
"vecIntSd",
"=",
"rmp_deg_pixel_xys",
"(",
"aryIntGssPrm",
"[",
":",
",",
"0",
"]",
",",
"aryIntGssPrm",
"[",
":",
",",
"1",
"]",
",",
"aryIntGssPrm",
"[",
":",
",",
"2",
"]",
",",
"tplStmApr",
",",
"cfg",
".",
"varExtXmin",
",",
"cfg",
".",
"varExtXmax",
",",
"cfg",
".",
"varExtYmin",
",",
"cfg",
".",
"varExtYmax",
")",
"aryIntGssPrmPxl",
"=",
"np",
".",
"column_stack",
"(",
"(",
"vecIntX",
",",
"vecIntY",
",",
"vecIntSd",
")",
")",
"# Create 2D Gauss model responses to spatial conditions.",
"print",
"(",
"'---Create 2D Gauss model responses to spatial conditions'",
")",
"aryMdlRsp",
"=",
"crt_mdl_rsp",
"(",
"aryStmApr",
",",
"tplStmApr",
",",
"aryIntGssPrmPxl",
",",
"cfg",
".",
"varPar",
")",
"# If supsur flag was provided, also create responses with supsur params",
"# and combine positive center response with negative surround response",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryIntGssPrmPxlSur",
"=",
"np",
".",
"copy",
"(",
"aryIntGssPrmPxl",
")",
"# Adjust pRF sizes using the ratio of pRF sizes",
"aryIntGssPrmPxlSur",
"[",
":",
",",
"2",
"]",
"=",
"np",
".",
"multiply",
"(",
"aryIntGssPrmPxlSur",
"[",
":",
",",
"2",
"]",
",",
"aryRat",
")",
"aryMdlRspSur",
"=",
"crt_mdl_rsp",
"(",
"aryStmApr",
",",
"tplStmApr",
",",
"aryIntGssPrmPxlSur",
",",
"cfg",
".",
"varPar",
")",
"# Now the responses of the center and the surround need to be combined",
"# in a meaningful way. One way this could be done is to take the ratio",
"# of gain parameters that were found when fitting (i.e. betas)",
"varGainRat",
"=",
"np",
".",
"divide",
"(",
"aryBetas",
"[",
":",
",",
"0",
"]",
",",
"aryBetas",
"[",
":",
",",
"1",
"]",
")",
"aryMdlRsp",
"=",
"np",
".",
"subtract",
"(",
"aryMdlRsp",
",",
"np",
".",
"multiply",
"(",
"varGainRat",
",",
"aryMdlRspSur",
")",
")",
"# %% Convolve time courses with hrf function",
"# First temporally upsamle the model response",
"aryMdlRspUps",
"=",
"np",
".",
"repeat",
"(",
"aryMdlRsp",
",",
"cfg",
".",
"varTmpOvsmpl",
",",
"axis",
"=",
"-",
"1",
")",
"# Convolve with hrf function",
"arySimRsp",
"=",
"crt_prf_tc",
"(",
"aryMdlRspUps",
",",
"aryMdlRsp",
".",
"shape",
"[",
"-",
"1",
"]",
",",
"cfg",
".",
"varTr",
",",
"cfg",
".",
"varTmpOvsmpl",
",",
"1",
",",
"tplStmApr",
",",
"cfg",
".",
"varPar",
")",
"# Squeeze simulated reponse. This step is necessary because crt_prf_tc is,",
"# in principle, capable of convolving with deriavtes of canonical function",
"if",
"arySimRsp",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"print",
"(",
"'***WARNING: pyprf_sim expects 1 hrf function, currently***'",
")",
"arySimRsp",
"=",
"np",
".",
"squeeze",
"(",
"arySimRsp",
")",
"# Save memory by deleting upsampled time course",
"del",
"(",
"aryMdlRspUps",
")",
"# %% Add auto-correlated noise",
"if",
"lgcNoise",
":",
"print",
"(",
"'***Adding noise feature not yet implemented***'",
")",
"# %% Export simulated prf, and if desired neural, time courses as nii",
"# List with name suffices of output images:",
"lstNiiNames",
"=",
"[",
"'_SimPrfTc'",
"]",
"# Create full path names from nii file names and output path",
"lstNiiNames",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"strNii",
"+",
"'.nii.gz'",
"for",
"strNii",
"in",
"lstNiiNames",
"]",
"# export beta parameter as a single 4D nii file",
"print",
"(",
"'---Save simulated pRF time courses'",
")",
"export_nii",
"(",
"arySimRsp",
",",
"lstNiiNames",
",",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"tplNiiShp",
",",
"aryAff",
",",
"hdrMsk",
",",
"outFormat",
"=",
"'4D'",
")",
"print",
"(",
"'------Done.'",
")",
"if",
"lgcRtnNrl",
":",
"# List with name suffices of output images:",
"lstNiiNames",
"=",
"[",
"'_SimNrlTc'",
"]",
"# Create full path names from nii file names and output path",
"lstNiiNames",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"strNii",
"+",
"'.nii.gz'",
"for",
"strNii",
"in",
"lstNiiNames",
"]",
"# export beta parameter as a single 4D nii file",
"print",
"(",
"'---Save simulated neural time courses'",
")",
"export_nii",
"(",
"aryMdlRsp",
",",
"lstNiiNames",
",",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"tplNiiShp",
",",
"aryAff",
",",
"hdrMsk",
",",
"outFormat",
"=",
"'4D'",
")",
"print",
"(",
"'------Done.'",
")"
] | Simulate pRF response given pRF parameters and stimulus apertures.
Parameters
----------
strPrior : str
Absolute file path of config file used for pRF fitting.
strStmApr : str
Absolute file path to stimulus aperture used in in-silico experiment.
lgcNoise : boolean
Should noise be added to the simulated pRF time course. By default, no
noise is added.
lgcRtnNrl : boolean
Should neural time course, unconvolved with hrf, be returned as well?
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcTest : boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
Notes
-----
[1] This function does not return any arguments but, instead, saves nii
files to disk.
[2] strStmApr should be a path to a npy file that contains a 3D numpy
array. This array consists of binary images in boolean form that
represent the stimulus aperture. Images are stacked along the last axis.
"Simulate",
"pRF",
"response",
"given",
"pRF",
"parameters",
"and",
"stimulus",
"apertures",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/pyprf_sim.py#L35-L209 |
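A minimal usage sketch for the pyprf_sim record above; the import path follows the file location given in the record, while the config path and aperture dimensions are illustrative assumptions:

    import numpy as np
    from pyprf_feature.analysis.pyprf_sim import pyprf_sim

    # Per note [2]: a 3D boolean array, aperture images stacked along the last axis
    aryStmApr = np.zeros((128, 128, 40), dtype=bool)
    aryStmApr[32:96, 32:96, :20] = True  # square aperture shown during the first 20 frames
    np.save('/tmp/stim_aperture.npy', aryStmApr)

    # Simulate pRF time courses from a previous fit (config path is a placeholder)
    pyprf_sim('/path/to/my_config_file.csv', '/tmp/stim_aperture.npy',
              lgcNoise=False, lgcRtnNrl=True)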
bachya/pyflunearyou | pyflunearyou/helpers/__init__.py | get_nearest_by_numeric_key | def get_nearest_by_numeric_key(data: dict, key: int) -> Any:
"""Return the dict element whose numeric key is closest to a target."""
return data.get(key, data[min(data.keys(), key=lambda k: abs(k - key))]) | python | def get_nearest_by_numeric_key(data: dict, key: int) -> Any:
"""Return the dict element whose numeric key is closest to a target."""
return data.get(key, data[min(data.keys(), key=lambda k: abs(k - key))]) | [
"def",
"get_nearest_by_numeric_key",
"(",
"data",
":",
"dict",
",",
"key",
":",
"int",
")",
"->",
"Any",
":",
"return",
"data",
".",
"get",
"(",
"key",
",",
"data",
"[",
"min",
"(",
"data",
".",
"keys",
"(",
")",
",",
"key",
"=",
"lambda",
"k",
":",
"abs",
"(",
"k",
"-",
"key",
")",
")",
"]",
")"
] | Return the dict element whose numeric key is closest to a target. | [
"Return",
"the",
"dict",
"element",
"whose",
"numeric",
"key",
"is",
"closest",
"to",
"a",
"target",
"."
] | train | https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/helpers/__init__.py#L5-L7 |
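A quick illustration of the helper above; the dictionary contents are made up for the example:

    data = {1: 'none', 5: 'low', 25: 'high', 99: 'unknown'}
    get_nearest_by_numeric_key(data, 5)   # exact key present -> 'low'
    get_nearest_by_numeric_key(data, 30)  # nearest key is 25 -> 'high'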
olivier-m/rafter | rafter/blueprints.py | Blueprint.resource | def resource(self, uri, methods=frozenset({'GET'}), host=None,
strict_slashes=None, stream=False, version=None, name=None,
**kwargs):
"""
Create a blueprint resource route from a decorated function.
:param uri: endpoint at which the route will be accessible.
:param methods: list of acceptable HTTP methods.
:param host:
:param strict_slashes:
:param version:
:param name: user defined route name for url_for
:return: function or class instance
Accepts any keyword argument that will be passed to the app resource.
"""
if strict_slashes is None:
strict_slashes = self.strict_slashes
def decorator(handler):
self.resources.append((
FutureRoute(handler, uri, methods, host, strict_slashes,
stream, version, name),
kwargs))
return handler
return decorator | python | def resource(self, uri, methods=frozenset({'GET'}), host=None,
strict_slashes=None, stream=False, version=None, name=None,
**kwargs):
"""
Create a blueprint resource route from a decorated function.
:param uri: endpoint at which the route will be accessible.
:param methods: list of acceptable HTTP methods.
:param host:
:param strict_slashes:
:param version:
:param name: user defined route name for url_for
:return: function or class instance
Accepts any keyword argument that will be passed to the app resource.
"""
if strict_slashes is None:
strict_slashes = self.strict_slashes
def decorator(handler):
self.resources.append((
FutureRoute(handler, uri, methods, host, strict_slashes,
stream, version, name),
kwargs))
return handler
return decorator | [
"def",
"resource",
"(",
"self",
",",
"uri",
",",
"methods",
"=",
"frozenset",
"(",
"{",
"'GET'",
"}",
")",
",",
"host",
"=",
"None",
",",
"strict_slashes",
"=",
"None",
",",
"stream",
"=",
"False",
",",
"version",
"=",
"None",
",",
"name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"strict_slashes",
"is",
"None",
":",
"strict_slashes",
"=",
"self",
".",
"strict_slashes",
"def",
"decorator",
"(",
"handler",
")",
":",
"self",
".",
"resources",
".",
"append",
"(",
"(",
"FutureRoute",
"(",
"handler",
",",
"uri",
",",
"methods",
",",
"host",
",",
"strict_slashes",
",",
"stream",
",",
"version",
",",
"name",
")",
",",
"kwargs",
")",
")",
"return",
"handler",
"return",
"decorator"
] | Create a blueprint resource route from a decorated function.
:param uri: endpoint at which the route will be accessible.
:param methods: list of acceptable HTTP methods.
:param host:
:param strict_slashes:
:param version:
:param name: user defined route name for url_for
:return: function or class instance
Accepts any keyword argument that will be passed to the app resource. | [
"Create",
"a",
"blueprint",
"resource",
"route",
"from",
"a",
"decorated",
"function",
"."
] | train | https://github.com/olivier-m/rafter/blob/aafcf8fd019f24abcf519307c4484cc6b4697c04/rafter/blueprints.py#L46-L72 |
olivier-m/rafter | rafter/blueprints.py | Blueprint.add_resource | def add_resource(self, handler, uri, methods=frozenset({'GET'}),
host=None, strict_slashes=None, version=None, name=None,
**kwargs):
"""
Create a blueprint resource route from a function.
:param uri: endpoint at which the route will be accessible.
:param methods: list of acceptable HTTP methods.
:param host:
:param strict_slashes:
:param version:
:param name: user defined route name for url_for
:return: function or class instance
Accepts any keyword argument that will be passed to the app resource.
"""
self.resource(uri=uri, methods=methods, host=host,
strict_slashes=strict_slashes, version=version,
name=name, **kwargs)(handler) | python | def add_resource(self, handler, uri, methods=frozenset({'GET'}),
host=None, strict_slashes=None, version=None, name=None,
**kwargs):
"""
Create a blueprint resource route from a function.
:param uri: endpoint at which the route will be accessible.
:param methods: list of acceptable HTTP methods.
:param host:
:param strict_slashes:
:param version:
:param name: user defined route name for url_for
:return: function or class instance
Accepts any keyword argument that will be passed to the app resource.
"""
self.resource(uri=uri, methods=methods, host=host,
strict_slashes=strict_slashes, version=version,
name=name, **kwargs)(handler) | [
"def",
"add_resource",
"(",
"self",
",",
"handler",
",",
"uri",
",",
"methods",
"=",
"frozenset",
"(",
"{",
"'GET'",
"}",
")",
",",
"host",
"=",
"None",
",",
"strict_slashes",
"=",
"None",
",",
"version",
"=",
"None",
",",
"name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"resource",
"(",
"uri",
"=",
"uri",
",",
"methods",
"=",
"methods",
",",
"host",
"=",
"host",
",",
"strict_slashes",
"=",
"strict_slashes",
",",
"version",
"=",
"version",
",",
"name",
"=",
"name",
",",
"*",
"*",
"kwargs",
")",
"(",
"handler",
")"
] | Create a blueprint resource route from a function.
:param uri: endpoint at which the route will be accessible.
:param methods: list of acceptable HTTP methods.
:param host:
:param strict_slashes:
:param version:
:param name: user defined route name for url_for
:return: function or class instance
Accepts any keyword argument that will be passed to the app resource. | [
"Create",
"a",
"blueprint",
"resource",
"route",
"from",
"a",
"function",
"."
] | train | https://github.com/olivier-m/rafter/blob/aafcf8fd019f24abcf519307c4484cc6b4697c04/rafter/blueprints.py#L74-L92 |
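A sketch of how the two registration styles above fit together, assuming the Sanic-style Blueprint constructor and async handlers; handler names and URIs are illustrative:

    from rafter.blueprints import Blueprint

    bp = Blueprint('users')

    @bp.resource('/users/<user_id>', methods=['GET'])
    async def get_user(request, user_id):
        return {'id': user_id}

    async def list_users(request):
        return []

    # the non-decorator form registers an existing handler with the same options
    bp.add_resource(list_users, '/users', methods=['GET'])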
lvieirajr/mongorest | mongorest/database.py | _get_db | def _get_db():
"""
Returns the connection to the database using the settings.
This function should not be called outside of this file.
Use db instead.
"""
from .settings import settings
mongo = settings.MONGODB
if 'URI' in mongo and mongo['URI']:
uri = mongo['URI']
else:
uri = 'mongodb://'
if all(mongo.get(key) for key in ('USERNAME', 'PASSWORD')):
uri += '{0}:{1}@'.format(mongo['USERNAME'], mongo['PASSWORD'])
if 'HOSTS' in mongo and mongo['HOSTS']:
uri += ','.join(
'{0}:{1}'.format(host, port)
for (host, port) in zip(mongo['HOSTS'], mongo['PORTS'])
)
else:
uri += '{0}:{1}'.format(mongo['HOST'], mongo.get('PORT', 27017))
uri += '/' + mongo['DATABASE']
if 'OPTIONS' in mongo and mongo['OPTIONS']:
uri += '?{0}'.format('&'.join(mongo['OPTIONS']))
client = ConnectionFailureProxy(MongoClient(uri, connect=False))
database = client[parse_uri(uri)['database']]
return database | python | def _get_db():
"""
Returns the connection to the database using the settings.
This function should not be called outside of this file.
Use db instead.
"""
from .settings import settings
mongo = settings.MONGODB
if 'URI' in mongo and mongo['URI']:
uri = mongo['URI']
else:
uri = 'mongodb://'
if all(mongo.get(key) for key in ('USERNAME', 'PASSWORD')):
uri += '{0}:{1}@'.format(mongo['USERNAME'], mongo['PASSWORD'])
if 'HOSTS' in mongo and mongo['HOSTS']:
uri += ','.join(
'{0}:{1}'.format(host, port)
for (host, port) in zip(mongo['HOSTS'], mongo['PORTS'])
)
else:
uri += '{0}:{1}'.format(mongo['HOST'], mongo.get('PORT', 27017))
uri += '/' + mongo['DATABASE']
if 'OPTIONS' in mongo and mongo['OPTIONS']:
uri += '?{0}'.format('&'.join(mongo['OPTIONS']))
client = ConnectionFailureProxy(MongoClient(uri, connect=False))
database = client[parse_uri(uri)['database']]
return database | [
"def",
"_get_db",
"(",
")",
":",
"from",
".",
"settings",
"import",
"settings",
"mongo",
"=",
"settings",
".",
"MONGODB",
"if",
"'URI'",
"in",
"mongo",
"and",
"mongo",
"[",
"'URI'",
"]",
":",
"uri",
"=",
"mongo",
"[",
"'URI'",
"]",
"else",
":",
"uri",
"=",
"'mongodb://'",
"if",
"all",
"(",
"mongo",
".",
"get",
"(",
"key",
")",
"for",
"key",
"in",
"(",
"'USERNAME'",
",",
"'PASSWORD'",
")",
")",
":",
"uri",
"+=",
"'{0}:{1}@'",
".",
"format",
"(",
"mongo",
"[",
"'USERNAME'",
"]",
",",
"mongo",
"[",
"'PASSWORD'",
"]",
")",
"if",
"'HOSTS'",
"in",
"mongo",
"and",
"mongo",
"[",
"'HOSTS'",
"]",
":",
"uri",
"+=",
"','",
".",
"join",
"(",
"'{0}:{1}'",
".",
"format",
"(",
"host",
",",
"port",
")",
"for",
"(",
"host",
",",
"port",
")",
"in",
"zip",
"(",
"mongo",
"[",
"'HOSTS'",
"]",
",",
"mongo",
"[",
"'PORTS'",
"]",
")",
",",
")",
"else",
":",
"uri",
"+=",
"'{0}:{1}'",
".",
"format",
"(",
"mongo",
"[",
"'HOST'",
"]",
",",
"mongo",
".",
"get",
"(",
"'PORT'",
",",
"27017",
")",
")",
"uri",
"+=",
"'/'",
"+",
"mongo",
"[",
"'DATABASE'",
"]",
"if",
"'OPTIONS'",
"in",
"mongo",
"and",
"mongo",
"[",
"'OPTIONS'",
"]",
":",
"uri",
"+=",
"'?{0}'",
".",
"format",
"(",
"'&'",
".",
"join",
"(",
"mongo",
"[",
"'OPTIONS'",
"]",
")",
")",
"client",
"=",
"ConnectionFailureProxy",
"(",
"MongoClient",
"(",
"uri",
",",
"connect",
"=",
"False",
")",
")",
"database",
"=",
"client",
"[",
"parse_uri",
"(",
"uri",
")",
"[",
"'database'",
"]",
"]",
"return",
"database"
] | Returns the connection to the database using the settings.
This function should not be called outside of this file.
Use db instead. | [
"Returns",
"the",
"connection",
"to",
"the",
"database",
"using",
"the",
"settings",
".",
"This",
"function",
"should",
"not",
"be",
"called",
"outside",
"of",
"this",
"file",
".",
"Use",
"db",
"instead",
"."
] | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/database.py#L91-L124 |
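For reference, a MONGODB settings dictionary shaped the way the URI-building code above expects; all values are placeholders:

    MONGODB = {
        'USERNAME': 'user',
        'PASSWORD': 'secret',
        'HOSTS': ['db1.example.com', 'db2.example.com'],
        'PORTS': [27017, 27017],
        'DATABASE': 'mydb',
        'OPTIONS': ['replicaSet=rs0', 'authSource=admin'],
    }
    # resulting URI:
    # mongodb://user:secret@db1.example.com:27017,db2.example.com:27017/mydb?replicaSet=rs0&authSource=admin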
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_calcR2_getBetas.py | getBetas | def getBetas(idxPrc,
aryPrfTc,
lstAllMdlInd,
aryFuncChnk,
aryBstIndChnk,
betaSw,
queOut):
"""Calculate voxel betas and R^2 for the best model.
Parameters
----------
idxPrc : TODO
(?)
aryPrfTc : np.array, shape (?)
Population receptive field time courses.
lstAllMdlInd : list
List of the indices of all models.
aryFuncChnk : TODO
Chunk of something(?)
aryBstIndChnk : np.array, shape (?)
Points for every voxel to the index of the best model
betaSw : str, iterator, or np.array, shape (?)
Best beta correlation coefficients found in training.
queOut : TODO
Queue output (?)
Notes
-----
This is done after fitting with cross validation, since during the
fitting process, we never fit the model to the entire data.
"""
# get number of motion directions
varNumMtnDrctns = aryPrfTc.shape[3]
varNumVoxChnk = aryBstIndChnk.shape[0]
# prepare array for best beta weights
if type(betaSw) is sklearn.model_selection._split.KFold:
aryEstimMtnCrvTrn = np.zeros((varNumVoxChnk, varNumMtnDrctns,
betaSw.get_n_splits()), dtype='float32')
aryEstimMtnCrvTst = np.zeros((varNumVoxChnk, varNumMtnDrctns,
betaSw.get_n_splits()), dtype='float32')
resTrn = np.zeros((varNumVoxChnk, betaSw.get_n_splits()),
dtype='float32')
resTst = np.zeros((varNumVoxChnk, betaSw.get_n_splits()),
dtype='float32')
aryErrorTrn = np.zeros((varNumVoxChnk), dtype='float32')
aryErrorTst = np.zeros((varNumVoxChnk), dtype='float32')
contrast = np.array([
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
])
denomTrn = np.zeros((varNumVoxChnk, betaSw.get_n_splits(),
len(contrast)), dtype='float32')
denomTst = np.zeros((varNumVoxChnk, betaSw.get_n_splits(),
len(contrast)), dtype='float32')
elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool':
aryEstimMtnCrvTrn = np.zeros((varNumVoxChnk, varNumMtnDrctns,
), dtype='float32')
aryEstimMtnCrvTst = np.zeros((varNumVoxChnk, varNumMtnDrctns,
), dtype='float32')
resTrn = np.zeros((varNumVoxChnk),
dtype='float32')
resTst = np.zeros((varNumVoxChnk),
dtype='float32')
aryErrorTrn = np.zeros((varNumVoxChnk), dtype='float32')
aryErrorTst = np.zeros((varNumVoxChnk), dtype='float32')
contrast = np.array([
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
])
denomTrn = np.zeros((varNumVoxChnk,
len(contrast)), dtype='float32')
denomTst = np.zeros((varNumVoxChnk,
len(contrast)), dtype='float32')
else:
aryEstimMtnCrv = np.zeros((varNumVoxChnk, varNumMtnDrctns),
dtype='float32')
# prepare array for best residuals
vecBstRes = np.zeros(varNumVoxChnk, dtype='float32')
vecBstRes[:] = np.inf
# prepare counter to check that every voxel is matched to one winner mdl
vecLgcCounter = np.zeros(varNumVoxChnk, dtype='float32')
# We reshape the voxel time courses, so that time goes down the column
aryFuncChnk = aryFuncChnk.T
# Change type to float 32:
aryFuncChnk = aryFuncChnk.astype(np.float32)
aryPrfTc = aryPrfTc.astype(np.float32)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Number of pRF models to fit:
varNumMdls = len(lstAllMdlInd)
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through pRF models:
for idx, mdlInd in enumerate(lstAllMdlInd):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# check whether any model had this particular x, y, sigma combination
# as its best model
lgcTemp = [aryBstIndChnk == idx][0]
if np.greater(np.sum(lgcTemp), 0):
# get current design matrix
aryDsgnTmp = aryPrfTc[mdlInd].T
if betaSw == 'train':  # training
aryTmpPrmEst, aryTmpRes = np.linalg.lstsq(
aryDsgnTmp, aryFuncChnk[:, lgcTemp])[0:2]
aryEstimMtnCrv[lgcTemp, :] = aryTmpPrmEst.T
vecBstRes[lgcTemp] = aryTmpRes
elif type(betaSw) is np.ndarray and betaSw.dtype == 'float':
# get beta weights for axis of motion tuning curves
aryEstimMtnCrv[lgcTemp, :] = np.linalg.lstsq(
aryDsgnTmp, aryFuncChnk[:, lgcTemp])[0].T
# calculate prediction
aryPredTc = np.dot(aryDsgnTmp,
betaSw[lgcTemp, :].T)
# Sum of squares:
vecBstRes[lgcTemp] = np.sum((aryFuncChnk[:, lgcTemp] -
aryPredTc) ** 2, axis=0)
elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool':
# get beta weights for training
betas, resTrn[lgcTemp] = np.linalg.lstsq(
aryDsgnTmp[betaSw, :], aryFuncChnk[betaSw][:, lgcTemp])[0:2]
aryEstimMtnCrvTrn[lgcTemp, :] = betas.T
# get beta weights for validation
betas, resTst[lgcTemp] = np.linalg.lstsq(
aryDsgnTmp[~betaSw, :], aryFuncChnk[~betaSw][:, lgcTemp])[0:2]
aryEstimMtnCrvTst[lgcTemp, :] = betas.T
# calculate CC for training
aryCcTrn = np.linalg.pinv(
np.dot(aryDsgnTmp[betaSw, :].T,
aryDsgnTmp[betaSw, :]))
aryCcTst = np.linalg.pinv(
np.dot(aryDsgnTmp[~betaSw, :].T,
aryDsgnTmp[~betaSw, :]))
# calculate Error for training
aryErrorTrn[lgcTemp] = np.var(
np.subtract(aryFuncChnk[betaSw][:, lgcTemp],
np.dot(aryDsgnTmp[betaSw, :],
aryEstimMtnCrvTrn[lgcTemp, :].T)), axis=0)
# calculate Error for test
aryErrorTst[lgcTemp] = np.var(
np.subtract(aryFuncChnk[~betaSw][:, lgcTemp],
np.dot(aryDsgnTmp[~betaSw, :],
aryEstimMtnCrvTst[lgcTemp, :].T)), axis=0)
# calculate denominator for training
for indContr, contr in enumerate(contrast):
denomTrn[lgcTemp, indContr] = np.sqrt(
aryErrorTrn[lgcTemp] * np.dot(
np.dot(contr, aryCcTrn), contr.T))
denomTst[lgcTemp, indContr] = np.sqrt(
aryErrorTst[lgcTemp] * np.dot(
np.dot(contr, aryCcTst), contr.T))
elif type(betaSw) is sklearn.model_selection._split.KFold:
for idxCV, (idxTrn, idxVal) in enumerate(betaSw.split(aryDsgnTmp)):
# get beta weights for training
betas, resTrn[lgcTemp, idxCV] = np.linalg.lstsq(
aryDsgnTmp[idxTrn], aryFuncChnk[idxTrn][:, lgcTemp])[0:2]
aryEstimMtnCrvTrn[lgcTemp, :, idxCV] = betas.T
# get beta weights for validation
betas, resTst[lgcTemp, idxCV] = np.linalg.lstsq(
aryDsgnTmp[idxVal], aryFuncChnk[idxVal][:, lgcTemp])[0:2]
aryEstimMtnCrvTst[lgcTemp, :, idxCV] = betas.T
# calculate CC for training
aryCcTrn = np.linalg.pinv(
np.dot(aryDsgnTmp[idxTrn].T,
aryDsgnTmp[idxTrn]))
aryCcTst = np.linalg.pinv(
np.dot(aryDsgnTmp[idxVal].T,
aryDsgnTmp[idxVal]))
# calculate Error for training
aryErrorTrn[lgcTemp] = np.var(
np.subtract(aryFuncChnk[idxTrn][:, lgcTemp],
np.dot(aryDsgnTmp[idxTrn],
aryEstimMtnCrvTrn[lgcTemp, :, idxCV].T)), axis=0)
# calculate Error for test
aryErrorTst[lgcTemp] = np.var(
np.subtract(aryFuncChnk[idxVal][:, lgcTemp],
np.dot(aryDsgnTmp[idxVal],
aryEstimMtnCrvTst[lgcTemp, :, idxCV].T)), axis=0)
# calculate denominator for training
for indContr, contr in enumerate(contrast):
denomTrn[lgcTemp, idxCV, indContr] = np.sqrt(
aryErrorTrn[lgcTemp] * np.dot(
np.dot(contr, aryCcTrn), contr.T))
denomTst[lgcTemp, idxCV, indContr] = np.sqrt(
aryErrorTst[lgcTemp] * np.dot(
np.dot(contr, aryCcTst), contr.T))
# increase logical counter to verify later that every voxel
# was visited only once
vecLgcCounter[lgcTemp] += 1
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# check that every voxel was visited only once
strErrMsg = ('It looks like at least one voxel was revisited more than once. ' +
'Check whether the R2 was calculated correctly')
assert np.sum(vecLgcCounter) == len(vecLgcCounter), strErrMsg
if type(betaSw) is sklearn.model_selection._split.KFold:
# calculate t-values
aryTvalsTrn = np.empty((varNumVoxChnk, contrast.shape[0],
betaSw.get_n_splits()))
aryTvalsTst = np.empty((varNumVoxChnk, contrast.shape[0],
betaSw.get_n_splits()))
for ind1, contr in enumerate(contrast):
for ind2 in range(betaSw.get_n_splits()):
aryTvalsTrn[:, ind1, ind2] = np.divide(
np.dot(contr, aryEstimMtnCrvTrn[:, :, ind2].T),
denomTrn[:, ind2, ind1])
aryTvalsTst[:, ind1, ind2] = np.divide(
np.dot(contr, aryEstimMtnCrvTst[:, :, ind2].T),
denomTst[:, ind2, ind1])
# Output list:
lstOut = [idxPrc,
aryEstimMtnCrvTrn,
aryEstimMtnCrvTst,
aryTvalsTrn,
aryTvalsTst,
]
queOut.put(lstOut)
elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool':
# calculate t-values
aryTvalsTrn = np.empty((varNumVoxChnk, contrast.shape[0],
))
aryTvalsTst = np.empty((varNumVoxChnk, contrast.shape[0],
))
for ind1, contr in enumerate(contrast):
aryTvalsTrn[:, ind1] = np.divide(
np.dot(contr, aryEstimMtnCrvTrn.T),
denomTrn[:, ind1])
aryTvalsTst[:, ind1] = np.divide(
np.dot(contr, aryEstimMtnCrvTst.T),
denomTst[:, ind1])
# Output list:
lstOut = [idxPrc,
aryEstimMtnCrvTrn,
aryEstimMtnCrvTst,
aryTvalsTrn,
aryTvalsTst,
]
queOut.put(lstOut)
else:
# After finding the best fitting model for each voxel, we still have to
# calculate the coefficient of determination (R-squared) for each voxel. We
# start by calculating the total sum of squares (i.e. the deviation of the
# data from the mean). The mean of each time course:
vecFuncMean = np.mean(aryFuncChnk, axis=0)
# Deviation from the mean for each datapoint:
vecFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(vecFuncDev,
2.0),
axis=0)
# Coefficient of determination:
vecBstR2 = np.subtract(1.0,
np.divide(vecBstRes,
vecSsTot))
# Output list:
lstOut = [idxPrc,
vecBstR2,
aryEstimMtnCrv]
queOut.put(lstOut) | python | def getBetas(idxPrc,
aryPrfTc,
lstAllMdlInd,
aryFuncChnk,
aryBstIndChnk,
betaSw,
queOut):
"""Calculate voxel betas and R^2 for the best model.
Parameters
----------
idxPrc : TODO
(?)
aryPrfTc : np.array, shape (?)
Population receptive field time courses.
lstAllMdlInd : list
List of the indices of all models.
aryFuncChnk : TODO
Chunk of something(?)
aryBstIndChnk : np.array, shape (?)
Points for every voxel to the index of the best model
betaSw : str, iterator, or np.array, shape (?)
Best beta correlation coefficients found in training.
queOut : TODO
Queue output (?)
Notes
-----
This is done after fitting with cross validation, since during the
fitting process, we never fit the model to the entire data.
"""
# get number of motion directions
varNumMtnDrctns = aryPrfTc.shape[3]
varNumVoxChnk = aryBstIndChnk.shape[0]
# prepare array for best beta weights
if type(betaSw) is sklearn.model_selection._split.KFold:
aryEstimMtnCrvTrn = np.zeros((varNumVoxChnk, varNumMtnDrctns,
betaSw.get_n_splits()), dtype='float32')
aryEstimMtnCrvTst = np.zeros((varNumVoxChnk, varNumMtnDrctns,
betaSw.get_n_splits()), dtype='float32')
resTrn = np.zeros((varNumVoxChnk, betaSw.get_n_splits()),
dtype='float32')
resTst = np.zeros((varNumVoxChnk, betaSw.get_n_splits()),
dtype='float32')
aryErrorTrn = np.zeros((varNumVoxChnk), dtype='float32')
aryErrorTst = np.zeros((varNumVoxChnk), dtype='float32')
contrast = np.array([
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
])
denomTrn = np.zeros((varNumVoxChnk, betaSw.get_n_splits(),
len(contrast)), dtype='float32')
denomTst = np.zeros((varNumVoxChnk, betaSw.get_n_splits(),
len(contrast)), dtype='float32')
elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool':
aryEstimMtnCrvTrn = np.zeros((varNumVoxChnk, varNumMtnDrctns,
), dtype='float32')
aryEstimMtnCrvTst = np.zeros((varNumVoxChnk, varNumMtnDrctns,
), dtype='float32')
resTrn = np.zeros((varNumVoxChnk),
dtype='float32')
resTst = np.zeros((varNumVoxChnk),
dtype='float32')
aryErrorTrn = np.zeros((varNumVoxChnk), dtype='float32')
aryErrorTst = np.zeros((varNumVoxChnk), dtype='float32')
contrast = np.array([
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
])
denomTrn = np.zeros((varNumVoxChnk,
len(contrast)), dtype='float32')
denomTst = np.zeros((varNumVoxChnk,
len(contrast)), dtype='float32')
else:
aryEstimMtnCrv = np.zeros((varNumVoxChnk, varNumMtnDrctns),
dtype='float32')
# prepare array for best residuals
vecBstRes = np.zeros(varNumVoxChnk, dtype='float32')
vecBstRes[:] = np.inf
# prepare counter to check that every voxel is matched to one winner mdl
vecLgcCounter = np.zeros(varNumVoxChnk, dtype='float32')
# We reshape the voxel time courses, so that time goes down the column
aryFuncChnk = aryFuncChnk.T
# Change type to float 32:
aryFuncChnk = aryFuncChnk.astype(np.float32)
aryPrfTc = aryPrfTc.astype(np.float32)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Number of pRF models to fit:
varNumMdls = len(lstAllMdlInd)
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through pRF models:
for idx, mdlInd in enumerate(lstAllMdlInd):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# check whether any model had this particular x, y, sigma combination
# as its best model
lgcTemp = [aryBstIndChnk == idx][0]
if np.greater(np.sum(lgcTemp), 0):
# get current design matrix
aryDsgnTmp = aryPrfTc[mdlInd].T
if betaSw == 'train':  # training
aryTmpPrmEst, aryTmpRes = np.linalg.lstsq(
aryDsgnTmp, aryFuncChnk[:, lgcTemp])[0:2]
aryEstimMtnCrv[lgcTemp, :] = aryTmpPrmEst.T
vecBstRes[lgcTemp] = aryTmpRes
elif type(betaSw) is np.ndarray and betaSw.dtype == 'float':
# get beta weights for axis of motion tuning curves
aryEstimMtnCrv[lgcTemp, :] = np.linalg.lstsq(
aryDsgnTmp, aryFuncChnk[:, lgcTemp])[0].T
# calculate prediction
aryPredTc = np.dot(aryDsgnTmp,
betaSw[lgcTemp, :].T)
# Sum of squares:
vecBstRes[lgcTemp] = np.sum((aryFuncChnk[:, lgcTemp] -
aryPredTc) ** 2, axis=0)
elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool':
# get beta weights for training
betas, resTrn[lgcTemp] = np.linalg.lstsq(
aryDsgnTmp[betaSw, :], aryFuncChnk[betaSw][:, lgcTemp])[0:2]
aryEstimMtnCrvTrn[lgcTemp, :] = betas.T
# get beta weights for validation
betas, resTst[lgcTemp] = np.linalg.lstsq(
aryDsgnTmp[~betaSw, :], aryFuncChnk[~betaSw][:, lgcTemp])[0:2]
aryEstimMtnCrvTst[lgcTemp, :] = betas.T
# calculate CC for training
aryCcTrn = np.linalg.pinv(
np.dot(aryDsgnTmp[betaSw, :].T,
aryDsgnTmp[betaSw, :]))
aryCcTst = np.linalg.pinv(
np.dot(aryDsgnTmp[~betaSw, :].T,
aryDsgnTmp[~betaSw, :]))
# calculate Error for training
aryErrorTrn[lgcTemp] = np.var(
np.subtract(aryFuncChnk[betaSw][:, lgcTemp],
np.dot(aryDsgnTmp[betaSw, :],
aryEstimMtnCrvTrn[lgcTemp, :].T)), axis=0)
# calculate Error for test
aryErrorTst[lgcTemp] = np.var(
np.subtract(aryFuncChnk[~betaSw][:, lgcTemp],
np.dot(aryDsgnTmp[~betaSw, :],
aryEstimMtnCrvTst[lgcTemp, :].T)), axis=0)
# calculate denominator for training
for indContr, contr in enumerate(contrast):
denomTrn[lgcTemp, indContr] = np.sqrt(
aryErrorTrn[lgcTemp] * np.dot(
np.dot(contr, aryCcTrn), contr.T))
denomTst[lgcTemp, indContr] = np.sqrt(
aryErrorTst[lgcTemp] * np.dot(
np.dot(contr, aryCcTst), contr.T))
elif type(betaSw) is sklearn.model_selection._split.KFold:
for idxCV, (idxTrn, idxVal) in enumerate(betaSw.split(aryDsgnTmp)):
# get beta weights for training
betas, resTrn[lgcTemp, idxCV] = np.linalg.lstsq(
aryDsgnTmp[idxTrn], aryFuncChnk[idxTrn][:, lgcTemp])[0:2]
aryEstimMtnCrvTrn[lgcTemp, :, idxCV] = betas.T
# get beta weights for validation
betas, resTst[lgcTemp, idxCV] = np.linalg.lstsq(
aryDsgnTmp[idxVal], aryFuncChnk[idxVal][:, lgcTemp])[0:2]
aryEstimMtnCrvTst[lgcTemp, :, idxCV] = betas.T
# calculate CC for training
aryCcTrn = np.linalg.pinv(
np.dot(aryDsgnTmp[idxTrn].T,
aryDsgnTmp[idxTrn]))
aryCcTst = np.linalg.pinv(
np.dot(aryDsgnTmp[idxVal].T,
aryDsgnTmp[idxVal]))
# calculate Error for training
aryErrorTrn[lgcTemp] = np.var(
np.subtract(aryFuncChnk[idxTrn][:, lgcTemp],
np.dot(aryDsgnTmp[idxTrn],
aryEstimMtnCrvTrn[lgcTemp, :, idxCV].T)), axis=0)
# calculate Error for test
aryErrorTst[lgcTemp] = np.var(
np.subtract(aryFuncChnk[idxVal][:, lgcTemp],
np.dot(aryDsgnTmp[idxVal],
aryEstimMtnCrvTst[lgcTemp, :, idxCV].T)), axis=0)
# calculate denominator for training
for indContr, contr in enumerate(contrast):
denomTrn[lgcTemp, idxCV, indContr] = np.sqrt(
aryErrorTrn[lgcTemp] * np.dot(
np.dot(contr, aryCcTrn), contr.T))
denomTst[lgcTemp, idxCV, indContr] = np.sqrt(
aryErrorTst[lgcTemp] * np.dot(
np.dot(contr, aryCcTst), contr.T))
# increase logical counter to verify later that every voxel
# was visited only once
vecLgcCounter[lgcTemp] += 1
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# check that every voxel was visited only once
strErrMsg = ('It looks like at least one voxel was revisited more than once. ' +
'Check whether the R2 was calculated correctly')
assert np.sum(vecLgcCounter) == len(vecLgcCounter), strErrMsg
if type(betaSw) is sklearn.model_selection._split.KFold:
# calculate t-values
aryTvalsTrn = np.empty((varNumVoxChnk, contrast.shape[0],
betaSw.get_n_splits()))
aryTvalsTst = np.empty((varNumVoxChnk, contrast.shape[0],
betaSw.get_n_splits()))
for ind1, contr in enumerate(contrast):
for ind2 in range(betaSw.get_n_splits()):
aryTvalsTrn[:, ind1, ind2] = np.divide(
np.dot(contr, aryEstimMtnCrvTrn[:, :, ind2].T),
denomTrn[:, ind2, ind1])
aryTvalsTst[:, ind1, ind2] = np.divide(
np.dot(contr, aryEstimMtnCrvTst[:, :, ind2].T),
denomTst[:, ind2, ind1])
# Output list:
lstOut = [idxPrc,
aryEstimMtnCrvTrn,
aryEstimMtnCrvTst,
aryTvalsTrn,
aryTvalsTst,
]
queOut.put(lstOut)
elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool':
# calculate t-values
aryTvalsTrn = np.empty((varNumVoxChnk, contrast.shape[0],
))
aryTvalsTst = np.empty((varNumVoxChnk, contrast.shape[0],
))
for ind1, contr in enumerate(contrast):
aryTvalsTrn[:, ind1] = np.divide(
np.dot(contr, aryEstimMtnCrvTrn.T),
denomTrn[:, ind1])
aryTvalsTst[:, ind1] = np.divide(
np.dot(contr, aryEstimMtnCrvTst.T),
denomTst[:, ind1])
# Output list:
lstOut = [idxPrc,
aryEstimMtnCrvTrn,
aryEstimMtnCrvTst,
aryTvalsTrn,
aryTvalsTst,
]
queOut.put(lstOut)
else:
# After finding the best fitting model for each voxel, we still have to
# calculate the coefficient of determination (R-squared) for each voxel. We
# start by calculating the total sum of squares (i.e. the deviation of the
# data from the mean). The mean of each time course:
vecFuncMean = np.mean(aryFuncChnk, axis=0)
# Deviation from the mean for each datapoint:
vecFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(vecFuncDev,
2.0),
axis=0)
# Coefficient of determination:
vecBstR2 = np.subtract(1.0,
np.divide(vecBstRes,
vecSsTot))
# Output list:
lstOut = [idxPrc,
vecBstR2,
aryEstimMtnCrv]
queOut.put(lstOut) | [
"def",
"getBetas",
"(",
"idxPrc",
",",
"aryPrfTc",
",",
"lstAllMdlInd",
",",
"aryFuncChnk",
",",
"aryBstIndChnk",
",",
"betaSw",
",",
"queOut",
")",
":",
"# get number of motion directions",
"varNumMtnDrctns",
"=",
"aryPrfTc",
".",
"shape",
"[",
"3",
"]",
"varNumVoxChnk",
"=",
"aryBstIndChnk",
".",
"shape",
"[",
"0",
"]",
"# prepare array for best beta weights",
"if",
"type",
"(",
"betaSw",
")",
"is",
"sklearn",
".",
"model_selection",
".",
"_split",
".",
"KFold",
":",
"aryEstimMtnCrvTrn",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"varNumMtnDrctns",
",",
"betaSw",
".",
"get_n_splits",
"(",
")",
")",
",",
"dtype",
"=",
"'float32'",
")",
"aryEstimMtnCrvTst",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"varNumMtnDrctns",
",",
"betaSw",
".",
"get_n_splits",
"(",
")",
")",
",",
"dtype",
"=",
"'float32'",
")",
"resTrn",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"betaSw",
".",
"get_n_splits",
"(",
")",
")",
",",
"dtype",
"=",
"'float32'",
")",
"resTst",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"betaSw",
".",
"get_n_splits",
"(",
")",
")",
",",
"dtype",
"=",
"'float32'",
")",
"aryErrorTrn",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
")",
",",
"dtype",
"=",
"'float32'",
")",
"aryErrorTst",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
")",
",",
"dtype",
"=",
"'float32'",
")",
"contrast",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
",",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"1",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
",",
"1",
",",
"0",
"]",
",",
"]",
")",
"denomTrn",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"betaSw",
".",
"get_n_splits",
"(",
")",
",",
"len",
"(",
"contrast",
")",
")",
",",
"dtype",
"=",
"'float32'",
")",
"denomTst",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"betaSw",
".",
"get_n_splits",
"(",
")",
",",
"len",
"(",
"contrast",
")",
")",
",",
"dtype",
"=",
"'float32'",
")",
"elif",
"type",
"(",
"betaSw",
")",
"is",
"np",
".",
"ndarray",
"and",
"betaSw",
".",
"dtype",
"==",
"'bool'",
":",
"aryEstimMtnCrvTrn",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"varNumMtnDrctns",
",",
")",
",",
"dtype",
"=",
"'float32'",
")",
"aryEstimMtnCrvTst",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"varNumMtnDrctns",
",",
")",
",",
"dtype",
"=",
"'float32'",
")",
"resTrn",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
")",
",",
"dtype",
"=",
"'float32'",
")",
"resTst",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
")",
",",
"dtype",
"=",
"'float32'",
")",
"aryErrorTrn",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
")",
",",
"dtype",
"=",
"'float32'",
")",
"aryErrorTst",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
")",
",",
"dtype",
"=",
"'float32'",
")",
"contrast",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
",",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"1",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
",",
"1",
",",
"0",
"]",
",",
"]",
")",
"denomTrn",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"len",
"(",
"contrast",
")",
")",
",",
"dtype",
"=",
"'float32'",
")",
"denomTst",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"len",
"(",
"contrast",
")",
")",
",",
"dtype",
"=",
"'float32'",
")",
"else",
":",
"aryEstimMtnCrv",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"varNumMtnDrctns",
")",
",",
"dtype",
"=",
"'float32'",
")",
"# prepare array for best residuals",
"vecBstRes",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
",",
"dtype",
"=",
"'float32'",
")",
"vecBstRes",
"[",
":",
"]",
"=",
"np",
".",
"inf",
"# prepare counter to check that every voxel is matched to one winner mdl",
"vecLgcCounter",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
",",
"dtype",
"=",
"'float32'",
")",
"# We reshape the voxel time courses, so that time goes down the column",
"aryFuncChnk",
"=",
"aryFuncChnk",
".",
"T",
"# Change type to float 32:",
"aryFuncChnk",
"=",
"aryFuncChnk",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"aryPrfTc",
"=",
"aryPrfTc",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# Prepare status indicator if this is the first of the parallel processes:",
"if",
"idxPrc",
"==",
"0",
":",
"# We create a status indicator for the time consuming pRF model finding",
"# algorithm. Number of steps of the status indicator:",
"varStsStpSze",
"=",
"20",
"# Number of pRF models to fit:",
"varNumMdls",
"=",
"len",
"(",
"lstAllMdlInd",
")",
"# Vector with pRF values at which to give status feedback:",
"vecStatPrf",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"varNumMdls",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrf",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrf",
")",
"vecStatPrf",
"=",
"vecStatPrf",
".",
"astype",
"(",
"int",
")",
"# Vector with corresponding percentage values at which to give status",
"# feedback:",
"vecStatPrc",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"100",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrc",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrc",
")",
"vecStatPrc",
"=",
"vecStatPrc",
".",
"astype",
"(",
"int",
")",
"# Counter for status indicator:",
"varCntSts01",
"=",
"0",
"varCntSts02",
"=",
"0",
"# Loop through pRF models:",
"for",
"idx",
",",
"mdlInd",
"in",
"enumerate",
"(",
"lstAllMdlInd",
")",
":",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Status indicator:",
"if",
"varCntSts02",
"==",
"vecStatPrf",
"[",
"varCntSts01",
"]",
":",
"# Prepare status message:",
"strStsMsg",
"=",
"(",
"'---------Progress: '",
"+",
"str",
"(",
"vecStatPrc",
"[",
"varCntSts01",
"]",
")",
"+",
"' % --- '",
"+",
"str",
"(",
"vecStatPrf",
"[",
"varCntSts01",
"]",
")",
"+",
"' pRF models out of '",
"+",
"str",
"(",
"varNumMdls",
")",
")",
"print",
"(",
"strStsMsg",
")",
"# Only increment counter if the last value has not been",
"# reached yet:",
"if",
"varCntSts01",
"<",
"varStsStpSze",
":",
"varCntSts01",
"=",
"varCntSts01",
"+",
"int",
"(",
"1",
")",
"# check whether any model had this particular x, y, sigma combination",
"# as its best model",
"lgcTemp",
"=",
"[",
"aryBstIndChnk",
"==",
"idx",
"]",
"[",
"0",
"]",
"if",
"np",
".",
"greater",
"(",
"np",
".",
"sum",
"(",
"lgcTemp",
")",
",",
"0",
")",
":",
"# get current design matrix",
"aryDsgnTmp",
"=",
"aryPrfTc",
"[",
"mdlInd",
"]",
".",
"T",
"if",
"betaSw",
"is",
"'train'",
":",
"# training",
"aryTmpPrmEst",
",",
"aryTmpRes",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryDsgnTmp",
",",
"aryFuncChnk",
"[",
":",
",",
"lgcTemp",
"]",
")",
"[",
"0",
":",
"2",
"]",
"aryEstimMtnCrv",
"[",
"lgcTemp",
",",
":",
"]",
"=",
"aryTmpPrmEst",
".",
"T",
"vecBstRes",
"[",
"lgcTemp",
"]",
"=",
"aryTmpRes",
"elif",
"type",
"(",
"betaSw",
")",
"is",
"np",
".",
"ndarray",
"and",
"betaSw",
".",
"dtype",
"==",
"'float'",
":",
"# get beta weights for axis of motion tuning curves",
"aryEstimMtnCrv",
"[",
"lgcTemp",
",",
":",
"]",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryDsgnTmp",
",",
"aryFuncChnk",
"[",
":",
",",
"lgcTemp",
"]",
")",
"[",
"0",
"]",
".",
"T",
"# calculate prediction",
"aryPredTc",
"=",
"np",
".",
"dot",
"(",
"aryDsgnTmp",
",",
"betaSw",
"[",
"lgcTemp",
",",
":",
"]",
".",
"T",
")",
"# Sum of squares:",
"vecBstRes",
"[",
"lgcTemp",
"]",
"=",
"np",
".",
"sum",
"(",
"(",
"aryFuncChnk",
"[",
":",
",",
"lgcTemp",
"]",
"-",
"aryPredTc",
")",
"**",
"2",
",",
"axis",
"=",
"0",
")",
"elif",
"type",
"(",
"betaSw",
")",
"is",
"np",
".",
"ndarray",
"and",
"betaSw",
".",
"dtype",
"==",
"'bool'",
":",
"# get beta weights for training",
"betas",
",",
"resTrn",
"[",
"lgcTemp",
"]",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryDsgnTmp",
"[",
"betaSw",
",",
":",
"]",
",",
"aryFuncChnk",
"[",
"betaSw",
"]",
"[",
":",
",",
"lgcTemp",
"]",
")",
"[",
"0",
":",
"2",
"]",
"aryEstimMtnCrvTrn",
"[",
"lgcTemp",
",",
":",
"]",
"=",
"betas",
".",
"T",
"# get beta weights for validation",
"betas",
",",
"resTst",
"[",
"lgcTemp",
"]",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryDsgnTmp",
"[",
"~",
"betaSw",
",",
":",
"]",
",",
"aryFuncChnk",
"[",
"~",
"betaSw",
"]",
"[",
":",
",",
"lgcTemp",
"]",
")",
"[",
"0",
":",
"2",
"]",
"aryEstimMtnCrvTrn",
"[",
"lgcTemp",
",",
":",
"]",
"=",
"betas",
".",
"T",
"# calculate CC for training",
"aryCcTrn",
"=",
"np",
".",
"linalg",
".",
"pinv",
"(",
"np",
".",
"dot",
"(",
"aryDsgnTmp",
"[",
"betaSw",
",",
":",
"]",
".",
"T",
",",
"aryDsgnTmp",
"[",
"betaSw",
",",
":",
"]",
")",
")",
"aryCcTst",
"=",
"np",
".",
"linalg",
".",
"pinv",
"(",
"np",
".",
"dot",
"(",
"aryDsgnTmp",
"[",
"~",
"betaSw",
",",
":",
"]",
".",
"T",
",",
"aryDsgnTmp",
"[",
"~",
"betaSw",
",",
":",
"]",
")",
")",
"# calculate Error for training",
"aryErrorTrn",
"[",
"lgcTemp",
"]",
"=",
"np",
".",
"var",
"(",
"np",
".",
"subtract",
"(",
"aryFuncChnk",
"[",
"betaSw",
"]",
"[",
":",
",",
"lgcTemp",
"]",
",",
"np",
".",
"dot",
"(",
"aryDsgnTmp",
"[",
"betaSw",
",",
":",
"]",
",",
"aryEstimMtnCrvTrn",
"[",
"lgcTemp",
",",
":",
"]",
".",
"T",
")",
")",
",",
"axis",
"=",
"0",
")",
"# calculate Error for test",
"aryErrorTst",
"[",
"lgcTemp",
"]",
"=",
"np",
".",
"var",
"(",
"np",
".",
"subtract",
"(",
"aryFuncChnk",
"[",
"~",
"betaSw",
"]",
"[",
":",
",",
"lgcTemp",
"]",
",",
"np",
".",
"dot",
"(",
"aryDsgnTmp",
"[",
"~",
"betaSw",
",",
":",
"]",
",",
"aryEstimMtnCrvTst",
"[",
"lgcTemp",
",",
":",
"]",
".",
"T",
")",
")",
",",
"axis",
"=",
"0",
")",
"# calculate denominator for training",
"for",
"indContr",
",",
"contr",
"in",
"enumerate",
"(",
"contrast",
")",
":",
"denomTrn",
"[",
"lgcTemp",
",",
"indContr",
"]",
"=",
"np",
".",
"sqrt",
"(",
"aryErrorTrn",
"[",
"lgcTemp",
"]",
"*",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"contr",
",",
"aryCcTrn",
")",
",",
"contr",
".",
"T",
")",
")",
"denomTst",
"[",
"lgcTemp",
",",
"indContr",
"]",
"=",
"np",
".",
"sqrt",
"(",
"aryErrorTst",
"[",
"lgcTemp",
"]",
"*",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"contr",
",",
"aryCcTst",
")",
",",
"contr",
".",
"T",
")",
")",
"elif",
"type",
"(",
"betaSw",
")",
"is",
"sklearn",
".",
"model_selection",
".",
"_split",
".",
"KFold",
":",
"for",
"idxCV",
",",
"(",
"idxTrn",
",",
"idxVal",
")",
"in",
"enumerate",
"(",
"betaSw",
".",
"split",
"(",
"aryDsgnTmp",
")",
")",
":",
"# get beta weights for training",
"betas",
",",
"resTrn",
"[",
"lgcTemp",
",",
"idxCV",
"]",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryDsgnTmp",
"[",
"idxTrn",
"]",
",",
"aryFuncChnk",
"[",
"idxTrn",
"]",
"[",
":",
",",
"lgcTemp",
"]",
")",
"[",
"0",
":",
"2",
"]",
"aryEstimMtnCrvTrn",
"[",
"lgcTemp",
",",
":",
",",
"idxCV",
"]",
"=",
"betas",
".",
"T",
"# get beta weights for validation",
"betas",
",",
"resTst",
"[",
"lgcTemp",
",",
"idxCV",
"]",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryDsgnTmp",
"[",
"idxVal",
"]",
",",
"aryFuncChnk",
"[",
"idxVal",
"]",
"[",
":",
",",
"lgcTemp",
"]",
")",
"[",
"0",
":",
"2",
"]",
"aryEstimMtnCrvTst",
"[",
"lgcTemp",
",",
":",
",",
"idxCV",
"]",
"=",
"betas",
".",
"T",
"# calculate CC for training",
"aryCcTrn",
"=",
"np",
".",
"linalg",
".",
"pinv",
"(",
"np",
".",
"dot",
"(",
"aryDsgnTmp",
"[",
"idxTrn",
"]",
".",
"T",
",",
"aryDsgnTmp",
"[",
"idxTrn",
"]",
")",
")",
"aryCcTst",
"=",
"np",
".",
"linalg",
".",
"pinv",
"(",
"np",
".",
"dot",
"(",
"aryDsgnTmp",
"[",
"idxVal",
"]",
".",
"T",
",",
"aryDsgnTmp",
"[",
"idxVal",
"]",
")",
")",
"# calculate Error for training",
"aryErrorTrn",
"[",
"lgcTemp",
"]",
"=",
"np",
".",
"var",
"(",
"np",
".",
"subtract",
"(",
"aryFuncChnk",
"[",
"idxTrn",
"]",
"[",
":",
",",
"lgcTemp",
"]",
",",
"np",
".",
"dot",
"(",
"aryDsgnTmp",
"[",
"idxTrn",
"]",
",",
"aryEstimMtnCrvTrn",
"[",
"lgcTemp",
",",
":",
",",
"idxCV",
"]",
".",
"T",
")",
")",
",",
"axis",
"=",
"0",
")",
"# calculate Error for test",
"aryErrorTst",
"[",
"lgcTemp",
"]",
"=",
"np",
".",
"var",
"(",
"np",
".",
"subtract",
"(",
"aryFuncChnk",
"[",
"idxVal",
"]",
"[",
":",
",",
"lgcTemp",
"]",
",",
"np",
".",
"dot",
"(",
"aryDsgnTmp",
"[",
"idxVal",
"]",
",",
"aryEstimMtnCrvTst",
"[",
"lgcTemp",
",",
":",
",",
"idxCV",
"]",
".",
"T",
")",
")",
",",
"axis",
"=",
"0",
")",
"# calculate denominator for training",
"for",
"indContr",
",",
"contr",
"in",
"enumerate",
"(",
"contrast",
")",
":",
"denomTrn",
"[",
"lgcTemp",
",",
"idxCV",
",",
"indContr",
"]",
"=",
"np",
".",
"sqrt",
"(",
"aryErrorTrn",
"[",
"lgcTemp",
"]",
"*",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"contr",
",",
"aryCcTrn",
")",
",",
"contr",
".",
"T",
")",
")",
"denomTst",
"[",
"lgcTemp",
",",
"idxCV",
",",
"indContr",
"]",
"=",
"np",
".",
"sqrt",
"(",
"aryErrorTst",
"[",
"lgcTemp",
"]",
"*",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"contr",
",",
"aryCcTst",
")",
",",
"contr",
".",
"T",
")",
")",
"# increase logical counter to verify later that every voxel",
"# was visited only once",
"vecLgcCounter",
"[",
"lgcTemp",
"]",
"+=",
"1",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Increment status indicator counter:",
"varCntSts02",
"=",
"varCntSts02",
"+",
"1",
"# check that every voxel was visited only once",
"strErrMsg",
"=",
"(",
"'It looks like at least voxel was revisted more than once. '",
"+",
"'Check whether the R2 was calculated correctly'",
")",
"assert",
"np",
".",
"sum",
"(",
"vecLgcCounter",
")",
"==",
"len",
"(",
"vecLgcCounter",
")",
",",
"strErrMsg",
"if",
"type",
"(",
"betaSw",
")",
"is",
"sklearn",
".",
"model_selection",
".",
"_split",
".",
"KFold",
":",
"# calculate t-values",
"aryTvalsTrn",
"=",
"np",
".",
"empty",
"(",
"(",
"varNumVoxChnk",
",",
"contrast",
".",
"shape",
"[",
"0",
"]",
",",
"betaSw",
".",
"get_n_splits",
"(",
")",
")",
")",
"aryTvalsTst",
"=",
"np",
".",
"empty",
"(",
"(",
"varNumVoxChnk",
",",
"contrast",
".",
"shape",
"[",
"0",
"]",
",",
"betaSw",
".",
"get_n_splits",
"(",
")",
")",
")",
"for",
"ind1",
",",
"contr",
"in",
"enumerate",
"(",
"contrast",
")",
":",
"for",
"ind2",
"in",
"range",
"(",
"betaSw",
".",
"get_n_splits",
"(",
")",
")",
":",
"aryTvalsTrn",
"[",
":",
",",
"ind1",
",",
"ind2",
"]",
"=",
"np",
".",
"divide",
"(",
"np",
".",
"dot",
"(",
"contr",
",",
"aryEstimMtnCrvTrn",
"[",
":",
",",
":",
",",
"ind2",
"]",
".",
"T",
")",
",",
"denomTrn",
"[",
":",
",",
"ind2",
",",
"ind1",
"]",
")",
"aryTvalsTst",
"[",
":",
",",
"ind1",
",",
"ind2",
"]",
"=",
"np",
".",
"divide",
"(",
"np",
".",
"dot",
"(",
"contr",
",",
"aryEstimMtnCrvTst",
"[",
":",
",",
":",
",",
"ind2",
"]",
".",
"T",
")",
",",
"denomTst",
"[",
":",
",",
"ind2",
",",
"ind1",
"]",
")",
"# Output list:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"aryEstimMtnCrvTrn",
",",
"aryEstimMtnCrvTst",
",",
"aryTvalsTrn",
",",
"aryTvalsTst",
",",
"]",
"queOut",
".",
"put",
"(",
"lstOut",
")",
"elif",
"type",
"(",
"betaSw",
")",
"is",
"np",
".",
"ndarray",
"and",
"betaSw",
".",
"dtype",
"==",
"'bool'",
":",
"# calculate t-values",
"aryTvalsTrn",
"=",
"np",
".",
"empty",
"(",
"(",
"varNumVoxChnk",
",",
"contrast",
".",
"shape",
"[",
"0",
"]",
",",
")",
")",
"aryTvalsTst",
"=",
"np",
".",
"empty",
"(",
"(",
"varNumVoxChnk",
",",
"contrast",
".",
"shape",
"[",
"0",
"]",
",",
")",
")",
"for",
"ind1",
",",
"contr",
"in",
"enumerate",
"(",
"contrast",
")",
":",
"aryTvalsTrn",
"[",
":",
",",
"ind1",
"]",
"=",
"np",
".",
"divide",
"(",
"np",
".",
"dot",
"(",
"contr",
",",
"aryEstimMtnCrvTrn",
".",
"T",
")",
",",
"denomTrn",
"[",
":",
",",
"ind1",
"]",
")",
"aryTvalsTst",
"[",
":",
",",
"ind1",
"]",
"=",
"np",
".",
"divide",
"(",
"np",
".",
"dot",
"(",
"contr",
",",
"aryEstimMtnCrvTst",
".",
"T",
")",
",",
"denomTst",
"[",
":",
",",
"ind1",
"]",
")",
"# Output list:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"aryEstimMtnCrvTrn",
",",
"aryEstimMtnCrvTst",
",",
"aryTvalsTrn",
",",
"aryTvalsTst",
",",
"]",
"queOut",
".",
"put",
"(",
"lstOut",
")",
"else",
":",
"# After finding the best fitting model for each voxel, we still have to",
"# calculate the coefficient of determination (R-squared) for each voxel. We",
"# start by calculating the total sum of squares (i.e. the deviation of the",
"# data from the mean). The mean of each time course:",
"vecFuncMean",
"=",
"np",
".",
"mean",
"(",
"aryFuncChnk",
",",
"axis",
"=",
"0",
")",
"# Deviation from the mean for each datapoint:",
"vecFuncDev",
"=",
"np",
".",
"subtract",
"(",
"aryFuncChnk",
",",
"vecFuncMean",
"[",
"None",
",",
":",
"]",
")",
"# Sum of squares:",
"vecSsTot",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"power",
"(",
"vecFuncDev",
",",
"2.0",
")",
",",
"axis",
"=",
"0",
")",
"# Coefficient of determination:",
"vecBstR2",
"=",
"np",
".",
"subtract",
"(",
"1.0",
",",
"np",
".",
"divide",
"(",
"vecBstRes",
",",
"vecSsTot",
")",
")",
"# Output list:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"vecBstR2",
",",
"aryEstimMtnCrv",
"]",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Calculate voxel betas and R^2 for the best model.
Parameters
----------
idxPrc : TODO
(?)
aryPrfTc : np.array, shape (?)
Population receptive field time courses.
lstAllMdlInd : list
List of the indices of all models.
aryFuncChnk : TODO
Chunk of something(?)
aryBstIndChnk : np.array, shape (?)
Points for every voxel to the index of the best model
betaSw : str, iterator, or np.array, shape (?)
Best beta correlation coefficients found in training.
queOut : TODO
Queue output (?)
Notes
-----
This is done after fitting with cross validation, since during the
fitting process, we never fit the model to the entire data. | [
"Calculate",
"voxel",
"betas",
"and",
"R^2",
"for",
"the",
"best",
"model",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_calcR2_getBetas.py#L25-L359 |
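For orientation, here is a minimal, self-contained sketch of the per-partition GLM step that getBetas repeats for every winning pRF model: ordinary least squares on a subset of timepoints, then a contrast t-value built from the residual variance and the pseudo-inverse of the design covariance. The design matrix, voxel time course, split and contrast below are invented for illustration and are not taken from the pyprf_feature repository.

import numpy as np

np.random.seed(0)
n_vols, n_pred = 200, 5
aryDsgn = np.random.randn(n_vols, n_pred).astype(np.float32)        # design matrix (time x predictors)
vecFunc = aryDsgn @ np.array([1., .5, 0., 0., 0.], dtype=np.float32)
vecFunc = vecFunc + np.random.randn(n_vols).astype(np.float32)      # one voxel's noisy time course

lgcTrn = np.zeros(n_vols, dtype=bool)
lgcTrn[:n_vols // 2] = True                                          # boolean train/validation split, like betaSw
contr = np.array([1., 0., 0., 0., 0.], dtype=np.float32)            # hypothetical contrast on the first predictor

def contrast_tval(dsgn, func, contr):
    betas = np.linalg.lstsq(dsgn, func, rcond=None)[0]               # beta weights for this partition
    err_var = np.var(func - dsgn @ betas)                            # residual variance
    cc = np.linalg.pinv(dsgn.T @ dsgn)                               # (X'X)^-1 via the pseudo-inverse
    denom = np.sqrt(err_var * (contr @ cc @ contr))
    return (contr @ betas) / denom

print(contrast_tval(aryDsgn[lgcTrn], vecFunc[lgcTrn], contr),        # training t-value
      contrast_tval(aryDsgn[~lgcTrn], vecFunc[~lgcTrn], contr))      # validation t-value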
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/frontiers.py | Frontiers.nav_creators | def nav_creators(self, article):
"""
Frontiers method.
Given an Article class instance, this is responsible for returning the
names for creators of the article. For our purposes, it is sufficient to
list only the authors, returning their name, role=aut, and file-as name.
This returns a list of Creator(name, role, file_as)
"""
creator_list = []
for contrib_group in article.metadata.front.article_meta.contrib_group:
for contrib in contrib_group.contrib:
if not contrib.attrs['contrib-type'] == 'author':
continue
if contrib.collab:
auth = etree.tostring(contrib.collab[0].node, method='text', encoding='utf-8')
file_as = auth
elif contrib.anonymous:
auth = 'Anonymous'
file_as = auth
else:
name = contrib.name[0] # Work with only first name listed
surname = name.surname.text
given = name.given_names
if given: # Given is optional
if given.text: # Odd instances of empty tags
auth = ' '.join([surname, given.text])
given_initial = given.text[0]
file_as = ', '.join([surname, given_initial])
else:
auth = surname
file_as = auth
else:
auth = surname
file_as = auth
new_creator = creator(auth, 'aut', file_as)
creator_list.append(new_creator)
return creator_list | python | def nav_creators(self, article):
"""
Frontiers method.
Given an Article class instance, this is responsible for returning the
names for creators of the article. For our purposes, it is sufficient to
list only the authors, returning their name, role=aut, and file-as name.
This returns a list of Creator(name, role, file_as)
"""
creator_list = []
for contrib_group in article.metadata.front.article_meta.contrib_group:
for contrib in contrib_group.contrib:
if not contrib.attrs['contrib-type'] == 'author':
continue
if contrib.collab:
auth = etree.tostring(contrib.collab[0].node, method='text', encoding='utf-8')
file_as = auth
elif contrib.anonymous:
auth = 'Anonymous'
file_as = auth
else:
name = contrib.name[0] # Work with only first name listed
surname = name.surname.text
given = name.given_names
if given: # Given is optional
if given.text: # Odd instances of empty tags
auth = ' '.join([surname, given.text])
given_initial = given.text[0]
file_as = ', '.join([surname, given_initial])
else:
auth = surname
file_as = auth
else:
auth = surname
file_as = auth
new_creator = creator(auth, 'aut', file_as)
creator_list.append(new_creator)
return creator_list | [
"def",
"nav_creators",
"(",
"self",
",",
"article",
")",
":",
"creator_list",
"=",
"[",
"]",
"for",
"contrib_group",
"in",
"article",
".",
"metadata",
".",
"front",
".",
"article_meta",
".",
"contrib_group",
":",
"for",
"contrib",
"in",
"contrib_group",
".",
"contrib",
":",
"if",
"not",
"contrib",
".",
"attrs",
"[",
"'contrib-type'",
"]",
"==",
"'author'",
":",
"continue",
"if",
"contrib",
".",
"collab",
":",
"auth",
"=",
"etree",
".",
"tostring",
"(",
"contrib",
".",
"collab",
"[",
"0",
"]",
".",
"node",
",",
"method",
"=",
"'text'",
",",
"encoding",
"=",
"'utf-8'",
")",
"file_as",
"=",
"auth",
"elif",
"contrib",
".",
"anonymous",
":",
"auth",
"=",
"'Anonymous'",
"file_as",
"=",
"auth",
"else",
":",
"name",
"=",
"contrib",
".",
"name",
"[",
"0",
"]",
"# Work with only first name listed",
"surname",
"=",
"name",
".",
"surname",
".",
"text",
"given",
"=",
"name",
".",
"given_names",
"if",
"given",
":",
"# Given is optional",
"if",
"given",
".",
"text",
":",
"# Odd instances of empty tags",
"auth",
"=",
"' '",
".",
"join",
"(",
"[",
"surname",
",",
"given",
".",
"text",
"]",
")",
"given_initial",
"=",
"given",
".",
"text",
"[",
"0",
"]",
"file_as",
"=",
"', '",
".",
"join",
"(",
"[",
"surname",
",",
"given_initial",
"]",
")",
"else",
":",
"auth",
"=",
"surname",
"file_as",
"=",
"auth",
"else",
":",
"auth",
"=",
"surname",
"file_as",
"=",
"auth",
"new_creator",
"=",
"creator",
"(",
"auth",
",",
"'aut'",
",",
"file_as",
")",
"creator_list",
".",
"append",
"(",
"new_creator",
")",
"return",
"creator_list"
] | Frontiers method.
Given an Article class instance, this is responsible for returning the
names for creators of the article. For our purposes, it is sufficient to
list only the authors, returning their name, role=aut, and file-as name.
This returns a list of Creator(name, role, file_as) | [
"Frontiers",
"method",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/frontiers.py#L27-L65 |
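A short illustration of the name handling in nav_creators above, with invented values; when given-names is missing or empty, both strings fall back to the surname, and collab or anonymous contributors bypass the name handling entirely.

surname, given = 'Doe', 'Jane'               # invented sample contributor
auth = ' '.join([surname, given])            # 'Doe Jane'
file_as = ', '.join([surname, given[0]])     # 'Doe, J'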
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/frontiers.py | Frontiers.nav_title | def nav_title(self, article):
"""
Frontiers method.
Given an Article class instance, this will return a string representing
the title of the article. This is done by serializing the text
in the Article's article-title element.
"""
article_title = article.metadata.front.article_meta.title_group.article_title.node
return str(etree.tostring(article_title, method='text', encoding='utf-8'), encoding='utf-8') | python | def nav_title(self, article):
"""
Frontiers method.
Given an Article class instance, this will return a string representing
the title of the article. This is done by serializing the text
in the Article's article-title element.
"""
article_title = article.metadata.front.article_meta.title_group.article_title.node
return str(etree.tostring(article_title, method='text', encoding='utf-8'), encoding='utf-8') | [
"def",
"nav_title",
"(",
"self",
",",
"article",
")",
":",
"article_title",
"=",
"article",
".",
"metadata",
".",
"front",
".",
"article_meta",
".",
"title_group",
".",
"article_title",
".",
"node",
"return",
"str",
"(",
"etree",
".",
"tostring",
"(",
"article_title",
",",
"method",
"=",
"'text'",
",",
"encoding",
"=",
"'utf-8'",
")",
",",
"encoding",
"=",
"'utf-8'",
")"
] | Frontiers method.
Given an Article class instance, this will return a string representing
the title of the article. This is done by serializing the text
in the Article's article-title element. | [
"Frontiers",
"method",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/frontiers.py#L67-L76 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/nlm_transform/citation.py | format_citation | def format_citation(citation, citation_type=None):
"""
This method may be built to support elements from different Tag Suite
versions with the following tag names:
citation, element-citation, mixed-citation, and nlm-citation
The citation-type attribute is optional, and may also be empty; if it has a
value then it should appear in the following prescribed list, or it will be
treated as 'other'.
book Book or book series
commun Informal or personal communication, such as a phone call or
an email message
confproc Conference proceedings
discussion Discussion among a group in some forum – public, private, or
electronic – which may or may not be moderated, for example,
a single discussion thread in a listserv
gov Government publication or government standard
journal Journal article
list Listserv or discussion group (as an entity, as opposed to a
single discussion thread which uses the value "discussion")
other None of the listed types.
patent Patent or patent application
thesis Work written as part of the completion of an advanced degree
web Website
This method will accept a passed citation_type argument which will override
checking of the element's citation-type attribute and force the formatting
according to the passed string value. Note that this may not be appropriate
in many cases.
"""
cite_types = {'book': self.format_book_citation,
'commun': self.format_commun_citation,
'confproc': self.format_confproc_citation,
'discussion': self.format_discussion_citation,
'gov': self.format_gov_citation,
'journal': self.format_journal_citation,
'list': self.format_list_citation,
'other': self.format_other_citation,
'patent': self.format_patent_citation,
'thesis': self.format_thesis_citation,
'web': self.format_web_citation,
'': self.format_other_citation, # Empty becomes 'other'
None: self.format_other_citation} # None becomes 'other'
#Only check if no citation_type value is passed
if citation_type is None:
#Get the citation-type attribute value
if 'citation-type' in citation.attrib:
citation_type = citation.attrib['citation-type']
#Pass the citation to the appropriate function and return result
return cite_types[citation_type](citation)
@staticmethod
def format_book_citation(self, citation):
"""
citation-type=\"book\"
"""
#Get the count of authors
author_group_count = int(citation.xpath('count(person-group) + count(collab)'))
#Detect if there are non-authors
if citation.xpath('person-group[@person-group-type!=\'author\']'):
non_authors = True
else:
non_authors= False
#Detect article-title
if citation.xpath('article-title'):
article_title = True
else:
article_title = False
#Find out if there is at least one author or compiler
auth_or_comp = False
for person_group in citation.findall('person-group'):
if 'person-group-type' in person_group.attrib:
if person_group.attrib['person-group-type'] in ['author', 'compiler']:
auth_or_comp = True
break
#These pieces of information allow us to provide two special use cases
#and one general use case.
#First special case:
if author_group_count > 0 and non_authors and article_title:
pass
#Second special case
elif auth_or_comp:
pass
#General case
else:
pass
@staticmethod
def format_commun_citation(self, citation):
"""
citation-type=\"commun\"
"""
@staticmethod
def format_confproc_citation(self, citation):
"""
citation-type=\"confproc\"
"""
@staticmethod
def format_discussion_citation(self, citation):
"""
citation-type=\"discussion\"
"""
@staticmethod
def format_gov_citation(self, citation):
"""
citation-type=\"gov\"
"""
@staticmethod
def format_journal_citation(self, citation):
"""
citation-type=\"journal\"
"""
@staticmethod
def format_list_citation(self, citation):
"""
citation-type=\"list\"
"""
@staticmethod
def format_other_citation(self, citation):
"""
citation-type=\"other\"
"""
@staticmethod
def format_patent_citation(self, citation):
"""
citation-type=\"patent\"
"""
@staticmethod
def format_thesis_citation(self, citation):
"""
citation-type=\"thesis\"
"""
#Treat the same as "book"
return format_book_citation(citation)
@staticmethod
def format_web_citation(self, citation):
"""
citation-type=\"web\"
""" | python | def format_citation(citation, citation_type=None):
"""
This method may be built to support elements from different Tag Suite
versions with the following tag names:
citation, element-citation, mixed-citation, and nlm-citation
The citation-type attribute is optional, and may also be empty; if it has a
value then it should appear in the following prescribed list, or it will be
treated as 'other'.
book Book or book series
commun Informal or personal communication, such as a phone call or
an email message
confproc Conference proceedings
discussion Discussion among a group in some forum – public, private, or
electronic – which may or may not be moderated, for example,
a single discussion thread in a listserv
gov Government publication or government standard
journal Journal article
list Listserv or discussion group (as an entity, as opposed to a
single discussion thread which uses the value "discussion")
other None of the listed types.
patent Patent or patent application
thesis Work written as part of the completion of an advanced degree
web Website
This method will accept a passed citation_type argument which will override
checking of the element's citation-type attribute and force the formatting
according to the passed string value. Note that this may not be appropriate
in many cases.
"""
cite_types = {'book': self.format_book_citation,
'commun': self.format_commun_citation,
'confproc': self.format_confproc_citation,
'discussion': self.format_discussion_citation,
'gov': self.format_gov_citation,
'journal': self.format_journal_citation,
'list': self.format_list_citation,
'other': self.format_other_citation,
'patent': self.format_patent_citation,
'thesis': self.format_thesis_citation,
'web': self.format_web_citation,
'': self.format_other_citation, # Empty becomes 'other'
None: self.format_other_citation} # None becomes 'other'
#Only check if no citation_type value is passed
if citation_type is None:
#Get the citation-type attribute value
if 'citation-type' in nlm_citation.attrib:
citation_type = nlm_citation.attrib['citation-type']
#Pass the citation to the appropriate function and return result
return cite_types[citation_type](citation)
@staticmethod
def format_book_citation(self, citation):
"""
citation-type=\"book\"
"""
#Get the count of authors
author_group_count = int(citation.xpath('count(person-group) + count(collab)'))
#Detect if there are non-authors
if citation.xpath('person-group[@person-group-type!=\'author\']'):
non_authors = True
else:
non_authors= False
#Detect article-title
if citation.xpath('article-title'):
article_title = True
else:
article_title = False
#Find out if there is at least one author or compiler
auth_or_comp = False
for person_group in citation.findall('person-group'):
if 'person-group-type' in person_group.attrib:
if person_group.attrib['person-group-type'] in ['author', 'compiler']:
auth_or_comp = True
break
#These pieces of information allow us to provide two special use cases
#and one general use case.
#First special case:
if author_group_count > 0 and non_authors and article_title:
pass
#Second special case
elif auth_or_comp:
pass
#General case
else:
pass
@staticmethod
def format_commun_citation(self, citation):
"""
citation-type=\"commun\"
"""
@staticmethod
def format_confproc_citation(self, citation):
"""
citation-type=\"confproc\"
"""
@staticmethod
def format_discussion_citation(self, citation):
"""
citation-type=\"discussion\"
"""
@staticmethod
def format_gov_citation(self, citation):
"""
citation-type=\"gov\"
"""
@staticmethod
def format_journal_citation(self, citation):
"""
citation-type=\"journal\"
"""
@staticmethod
def format_list_citation(self, citation):
"""
citation-type=\"list\"
"""
@staticmethod
def format_other_citation(self, citation):
"""
citation-type=\"other\"
"""
@staticmethod
def format_patent_citation(self, citation):
"""
citation-type=\"patent\"
"""
@staticmethod
def format_thesis_citation(self, citation):
"""
citation-type=\"thesis\"
"""
#Treat the same as "book"
return format_book_citation(citation)
@staticmethod
def format_web_citation(self, citation):
"""
citation-type=\"web\"
""" | [
"def",
"format_citation",
"(",
"citation",
",",
"citation_type",
"=",
"None",
")",
":",
"cite_types",
"=",
"{",
"'book'",
":",
"self",
".",
"format_book_citation",
",",
"'commun'",
":",
"self",
".",
"format_commun_citation",
",",
"'confproc'",
":",
"self",
".",
"format_confproc_citation",
",",
"'discussion'",
":",
"self",
".",
"format_discussion_citation",
",",
"'gov'",
":",
"self",
".",
"format_gov_citation",
",",
"'journal'",
":",
"self",
".",
"format_journal_citation",
",",
"'list'",
":",
"self",
".",
"format_list_citation",
",",
"'other'",
":",
"self",
".",
"format_other_citation",
",",
"'patent'",
":",
"self",
".",
"format_patent_citation",
",",
"'thesis'",
":",
"self",
".",
"format_thesis_citation",
",",
"'web'",
":",
"self",
".",
"format_web_citation",
",",
"''",
":",
"self",
".",
"format_other_citation",
",",
"# Empty becomes 'other'",
"None",
":",
"self",
".",
"format_other_citation",
"}",
"# None becomes 'other'",
"#Only check if no citation_type value is passed",
"if",
"citation_type",
"is",
"None",
":",
"#Get the citation-type attribute value",
"if",
"'citation-type'",
"in",
"nlm_citation",
".",
"attrib",
":",
"citation_type",
"=",
"nlm_citation",
".",
"attrib",
"[",
"'citation-type'",
"]",
"#Pass the citation to the appropriate function and return result",
"return",
"cite_types",
"[",
"citation_type",
"]",
"(",
"citation",
")",
"@",
"staticmethod",
"def",
"format_book_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"book\\\"\n \"\"\"",
"#Get the count of authors",
"author_group_count",
"=",
"int",
"(",
"citation",
".",
"xpath",
"(",
"'count(person-group) + count(collab)'",
")",
")",
"#Detect if there are non-authors",
"if",
"citation",
".",
"xpath",
"(",
"'person-group[@person-group-type!=\\'author\\']'",
")",
":",
"non_authors",
"=",
"True",
"else",
":",
"non_authors",
"=",
"False",
"#Detect article-title",
"if",
"citation",
".",
"xpath",
"(",
"'article-title'",
")",
":",
"article_title",
"=",
"True",
"else",
":",
"article_title",
"=",
"False",
"#Find out if there is at least one author or compiler",
"auth_or_comp",
"=",
"False",
"for",
"person_group",
"in",
"citation",
".",
"findall",
"(",
"'person-group'",
")",
":",
"if",
"'person-group-type'",
"in",
"person_group",
".",
"attrib",
":",
"if",
"person_group",
".",
"attrib",
"[",
"'person-group-type'",
"]",
"in",
"[",
"'author'",
",",
"'compiler'",
"]",
":",
"auth_or_comp",
"=",
"True",
"break",
"#These pieces of information allow us to provide two special use cases",
"#and one general use case.",
"#First special case:",
"if",
"author_group_count",
">",
"0",
"and",
"non_authors",
"and",
"article_title",
":",
"pass",
"#Second special case",
"elif",
"auth_or_comp",
":",
"pass",
"#General case",
"else",
":",
"pass",
"@",
"staticmethod",
"def",
"format_commun_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"commun\\\"\n \"\"\"",
"@",
"staticmethod",
"def",
"format_confproc_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"confproc\\\"\n \"\"\"",
"@",
"staticmethod",
"def",
"format_discussion_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"discussion\\\"\n \"\"\"",
"@",
"staticmethod",
"def",
"format_gov_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"gov\\\"\n \"\"\"",
"@",
"staticmethod",
"def",
"format_journal_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"journal\\\"\n \"\"\"",
"@",
"staticmethod",
"def",
"format_list_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"list\\\"\n \"\"\"",
"@",
"staticmethod",
"def",
"format_other_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"other\\\"\n \"\"\"",
"@",
"staticmethod",
"def",
"format_patent_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"patent\\\"\n \"\"\"",
"@",
"staticmethod",
"def",
"format_thesis_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"thesis\\\"\n \"\"\"",
"#Treat the same as \"book\"",
"return",
"format_book_citation",
"(",
"citation",
")",
"@",
"staticmethod",
"def",
"format_web_citation",
"(",
"self",
",",
"citation",
")",
":",
"\"\"\"\n citation-type=\\\"web\\\"\n \"\"\""
] | This method may be built to support elements from different Tag Suite
versions with the following tag names:
citation, element-citation, mixed-citation, and nlm-citation
The citation-type attribute is optional, and may also be empty; if it has a
value then it should appear in the following prescribed list, or it will be
treated as 'other'.
book Book or book series
commun Informal or personal communication, such as a phone call or
an email message
confproc Conference proceedings
discussion Discussion among a group in some forum – public, private, or
electronic – which may or may not be moderated, for example,
a single discussion thread in a listserv
gov Government publication or government standard
journal Journal article
list Listserv or discussion group (as an entity, as opposed to a
single discussion thread which uses the value "discussion")
other None of the listed types.
patent Patent or patent application
thesis Work written as part of the completion of an advanced degree
web Website
This method will accept a passed citation_type argument which will override
checking of the element's citation-type attribute and force the formatting
according to the passed string value. Note that this may not be appropriate
in many cases. | [
"This",
"method",
"may",
"be",
"built",
"to",
"support",
"elements",
"from",
"different",
"Tag",
"Suite",
"versions",
"with",
"the",
"following",
"tag",
"names",
":",
"citation",
"element",
"-",
"citation",
"mixed",
"-",
"citation",
"and",
"nlm",
"-",
"citation"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/nlm_transform/citation.py#L21-L174 |
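The format_citation row above is a dispatch-table stub, so here is a minimal working sketch of the same pattern to make the intended control flow concrete. The element string and handler bodies are invented for illustration and are not part of the OpenAccess_EPUB module.

from lxml import etree

def handle_journal(cite):
    return 'journal: ' + (cite.findtext('article-title') or '')

def handle_other(cite):
    return 'other citation'

handlers = {'journal': handle_journal, '': handle_other, None: handle_other}

cite = etree.fromstring(
    '<citation citation-type="journal"><article-title>Example</article-title></citation>')
ctype = cite.attrib.get('citation-type')          # read the attribute off the passed element itself
print(handlers.get(ctype, handle_other)(cite))    # unknown or missing types fall back to 'other'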
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/inputs.py | plos_doi_to_xmlurl | def plos_doi_to_xmlurl(doi_string):
"""
Attempts to resolve a PLoS DOI into a URL path to the XML file.
"""
#Create URL to request DOI resolution from http://dx.doi.org
doi_url = 'http://dx.doi.org/{0}'.format(doi_string)
log.debug('DOI URL: {0}'.format(doi_url))
#Open the page, follow the redirect
try:
resolved_page = urllib.request.urlopen(doi_url)
except urllib.error.URLError as err:
print('Unable to resolve DOI URL, or could not connect')
raise err
else:
#Given the redirection, attempt to shape new request for PLoS servers
resolved_address = resolved_page.geturl()
log.debug('DOI resolved to {0}'.format(resolved_address))
parsed = urllib.parse.urlparse(resolved_address)
xml_url = '{0}://{1}'.format(parsed.scheme, parsed.netloc)
xml_url += '/article/fetchObjectAttachment.action?uri='
xml_path = parsed.path.replace(':', '%3A').replace('/', '%2F')
xml_path = xml_path.split('article%2F')[1]
xml_url += '{0}{1}'.format(xml_path, '&representation=XML')
log.debug('Shaped PLoS request for XML {0}'.format(xml_url))
#Return this url to the calling function
return xml_url | python | def plos_doi_to_xmlurl(doi_string):
"""
Attempts to resolve a PLoS DOI into a URL path to the XML file.
"""
#Create URL to request DOI resolution from http://dx.doi.org
doi_url = 'http://dx.doi.org/{0}'.format(doi_string)
log.debug('DOI URL: {0}'.format(doi_url))
#Open the page, follow the redirect
try:
resolved_page = urllib.request.urlopen(doi_url)
except urllib.error.URLError as err:
print('Unable to resolve DOI URL, or could not connect')
raise err
else:
#Given the redirection, attempt to shape new request for PLoS servers
resolved_address = resolved_page.geturl()
log.debug('DOI resolved to {0}'.format(resolved_address))
parsed = urllib.parse.urlparse(resolved_address)
xml_url = '{0}://{1}'.format(parsed.scheme, parsed.netloc)
xml_url += '/article/fetchObjectAttachment.action?uri='
xml_path = parsed.path.replace(':', '%3A').replace('/', '%2F')
xml_path = xml_path.split('article%2F')[1]
xml_url += '{0}{1}'.format(xml_path, '&representation=XML')
log.debug('Shaped PLoS request for XML {0}'.format(xml_url))
#Return this url to the calling function
return xml_url | [
"def",
"plos_doi_to_xmlurl",
"(",
"doi_string",
")",
":",
"#Create URL to request DOI resolution from http://dx.doi.org",
"doi_url",
"=",
"'http://dx.doi.org/{0}'",
".",
"format",
"(",
"doi_string",
")",
"log",
".",
"debug",
"(",
"'DOI URL: {0}'",
".",
"format",
"(",
"doi_url",
")",
")",
"#Open the page, follow the redirect",
"try",
":",
"resolved_page",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"doi_url",
")",
"except",
"urllib",
".",
"error",
".",
"URLError",
"as",
"err",
":",
"print",
"(",
"'Unable to resolve DOI URL, or could not connect'",
")",
"raise",
"err",
"else",
":",
"#Given the redirection, attempt to shape new request for PLoS servers",
"resolved_address",
"=",
"resolved_page",
".",
"geturl",
"(",
")",
"log",
".",
"debug",
"(",
"'DOI resolved to {0}'",
".",
"format",
"(",
"resolved_address",
")",
")",
"parsed",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"resolved_address",
")",
"xml_url",
"=",
"'{0}://{1}'",
".",
"format",
"(",
"parsed",
".",
"scheme",
",",
"parsed",
".",
"netloc",
")",
"xml_url",
"+=",
"'/article/fetchObjectAttachment.action?uri='",
"xml_path",
"=",
"parsed",
".",
"path",
".",
"replace",
"(",
"':'",
",",
"'%3A'",
")",
".",
"replace",
"(",
"'/'",
",",
"'%2F'",
")",
"xml_path",
"=",
"xml_path",
".",
"split",
"(",
"'article%2F'",
")",
"[",
"1",
"]",
"xml_url",
"+=",
"'{0}{1}'",
".",
"format",
"(",
"xml_path",
",",
"'&representation=XML'",
")",
"log",
".",
"debug",
"(",
"'Shaped PLoS request for XML {0}'",
".",
"format",
"(",
"xml_url",
")",
")",
"#Return this url to the calling function",
"return",
"xml_url"
] | Attempts to resolve a PLoS DOI into a URL path to the XML file. | [
"Attempts",
"to",
"resolve",
"a",
"PLoS",
"DOI",
"into",
"a",
"URL",
"path",
"to",
"the",
"XML",
"file",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/inputs.py#L23-L48 |
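A hedged usage sketch for plos_doi_to_xmlurl above; it needs network access, the DOI is a placeholder rather than a real article, and the host in the result depends on where dx.doi.org redirects.

xml_url = plos_doi_to_xmlurl('10.1371/journal.pone.0000001')
# shaped as <scheme>://<netloc>/article/fetchObjectAttachment.action?uri=<escaped path>&representation=XML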
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/inputs.py | doi_input | def doi_input(doi_string, download=True):
"""
This method accepts a DOI string and attempts to download the appropriate
xml file. If successful, it returns a path to that file. As with all URL
input types, the success of this method depends on supporting per-publisher
conventions and will fail on unsupported publishers
"""
log.debug('DOI Input - {0}'.format(doi_string))
doi_string = doi_string[4:]
if '10.1371' in doi_string: # Corresponds to PLoS
log.debug('DOI string shows PLoS')
xml_url = plos_doi_to_xmlurl(doi_string)
else:
log.critical('DOI input for this publisher is not supported')
sys.exit('This publisher is not yet supported by OpenAccess_EPUB')
return url_input(xml_url, download) | python | def doi_input(doi_string, download=True):
"""
This method accepts a DOI string and attempts to download the appropriate
xml file. If successful, it returns a path to that file. As with all URL
input types, the success of this method depends on supporting per-publisher
conventions and will fail on unsupported publishers
"""
log.debug('DOI Input - {0}'.format(doi_string))
doi_string = doi_string[4:]
if '10.1371' in doi_string: # Corresponds to PLoS
log.debug('DOI string shows PLoS')
xml_url = plos_doi_to_xmlurl(doi_string)
else:
log.critical('DOI input for this publisher is not supported')
sys.exit('This publisher is not yet supported by OpenAccess_EPUB')
return url_input(xml_url, download) | [
"def",
"doi_input",
"(",
"doi_string",
",",
"download",
"=",
"True",
")",
":",
"log",
".",
"debug",
"(",
"'DOI Input - {0}'",
".",
"format",
"(",
"doi_string",
")",
")",
"doi_string",
"=",
"doi_string",
"[",
"4",
":",
"]",
"if",
"'10.1371'",
"in",
"doi_string",
":",
"# Corresponds to PLoS",
"log",
".",
"debug",
"(",
"'DOI string shows PLoS'",
")",
"xml_url",
"=",
"plos_doi_to_xmlurl",
"(",
"doi_string",
")",
"else",
":",
"log",
".",
"critical",
"(",
"'DOI input for this publisher is not supported'",
")",
"sys",
".",
"exit",
"(",
"'This publisher is not yet supported by OpenAccess_EPUB'",
")",
"return",
"url_input",
"(",
"xml_url",
",",
"download",
")"
] | This method accepts a DOI string and attempts to download the appropriate
xml file. If successful, it returns a path to that file. As with all URL
input types, the success of this method depends on supporting per-publisher
conventions and will fail on unsupported publishers | [
"This",
"method",
"accepts",
"a",
"DOI",
"string",
"and",
"attempts",
"to",
"download",
"the",
"appropriate",
"xml",
"file",
".",
"If",
"successful",
"it",
"returns",
"a",
"path",
"to",
"that",
"file",
".",
"As",
"with",
"all",
"URL",
"input",
"types",
"the",
"success",
"of",
"this",
"method",
"depends",
"on",
"supporting",
"per",
"-",
"publisher",
"conventions",
"and",
"will",
"fail",
"on",
"unsupported",
"publishers"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/inputs.py#L51-L66 |
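Usage note for doi_input above: the slice doi_string[4:] assumes the caller includes the 'doi:' prefix, so a call looks like the line below (placeholder DOI, network access required).

xml_root_name = doi_input('doi:10.1371/journal.pone.0000001')   # strips 'doi:' and hands the rest to url_input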
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/inputs.py | url_input | def url_input(url_string, download=True):
"""
This method expects a direct URL link to an xml file. It will apply no
modifications to the received URL string, so ensure good input.
"""
log.debug('URL Input - {0}'.format(url_string))
try:
open_xml = urllib.request.urlopen(url_string)
except urllib.error.URLError as err:
print('utils.input.url_input received a bad URL, or could not connect')
raise err
else:
#Employ a quick check on the mimetype of the link
if not open_xml.headers['Content-Type'] == 'text/xml':
sys.exit('URL request does not appear to be XML')
filename = open_xml.headers['Content-Disposition'].split('\"')[1]
if download:
with open(filename, 'wb') as xml_file:
xml_file.write(open_xml.read())
return openaccess_epub.utils.file_root_name(filename) | python | def url_input(url_string, download=True):
"""
This method expects a direct URL link to an xml file. It will apply no
modifications to the received URL string, so ensure good input.
"""
log.debug('URL Input - {0}'.format(url_string))
try:
open_xml = urllib.request.urlopen(url_string)
except urllib.error.URLError as err:
print('utils.input.url_input received a bad URL, or could not connect')
raise err
else:
#Employ a quick check on the mimetype of the link
if not open_xml.headers['Content-Type'] == 'text/xml':
sys.exit('URL request does not appear to be XML')
filename = open_xml.headers['Content-Disposition'].split('\"')[1]
if download:
with open(filename, 'wb') as xml_file:
xml_file.write(open_xml.read())
return openaccess_epub.utils.file_root_name(filename) | [
"def",
"url_input",
"(",
"url_string",
",",
"download",
"=",
"True",
")",
":",
"log",
".",
"debug",
"(",
"'URL Input - {0}'",
".",
"format",
"(",
"url_string",
")",
")",
"try",
":",
"open_xml",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url_string",
")",
"except",
"urllib",
".",
"error",
".",
"URLError",
"as",
"err",
":",
"print",
"(",
"'utils.input.url_input received a bad URL, or could not connect'",
")",
"raise",
"err",
"else",
":",
"#Employ a quick check on the mimetype of the link",
"if",
"not",
"open_xml",
".",
"headers",
"[",
"'Content-Type'",
"]",
"==",
"'text/xml'",
":",
"sys",
".",
"exit",
"(",
"'URL request does not appear to be XML'",
")",
"filename",
"=",
"open_xml",
".",
"headers",
"[",
"'Content-Disposition'",
"]",
".",
"split",
"(",
"'\\\"'",
")",
"[",
"1",
"]",
"if",
"download",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"xml_file",
":",
"xml_file",
".",
"write",
"(",
"open_xml",
".",
"read",
"(",
")",
")",
"return",
"openaccess_epub",
".",
"utils",
".",
"file_root_name",
"(",
"filename",
")"
] | This method expects a direct URL link to an xml file. It will apply no
modifications to the received URL string, so ensure good input. | [
"This",
"method",
"expects",
"a",
"direct",
"URL",
"link",
"to",
"an",
"xml",
"file",
".",
"It",
"will",
"apply",
"no",
"modifications",
"to",
"the",
"received",
"URL",
"string",
"so",
"ensure",
"good",
"input",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/inputs.py#L69-L88 |
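The filename handling in url_input above leans on the Content-Disposition header; a tiny illustration with an invented header value:

content_disposition = 'attachment; filename="journal.pone.0000001.xml"'   # invented example header
filename = content_disposition.split('"')[1]                              # 'journal.pone.0000001.xml'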
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/inputs.py | frontiersZipInput | def frontiersZipInput(zip_path, output_prefix, download=None):
"""
This method provides support for Frontiers production using base zipfiles
as the input for ePub creation. It expects a valid pathname for one of the
two zipfiles, and that both zipfiles are present in the same directory.
"""
log.debug('frontiersZipInput called')
#If there is a problem with the input, it should clearly describe the issue
pathname, pathext = os.path.splitext(zip_path)
path, name = os.path.split(pathname)
if not pathext == '.zip': # Checks for a path to zipfile
log.error('Pathname provided does not end with .zip')
print('Invalid file path: Does not have a zip extension.')
sys.exit(1)
#Construct the pair of zipfile pathnames
file_root = name.split('-r')[0]
zipname1 = "{0}-r{1}.zip".format(file_root, '1')
zipname2 = "{0}-r{1}.zip".format(file_root, '2')
#Construct the pathnames for output
output = os.path.join(output_prefix, file_root)
if os.path.isdir(output):
shutil.rmtree(output) # Delete previous output
output_meta = os.path.join(output, 'META-INF')
images_output = os.path.join(output, 'EPUB', 'images')
with zipfile.ZipFile(os.path.join(path, zipname1), 'r') as xml_zip:
zip_dir = '{0}-r1'.format(file_root)
xml = '/'.join([zip_dir, '{0}.xml'.format(file_root)])
try:
xml_zip.extract(xml)
except KeyError:
log.critical('There is no item {0} in the zipfile'.format(xml))
sys.exit('There is no item {0} in the zipfile'.format(xml))
else:
if not os.path.isdir(output_meta):
os.makedirs(output_meta)
shutil.copy(xml, os.path.join(output_meta))
os.remove(xml)
os.rmdir(zip_dir)
with zipfile.ZipFile(os.path.join(path, zipname2), 'r') as image_zip:
zip_dir = '{0}-r2'.format(file_root)
for i in image_zip.namelist():
if 'image_m' in i:
image_zip.extract(i)
if not os.path.isdir(images_output):
os.makedirs(images_output)
unzipped_images = os.path.join(zip_dir, 'images', 'image_m')
for i in os.listdir(unzipped_images):
shutil.copy(os.path.join(unzipped_images, i), images_output)
shutil.rmtree(zip_dir)
return file_root | python | def frontiersZipInput(zip_path, output_prefix, download=None):
"""
This method provides support for Frontiers production using base zipfiles
as the input for ePub creation. It expects a valid pathname for one of the
two zipfiles, and that both zipfiles are present in the same directory.
"""
log.debug('frontiersZipInput called')
#If there is a problem with the input, it should clearly describe the issue
pathname, pathext = os.path.splitext(zip_path)
path, name = os.path.split(pathname)
if not pathext == '.zip': # Checks for a path to zipfile
log.error('Pathname provided does not end with .zip')
print('Invalid file path: Does not have a zip extension.')
sys.exit(1)
#Construct the pair of zipfile pathnames
file_root = name.split('-r')[0]
zipname1 = "{0}-r{1}.zip".format(file_root, '1')
zipname2 = "{0}-r{1}.zip".format(file_root, '2')
#Construct the pathnames for output
output = os.path.join(output_prefix, file_root)
if os.path.isdir(output):
shutil.rmtree(output) # Delete previous output
output_meta = os.path.join(output, 'META-INF')
images_output = os.path.join(output, 'EPUB', 'images')
with zipfile.ZipFile(os.path.join(path, zipname1), 'r') as xml_zip:
zip_dir = '{0}-r1'.format(file_root)
xml = '/'.join([zip_dir, '{0}.xml'.format(file_root)])
try:
xml_zip.extract(xml)
except KeyError:
log.critical('There is no item {0} in the zipfile'.format(xml))
sys.exit('There is no item {0} in the zipfile'.format(xml))
else:
if not os.path.isdir(output_meta):
os.makedirs(output_meta)
shutil.copy(xml, os.path.join(output_meta))
os.remove(xml)
os.rmdir(zip_dir)
with zipfile.ZipFile(os.path.join(path, zipname2), 'r') as image_zip:
zip_dir = '{0}-r2'.format(file_root)
for i in image_zip.namelist():
if 'image_m' in i:
image_zip.extract(i)
if not os.path.isdir(images_output):
os.makedirs(images_output)
unzipped_images = os.path.join(zip_dir, 'images', 'image_m')
for i in os.listdir(unzipped_images):
shutil.copy(os.path.join(unzipped_images, i), images_output)
shutil.rmtree(zip_dir)
return file_root | [
"def",
"frontiersZipInput",
"(",
"zip_path",
",",
"output_prefix",
",",
"download",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"'frontiersZipInput called'",
")",
"#If there is a problem with the input, it should clearly describe the issue",
"pathname",
",",
"pathext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"zip_path",
")",
"path",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"pathname",
")",
"if",
"not",
"pathext",
"==",
"'.zip'",
":",
"# Checks for a path to zipfile",
"log",
".",
"error",
"(",
"'Pathname provided does not end with .zip'",
")",
"print",
"(",
"'Invalid file path: Does not have a zip extension.'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"#Construct the pair of zipfile pathnames",
"file_root",
"=",
"name",
".",
"split",
"(",
"'-r'",
")",
"[",
"0",
"]",
"zipname1",
"=",
"\"{0}-r{1}.zip\"",
".",
"format",
"(",
"file_root",
",",
"'1'",
")",
"zipname2",
"=",
"\"{0}-r{1}.zip\"",
".",
"format",
"(",
"file_root",
",",
"'2'",
")",
"#Construct the pathnames for output",
"output",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_prefix",
",",
"file_root",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"output",
")",
":",
"shutil",
".",
"rmtree",
"(",
"output",
")",
"# Delete previous output",
"output_meta",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output",
",",
"'META-INF'",
")",
"images_output",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output",
",",
"'EPUB'",
",",
"'images'",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"zipname1",
")",
",",
"'r'",
")",
"as",
"xml_zip",
":",
"zip_dir",
"=",
"'{0}-r1'",
".",
"format",
"(",
"file_root",
")",
"xml",
"=",
"'/'",
".",
"join",
"(",
"[",
"zip_dir",
",",
"'{0}.xml'",
".",
"format",
"(",
"file_root",
")",
"]",
")",
"try",
":",
"xml_zip",
".",
"extract",
"(",
"xml",
")",
"except",
"KeyError",
":",
"log",
".",
"critical",
"(",
"'There is no item {0} in the zipfile'",
".",
"format",
"(",
"xml",
")",
")",
"sys",
".",
"exit",
"(",
"'There is no item {0} in the zipfile'",
".",
"format",
"(",
"xml",
")",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_meta",
")",
":",
"os",
".",
"makedirs",
"(",
"output_meta",
")",
"shutil",
".",
"copy",
"(",
"xml",
",",
"os",
".",
"path",
".",
"join",
"(",
"output_meta",
")",
")",
"os",
".",
"remove",
"(",
"xml",
")",
"os",
".",
"rmdir",
"(",
"zip_dir",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"zipname2",
")",
",",
"'r'",
")",
"as",
"image_zip",
":",
"zip_dir",
"=",
"'{0}-r2'",
".",
"format",
"(",
"file_root",
")",
"for",
"i",
"in",
"image_zip",
".",
"namelist",
"(",
")",
":",
"if",
"'image_m'",
"in",
"i",
":",
"image_zip",
".",
"extract",
"(",
"i",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"images_output",
")",
":",
"os",
".",
"makedirs",
"(",
"images_output",
")",
"unzipped_images",
"=",
"os",
".",
"path",
".",
"join",
"(",
"zip_dir",
",",
"'images'",
",",
"'image_m'",
")",
"for",
"i",
"in",
"os",
".",
"listdir",
"(",
"unzipped_images",
")",
":",
"shutil",
".",
"copy",
"(",
"os",
".",
"path",
".",
"join",
"(",
"unzipped_images",
",",
"i",
")",
",",
"images_output",
")",
"shutil",
".",
"rmtree",
"(",
"zip_dir",
")",
"return",
"file_root"
] | This method provides support for Frontiers production using base zipfiles
as the input for ePub creation. It expects a valid pathname for one of the
two zipfiles, and that both zipfiles are present in the same directory. | [
"This",
"method",
"provides",
"support",
"for",
"Frontiers",
"production",
"using",
"base",
"zipfiles",
"as",
"the",
"input",
"for",
"ePub",
"creation",
".",
"It",
"expects",
"a",
"valid",
"pathname",
"for",
"one",
"of",
"the",
"two",
"zipfiles",
"and",
"that",
"both",
"zipfiles",
"are",
"present",
"in",
"the",
"same",
"directory",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/inputs.py#L91-L140 |
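A minimal usage sketch for the function above. The zip path, output prefix, and article name are hypothetical; as the docstring requires, the companion `...-r2.zip` must sit next to the `...-r1.zip`, and `openaccess_epub` must be importable.

```python
# Hypothetical call: both fj-04-00042-r1.zip and fj-04-00042-r2.zip are assumed
# to exist in /data/frontiers, and output is written under /data/epub-output.
from openaccess_epub.utils.inputs import frontiersZipInput

file_root = frontiersZipInput('/data/frontiers/fj-04-00042-r1.zip', '/data/epub-output')
print(file_root)  # expected: 'fj-04-00042'
```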
curious-containers/cc-core | cc_core/commons/red.py | _red_listing_validation | def _red_listing_validation(key, listing):
"""
Raises a RedValidationError if the given listing does not comply with cwl_job_listing_schema.
If listing is None or an empty list, no exception is thrown.
:param key: The input key to build an error message if needed.
:param listing: The listing to validate
:raise RedValidationError: If the given listing does not comply with cwl_job_listing_schema
"""
if listing:
try:
jsonschema.validate(listing, cwl_job_listing_schema)
except ValidationError as e:
raise RedValidationError('REDFILE listing of input "{}" does not comply with jsonschema: {}'
.format(key, e.context)) | python | def _red_listing_validation(key, listing):
"""
Raises a RedValidationError if the given listing does not comply with cwl_job_listing_schema.
If listing is None or an empty list, no exception is thrown.
:param key: The input key to build an error message if needed.
:param listing: The listing to validate
:raise RedValidationError: If the given listing does not comply with cwl_job_listing_schema
"""
if listing:
try:
jsonschema.validate(listing, cwl_job_listing_schema)
except ValidationError as e:
raise RedValidationError('REDFILE listing of input "{}" does not comply with jsonschema: {}'
.format(key, e.context)) | [
"def",
"_red_listing_validation",
"(",
"key",
",",
"listing",
")",
":",
"if",
"listing",
":",
"try",
":",
"jsonschema",
".",
"validate",
"(",
"listing",
",",
"cwl_job_listing_schema",
")",
"except",
"ValidationError",
"as",
"e",
":",
"raise",
"RedValidationError",
"(",
"'REDFILE listing of input \"{}\" does not comply with jsonschema: {}'",
".",
"format",
"(",
"key",
",",
"e",
".",
"context",
")",
")"
] | Raises a RedValidationError if the given listing does not comply with cwl_job_listing_schema.
If listing is None or an empty list, no exception is thrown.
:param key: The input key to build an error message if needed.
:param listing: The listing to validate
:raise RedValidationError: If the given listing does not comply with cwl_job_listing_schema | [
"Raises",
"an",
"RedValidationError",
"if",
"the",
"given",
"listing",
"does",
"not",
"comply",
"with",
"cwl_job_listing_schema",
".",
"If",
"listing",
"is",
"None",
"or",
"an",
"empty",
"list",
"no",
"exception",
"is",
"thrown",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/red.py#L211-L226 |
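A short sketch of how this private helper could be exercised; the listing content is made up, and cc-core must be installed for the import to resolve.

```python
from cc_core.commons.red import _red_listing_validation

# Illustrative listing in the 'class'/'basename' form used elsewhere in cc-core.
listing = [
    {'class': 'File', 'basename': 'data.csv'},
    {'class': 'Directory', 'basename': 'images', 'listing': []},
]

try:
    _red_listing_validation('my_input', listing)  # returns silently if the listing is valid
except Exception as error:  # RedValidationError in practice; caught broadly here
    print(error)
```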
curious-containers/cc-core | cc_core/commons/red.py | red_get_mount_connectors | def red_get_mount_connectors(red_data, ignore_outputs):
"""
Returns a list of mounting connectors
:param red_data: The red data to be searched
:param ignore_outputs: If outputs should be ignored
:return: A list of connectors with active mount option.
"""
keys = []
batches = red_data.get('batches')
inputs = red_data.get('inputs')
if batches:
for batch in batches:
keys.extend(red_get_mount_connectors_from_inputs(batch['inputs']))
elif inputs:
keys.extend(red_get_mount_connectors_from_inputs(inputs))
if not ignore_outputs:
outputs = red_data.get('outputs')
if batches:
for batch in batches:
batch_outputs = batch.get('outputs')
if batch_outputs:
keys.extend(red_get_mount_connectors_from_outputs(batch_outputs))
elif outputs:
keys.extend(red_get_mount_connectors_from_outputs(outputs))
return keys | python | def red_get_mount_connectors(red_data, ignore_outputs):
"""
Returns a list of mounting connectors
:param red_data: The red data to be searched
:param ignore_outputs: If outputs should be ignored
:return: A list of connectors with active mount option.
"""
keys = []
batches = red_data.get('batches')
inputs = red_data.get('inputs')
if batches:
for batch in batches:
keys.extend(red_get_mount_connectors_from_inputs(batch['inputs']))
elif inputs:
keys.extend(red_get_mount_connectors_from_inputs(inputs))
if not ignore_outputs:
outputs = red_data.get('outputs')
if batches:
for batch in batches:
batch_outputs = batch.get('outputs')
if batch_outputs:
keys.extend(red_get_mount_connectors_from_outputs(batch_outputs))
elif outputs:
keys.extend(red_get_mount_connectors_from_outputs(outputs))
return keys | [
"def",
"red_get_mount_connectors",
"(",
"red_data",
",",
"ignore_outputs",
")",
":",
"keys",
"=",
"[",
"]",
"batches",
"=",
"red_data",
".",
"get",
"(",
"'batches'",
")",
"inputs",
"=",
"red_data",
".",
"get",
"(",
"'inputs'",
")",
"if",
"batches",
":",
"for",
"batch",
"in",
"batches",
":",
"keys",
".",
"extend",
"(",
"red_get_mount_connectors_from_inputs",
"(",
"batch",
"[",
"'inputs'",
"]",
")",
")",
"elif",
"inputs",
":",
"keys",
".",
"extend",
"(",
"red_get_mount_connectors_from_inputs",
"(",
"inputs",
")",
")",
"if",
"not",
"ignore_outputs",
":",
"outputs",
"=",
"red_data",
".",
"get",
"(",
"'outputs'",
")",
"if",
"batches",
":",
"for",
"batch",
"in",
"batches",
":",
"batch_outputs",
"=",
"batch",
".",
"get",
"(",
"'outputs'",
")",
"if",
"batch_outputs",
":",
"keys",
".",
"extend",
"(",
"red_get_mount_connectors_from_outputs",
"(",
"batch_outputs",
")",
")",
"elif",
"outputs",
":",
"keys",
".",
"extend",
"(",
"red_get_mount_connectors_from_outputs",
"(",
"outputs",
")",
")",
"return",
"keys"
] | Returns a list of mounting connectors
:param red_data: The red data to be searched
:param ignore_outputs: If outputs should be ignored
:return: A list of connectors with active mount option. | [
"Returns",
"a",
"list",
"of",
"mounting",
"connectors"
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/red.py#L261-L291 |
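A usage sketch with a hand-written RED structure. Only the top-level 'inputs'/'batches'/'outputs' layout is taken from the function above; the connector fields shown ('command', 'mount', 'access') are illustrative guesses.

```python
from cc_core.commons.red import red_get_mount_connectors

red_data = {
    'inputs': {
        'reference_data': {
            'class': 'Directory',
            'connector': {
                'command': 'red-connector-ssh',   # hypothetical connector CLI
                'mount': True,                    # marks this connector as mounting
                'access': {'host': 'example.org', 'dirPath': '/srv/reference'},
            },
        }
    }
}

# With ignore_outputs=True only the inputs are inspected.
print(red_get_mount_connectors(red_data, ignore_outputs=True))
```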
curious-containers/cc-core | cc_core/commons/red.py | cleanup | def cleanup(connector_manager, red_data, tmp_dir):
"""
Invokes the cleanup functions for all inputs.
"""
for key, arg in red_data['inputs'].items():
val = arg
if isinstance(arg, list):
for index, i in enumerate(arg):
if not isinstance(i, dict):
continue
# connector_class should be one of 'File' or 'Directory'
connector_class = i['class']
input_key = '{}_{}'.format(key, index)
path = os.path.join(tmp_dir, input_key)
connector_data = i['connector']
internal = {URL_SCHEME_IDENTIFIER: path}
if connector_class == 'File':
connector_manager.receive_cleanup(connector_data, input_key, internal)
elif connector_class == 'Directory':
connector_manager.receive_directory_cleanup(connector_data, input_key, internal)
elif isinstance(arg, dict):
# connector_class should be one of 'File' or 'Directory'
connector_class = arg['class']
input_key = key
path = os.path.join(tmp_dir, input_key)
connector_data = val['connector']
internal = {URL_SCHEME_IDENTIFIER: path}
if connector_class == 'File':
connector_manager.receive_cleanup(connector_data, input_key, internal)
elif connector_class == 'Directory':
connector_manager.receive_directory_cleanup(connector_data, input_key, internal)
try:
os.rmdir(tmp_dir)
except (OSError, FileNotFoundError):
# Maybe, raise a warning here, because not all connectors have cleaned up their contents correctly.
pass | python | def cleanup(connector_manager, red_data, tmp_dir):
"""
Invokes the cleanup functions for all inputs.
"""
for key, arg in red_data['inputs'].items():
val = arg
if isinstance(arg, list):
for index, i in enumerate(arg):
if not isinstance(i, dict):
continue
# connector_class should be one of 'File' or 'Directory'
connector_class = i['class']
input_key = '{}_{}'.format(key, index)
path = os.path.join(tmp_dir, input_key)
connector_data = i['connector']
internal = {URL_SCHEME_IDENTIFIER: path}
if connector_class == 'File':
connector_manager.receive_cleanup(connector_data, input_key, internal)
elif connector_class == 'Directory':
connector_manager.receive_directory_cleanup(connector_data, input_key, internal)
elif isinstance(arg, dict):
# connector_class should be one of 'File' or 'Directory'
connector_class = arg['class']
input_key = key
path = os.path.join(tmp_dir, input_key)
connector_data = val['connector']
internal = {URL_SCHEME_IDENTIFIER: path}
if connector_class == 'File':
connector_manager.receive_cleanup(connector_data, input_key, internal)
elif connector_class == 'Directory':
connector_manager.receive_directory_cleanup(connector_data, input_key, internal)
try:
os.rmdir(tmp_dir)
except (OSError, FileNotFoundError):
# Maybe, raise a warning here, because not all connectors have cleaned up their contents correctly.
pass | [
"def",
"cleanup",
"(",
"connector_manager",
",",
"red_data",
",",
"tmp_dir",
")",
":",
"for",
"key",
",",
"arg",
"in",
"red_data",
"[",
"'inputs'",
"]",
".",
"items",
"(",
")",
":",
"val",
"=",
"arg",
"if",
"isinstance",
"(",
"arg",
",",
"list",
")",
":",
"for",
"index",
",",
"i",
"in",
"enumerate",
"(",
"arg",
")",
":",
"if",
"not",
"isinstance",
"(",
"i",
",",
"dict",
")",
":",
"continue",
"# connector_class should be one of 'File' or 'Directory'",
"connector_class",
"=",
"i",
"[",
"'class'",
"]",
"input_key",
"=",
"'{}_{}'",
".",
"format",
"(",
"key",
",",
"index",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"input_key",
")",
"connector_data",
"=",
"i",
"[",
"'connector'",
"]",
"internal",
"=",
"{",
"URL_SCHEME_IDENTIFIER",
":",
"path",
"}",
"if",
"connector_class",
"==",
"'File'",
":",
"connector_manager",
".",
"receive_cleanup",
"(",
"connector_data",
",",
"input_key",
",",
"internal",
")",
"elif",
"connector_class",
"==",
"'Directory'",
":",
"connector_manager",
".",
"receive_directory_cleanup",
"(",
"connector_data",
",",
"input_key",
",",
"internal",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"dict",
")",
":",
"# connector_class should be one of 'File' or 'Directory'",
"connector_class",
"=",
"arg",
"[",
"'class'",
"]",
"input_key",
"=",
"key",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"input_key",
")",
"connector_data",
"=",
"val",
"[",
"'connector'",
"]",
"internal",
"=",
"{",
"URL_SCHEME_IDENTIFIER",
":",
"path",
"}",
"if",
"connector_class",
"==",
"'File'",
":",
"connector_manager",
".",
"receive_cleanup",
"(",
"connector_data",
",",
"input_key",
",",
"internal",
")",
"elif",
"connector_class",
"==",
"'Directory'",
":",
"connector_manager",
".",
"receive_directory_cleanup",
"(",
"connector_data",
",",
"input_key",
",",
"internal",
")",
"try",
":",
"os",
".",
"rmdir",
"(",
"tmp_dir",
")",
"except",
"(",
"OSError",
",",
"FileNotFoundError",
")",
":",
"# Maybe, raise a warning here, because not all connectors have cleaned up their contents correctly.",
"pass"
] | Invokes the cleanup functions for all inputs. | [
"Invokes",
"the",
"cleanup",
"functions",
"for",
"all",
"inputs",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/red.py#L454-L495 |
curious-containers/cc-core | cc_core/commons/red.py | ConnectorManager._execute_connector | def _execute_connector(connector_command, top_level_argument, *file_contents, listing=None):
"""
Executes a connector by executing the given connector_command. The content of args will be the content of the
files handed to the connector cli.
:param connector_command: The connector command to execute.
:param top_level_argument: The top level command line argument for the connector cli.
(Like 'receive' or 'send_validate')
:param file_contents: A dict of information handed over to the connector cli.
:param listing: A listing to provide to the connector cli. Will be ignored if None.
:return: A tuple containing the return code of the connector and the stderr of the command as str.
"""
# create temp_files for every file_content
temp_files = []
for file_content in file_contents:
if file_content is None:
continue
tmp_file = tempfile.NamedTemporaryFile('w')
json.dump(file_content, tmp_file)
tmp_file.flush()
temp_files.append(tmp_file)
tmp_listing_file = None
if listing:
tmp_listing_file = tempfile.NamedTemporaryFile('w')
json.dump(listing, tmp_listing_file)
tmp_listing_file.flush()
command = [connector_command, top_level_argument]
command.extend([t.name for t in temp_files])
if tmp_listing_file:
command.append('--listing {}'.format(tmp_listing_file.name))
result = execute(' '.join(command))
# close temp_files
for temp_file in temp_files:
temp_file.close()
if tmp_listing_file:
tmp_listing_file.close()
return result['returnCode'], result['stdErr'] | python | def _execute_connector(connector_command, top_level_argument, *file_contents, listing=None):
"""
Executes a connector by executing the given connector_command. The content of args will be the content of the
files handed to the connector cli.
:param connector_command: The connector command to execute.
:param top_level_argument: The top level command line argument for the connector cli.
(Like 'receive' or 'send_validate')
:param file_contents: A dict of information handed over to the connector cli.
:param listing: A listing to provide to the connector cli. Will be ignored if None.
:return: A tuple containing the return code of the connector and the stderr of the command as str.
"""
# create temp_files for every file_content
temp_files = []
for file_content in file_contents:
if file_content is None:
continue
tmp_file = tempfile.NamedTemporaryFile('w')
json.dump(file_content, tmp_file)
tmp_file.flush()
temp_files.append(tmp_file)
tmp_listing_file = None
if listing:
tmp_listing_file = tempfile.NamedTemporaryFile('w')
json.dump(listing, tmp_listing_file)
tmp_listing_file.flush()
command = [connector_command, top_level_argument]
command.extend([t.name for t in temp_files])
if tmp_listing_file:
command.append('--listing {}'.format(tmp_listing_file.name))
result = execute(' '.join(command))
# close temp_files
for temp_file in temp_files:
temp_file.close()
if tmp_listing_file:
tmp_listing_file.close()
return result['returnCode'], result['stdErr'] | [
"def",
"_execute_connector",
"(",
"connector_command",
",",
"top_level_argument",
",",
"*",
"file_contents",
",",
"listing",
"=",
"None",
")",
":",
"# create temp_files for every file_content",
"temp_files",
"=",
"[",
"]",
"for",
"file_content",
"in",
"file_contents",
":",
"if",
"file_content",
"is",
"None",
":",
"continue",
"tmp_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"'w'",
")",
"json",
".",
"dump",
"(",
"file_content",
",",
"tmp_file",
")",
"tmp_file",
".",
"flush",
"(",
")",
"temp_files",
".",
"append",
"(",
"tmp_file",
")",
"tmp_listing_file",
"=",
"None",
"if",
"listing",
":",
"tmp_listing_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"'w'",
")",
"json",
".",
"dump",
"(",
"listing",
",",
"tmp_listing_file",
")",
"tmp_listing_file",
".",
"flush",
"(",
")",
"command",
"=",
"[",
"connector_command",
",",
"top_level_argument",
"]",
"command",
".",
"extend",
"(",
"[",
"t",
".",
"name",
"for",
"t",
"in",
"temp_files",
"]",
")",
"if",
"tmp_listing_file",
":",
"command",
".",
"append",
"(",
"'--listing {}'",
".",
"format",
"(",
"tmp_listing_file",
".",
"name",
")",
")",
"result",
"=",
"execute",
"(",
"' '",
".",
"join",
"(",
"command",
")",
")",
"# close temp_files",
"for",
"temp_file",
"in",
"temp_files",
":",
"temp_file",
".",
"close",
"(",
")",
"if",
"tmp_listing_file",
":",
"tmp_listing_file",
".",
"close",
"(",
")",
"return",
"result",
"[",
"'returnCode'",
"]",
",",
"result",
"[",
"'stdErr'",
"]"
] | Executes a connector by executing the given connector_command. The content of args will be the content of the
files handed to the connector cli.
:param connector_command: The connector command to execute.
:param top_level_argument: The top level command line argument for the connector cli.
(Like 'receive' or 'send_validate')
:param file_contents: A dict of information handed over to the connector cli.
:param listing: A listing to provide to the connector cli. Will be ignored if None.
:return: A tuple containing the return code of the connector and the stderr of the command as str. | [
"Executes",
"a",
"connector",
"by",
"executing",
"the",
"given",
"connector_command",
".",
"The",
"content",
"of",
"args",
"will",
"be",
"the",
"content",
"of",
"the",
"files",
"handed",
"to",
"the",
"connector",
"cli",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/red.py#L36-L79 |
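An illustration of how the static helper above could be driven directly. The connector command and the dictionary contents are hypothetical; normally this method is called internally by the ConnectorManager receive/send wrappers.

```python
from cc_core.commons.red import ConnectorManager

access = {'url': 'https://example.org/data.csv'}    # hypothetical access information
internal = {'path': '/tmp/red/inputs/my_file'}      # hypothetical internal information

# 'receive' is one of the top-level arguments named in the docstring;
# 'red-connector-http' must be an executable on PATH for this call to succeed.
return_code, std_err = ConnectorManager._execute_connector(
    'red-connector-http', 'receive', access, internal
)
print(return_code, std_err)
```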
curious-containers/cc-core | cc_core/commons/red.py | ConnectorManager.directory_listing_content_check | def directory_listing_content_check(directory_path, listing):
"""
Checks if a given listing is present under the given directory path.
:param directory_path: The path to the base directory
:param listing: The listing to check
:return: None if no errors could be found, otherwise a string describing the error
"""
if listing:
for sub in listing:
path = os.path.join(directory_path, sub['basename'])
if sub['class'] == 'File':
if not os.path.isfile(path):
return 'listing contains "{}" but this file could not be found on disk.'.format(path)
elif sub['class'] == 'Directory':
if not os.path.isdir(path):
return 'listing contains "{}" but this directory could not be found on disk'.format(path)
listing = sub.get('listing')
if listing:
return ConnectorManager.directory_listing_content_check(path, listing)
return None | python | def directory_listing_content_check(directory_path, listing):
"""
Checks if a given listing is present under the given directory path.
:param directory_path: The path to the base directory
:param listing: The listing to check
:return: None if no errors could be found, otherwise a string describing the error
"""
if listing:
for sub in listing:
path = os.path.join(directory_path, sub['basename'])
if sub['class'] == 'File':
if not os.path.isfile(path):
return 'listing contains "{}" but this file could not be found on disk.'.format(path)
elif sub['class'] == 'Directory':
if not os.path.isdir(path):
return 'listing contains "{}" but this directory could not be found on disk'.format(path)
listing = sub.get('listing')
if listing:
return ConnectorManager.directory_listing_content_check(path, listing)
return None | [
"def",
"directory_listing_content_check",
"(",
"directory_path",
",",
"listing",
")",
":",
"if",
"listing",
":",
"for",
"sub",
"in",
"listing",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory_path",
",",
"sub",
"[",
"'basename'",
"]",
")",
"if",
"sub",
"[",
"'class'",
"]",
"==",
"'File'",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"return",
"'listing contains \"{}\" but this file could not be found on disk.'",
".",
"format",
"(",
"path",
")",
"elif",
"sub",
"[",
"'class'",
"]",
"==",
"'Directory'",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"return",
"'listing contains \"{}\" but this directory could not be found on disk'",
".",
"format",
"(",
"path",
")",
"listing",
"=",
"sub",
".",
"get",
"(",
"'listing'",
")",
"if",
"listing",
":",
"return",
"ConnectorManager",
".",
"directory_listing_content_check",
"(",
"path",
",",
"listing",
")",
"return",
"None"
] | Checks if a given listing is present under the given directory path.
:param directory_path: The path to the base directory
:param listing: The listing to check
:return: None if no errors could be found, otherwise a string describing the error | [
"Checks",
"if",
"a",
"given",
"listing",
"is",
"present",
"under",
"the",
"given",
"directory",
"path",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/red.py#L133-L153 |
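A self-contained sketch: it builds a small directory tree in a temporary location and checks two listings against it, assuming cc-core is installed.

```python
import os
import tempfile

from cc_core.commons.red import ConnectorManager

base = tempfile.mkdtemp()
os.makedirs(os.path.join(base, 'images'))
open(os.path.join(base, 'data.csv'), 'w').close()

matching = [
    {'class': 'File', 'basename': 'data.csv'},
    {'class': 'Directory', 'basename': 'images', 'listing': []},
]
missing = [{'class': 'File', 'basename': 'missing.txt'}]

print(ConnectorManager.directory_listing_content_check(base, matching))  # None -> everything found
print(ConnectorManager.directory_listing_content_check(base, missing))   # error string for missing.txt
```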
curious-containers/cc-core | cc_core/commons/gpu_info.py | get_cuda_devices | def get_cuda_devices():
"""
Imports pycuda at runtime and reads GPU information.
:return: A list of available cuda GPUs.
"""
devices = []
try:
import pycuda.autoinit
import pycuda.driver as cuda
for device_id in range(cuda.Device.count()):
vram = cuda.Device(device_id).total_memory()
devices.append(GPUDevice(device_id, vram))
except ImportError:
raise InsufficientGPUError('No Nvidia-GPUs could be found, because "pycuda" could not be imported.')
return devices | python | def get_cuda_devices():
"""
Imports pycuda at runtime and reads GPU information.
:return: A list of available cuda GPUs.
"""
devices = []
try:
import pycuda.autoinit
import pycuda.driver as cuda
for device_id in range(cuda.Device.count()):
vram = cuda.Device(device_id).total_memory()
devices.append(GPUDevice(device_id, vram))
except ImportError:
raise InsufficientGPUError('No Nvidia-GPUs could be found, because "pycuda" could not be imported.')
return devices | [
"def",
"get_cuda_devices",
"(",
")",
":",
"devices",
"=",
"[",
"]",
"try",
":",
"import",
"pycuda",
".",
"autoinit",
"import",
"pycuda",
".",
"driver",
"as",
"cuda",
"for",
"device_id",
"in",
"range",
"(",
"cuda",
".",
"Device",
".",
"count",
"(",
")",
")",
":",
"vram",
"=",
"cuda",
".",
"Device",
"(",
"device_id",
")",
".",
"total_memory",
"(",
")",
"devices",
".",
"append",
"(",
"GPUDevice",
"(",
"device_id",
",",
"vram",
")",
")",
"except",
"ImportError",
":",
"raise",
"InsufficientGPUError",
"(",
"'No Nvidia-GPUs could be found, because \"pycuda\" could not be imported.'",
")",
"return",
"devices"
] | Imports pycuda at runtime and reads GPU information.
:return: A list of available cuda GPUs. | [
"Imports",
"pycuda",
"at",
"runtime",
"and",
"reads",
"GPU",
"information",
".",
":",
"return",
":",
"A",
"list",
"of",
"available",
"cuda",
"GPUs",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/gpu_info.py#L44-L63 |
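A usage sketch; it needs pycuda and at least one Nvidia GPU, otherwise the except branch runs. The device_id attribute name is an assumption inferred from the GPUDevice(device_id, vram) constructor call above.

```python
from cc_core.commons.gpu_info import get_cuda_devices, InsufficientGPUError

try:
    for device in get_cuda_devices():
        print(device.device_id, device.vram)  # attribute names assumed from GPUDevice(device_id, vram)
except InsufficientGPUError as error:
    print(error)
```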
curious-containers/cc-core | cc_core/commons/gpu_info.py | match_gpus | def match_gpus(available_devices, requirements):
"""
Determines sufficient GPUs for the given requirements and returns a list of GPUDevices.
If there aren't sufficient GPUs, an InsufficientGPUError is thrown.
:param available_devices: A list of GPUDevices
:param requirements: A list of GPURequirements
:return: A list of sufficient devices
"""
if not requirements:
return []
if not available_devices:
raise InsufficientGPUError("No GPU devices available, but {} devices required.".format(len(requirements)))
available_devices = available_devices.copy()
used_devices = []
for req in requirements:
dev = search_device(req, available_devices)
if dev:
used_devices.append(dev)
available_devices.remove(dev)
else:
raise InsufficientGPUError("Not all GPU requirements could be fulfilled.")
return used_devices | python | def match_gpus(available_devices, requirements):
"""
Determines sufficient GPUs for the given requirements and returns a list of GPUDevices.
If there aren't sufficient GPUs, an InsufficientGPUError is thrown.
:param available_devices: A list of GPUDevices
:param requirements: A list of GPURequirements
:return: A list of sufficient devices
"""
if not requirements:
return []
if not available_devices:
raise InsufficientGPUError("No GPU devices available, but {} devices required.".format(len(requirements)))
available_devices = available_devices.copy()
used_devices = []
for req in requirements:
dev = search_device(req, available_devices)
if dev:
used_devices.append(dev)
available_devices.remove(dev)
else:
raise InsufficientGPUError("Not all GPU requirements could be fulfilled.")
return used_devices | [
"def",
"match_gpus",
"(",
"available_devices",
",",
"requirements",
")",
":",
"if",
"not",
"requirements",
":",
"return",
"[",
"]",
"if",
"not",
"available_devices",
":",
"raise",
"InsufficientGPUError",
"(",
"\"No GPU devices available, but {} devices required.\"",
".",
"format",
"(",
"len",
"(",
"requirements",
")",
")",
")",
"available_devices",
"=",
"available_devices",
".",
"copy",
"(",
")",
"used_devices",
"=",
"[",
"]",
"for",
"req",
"in",
"requirements",
":",
"dev",
"=",
"search_device",
"(",
"req",
",",
"available_devices",
")",
"if",
"dev",
":",
"used_devices",
".",
"append",
"(",
"dev",
")",
"available_devices",
".",
"remove",
"(",
"dev",
")",
"else",
":",
"raise",
"InsufficientGPUError",
"(",
"\"Not all GPU requirements could be fulfilled.\"",
")",
"return",
"used_devices"
] | Determines sufficient GPUs for the given requirements and returns a list of GPUDevices.
If there aren't sufficient GPUs, an InsufficientGPUError is thrown.
:param available_devices: A list of GPUDevices
:param requirements: A list of GPURequirements
:return: A list of sufficient devices | [
"Determines",
"sufficient",
"GPUs",
"for",
"the",
"given",
"requirements",
"and",
"returns",
"a",
"list",
"of",
"GPUDevices",
".",
"If",
"there",
"aren",
"t",
"sufficient",
"GPUs",
"a",
"InsufficientGPUException",
"is",
"thrown",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/gpu_info.py#L110-L139 |
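A sketch with hand-built devices and requirements. The VRAM numbers are arbitrary illustration values; GPUDevice and GPURequirement are constructed the same way the module itself does.

```python
from cc_core.commons.gpu_info import (GPUDevice, GPURequirement,
                                      InsufficientGPUError, match_gpus)

devices = [GPUDevice(0, 8192), GPUDevice(1, 2048)]   # (device_id, vram)
requirements = [GPURequirement(min_vram=4096)]

try:
    chosen = match_gpus(devices, requirements)
    print([d.vram for d in chosen])   # [8192] -> only the first device is sufficient
except InsufficientGPUError as error:
    print(error)
```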
curious-containers/cc-core | cc_core/commons/gpu_info.py | get_gpu_requirements | def get_gpu_requirements(gpus_reqs):
"""
Extracts the GPU requirements from a dictionary as a list of GPURequirements.
:param gpus_reqs: A dictionary {'count': <count>} or a list [{min_vram: <min_vram>}, {min_vram: <min_vram>}, ...]
:return: A list of GPURequirements
"""
requirements = []
if gpus_reqs:
if type(gpus_reqs) is dict:
count = gpus_reqs.get('count')
if count:
for i in range(count):
requirements.append(GPURequirement())
elif type(gpus_reqs) is list:
for gpu_req in gpus_reqs:
requirements.append(GPURequirement(min_vram=gpu_req['minVram']))
return requirements
else:
# If no requirements are supplied
return [] | python | def get_gpu_requirements(gpus_reqs):
"""
Extracts the GPU requirements from a dictionary as a list of GPURequirements.
:param gpus_reqs: A dictionary {'count': <count>} or a list [{min_vram: <min_vram>}, {min_vram: <min_vram>}, ...]
:return: A list of GPURequirements
"""
requirements = []
if gpus_reqs:
if type(gpus_reqs) is dict:
count = gpus_reqs.get('count')
if count:
for i in range(count):
requirements.append(GPURequirement())
elif type(gpus_reqs) is list:
for gpu_req in gpus_reqs:
requirements.append(GPURequirement(min_vram=gpu_req['minVram']))
return requirements
else:
# If no requirements are supplied
return [] | [
"def",
"get_gpu_requirements",
"(",
"gpus_reqs",
")",
":",
"requirements",
"=",
"[",
"]",
"if",
"gpus_reqs",
":",
"if",
"type",
"(",
"gpus_reqs",
")",
"is",
"dict",
":",
"count",
"=",
"gpus_reqs",
".",
"get",
"(",
"'count'",
")",
"if",
"count",
":",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"requirements",
".",
"append",
"(",
"GPURequirement",
"(",
")",
")",
"elif",
"type",
"(",
"gpus_reqs",
")",
"is",
"list",
":",
"for",
"gpu_req",
"in",
"gpus_reqs",
":",
"requirements",
".",
"append",
"(",
"GPURequirement",
"(",
"min_vram",
"=",
"gpu_req",
"[",
"'minVram'",
"]",
")",
")",
"return",
"requirements",
"else",
":",
"# If no requirements are supplied",
"return",
"[",
"]"
] | Extracts the GPU requirements from a dictionary as a list of GPURequirements.
:param gpus_reqs: A dictionary {'count': <count>} or a list [{min_vram: <min_vram>}, {min_vram: <min_vram>}, ...]
:return: A list of GPURequirements | [
"Extracts",
"the",
"GPU",
"from",
"a",
"dictionary",
"requirements",
"as",
"list",
"of",
"GPURequirements",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/gpu_info.py#L142-L163 |
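Both accepted input shapes from the docstring above, shown side by side.

```python
from cc_core.commons.gpu_info import get_gpu_requirements

by_count = get_gpu_requirements({'count': 2})                            # two unconstrained requirements
by_vram = get_gpu_requirements([{'minVram': 4096}, {'minVram': 8192}])

print(len(by_count))                   # 2
print([r.min_vram for r in by_vram])   # [4096, 8192]
```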
curious-containers/cc-core | cc_core/commons/gpu_info.py | set_nvidia_environment_variables | def set_nvidia_environment_variables(environment, gpu_ids):
"""
Updates a dictionary containing environment variables to setup Nvidia-GPUs.
:param environment: The environment variables to update
:param gpu_ids: A list of GPU ids
"""
if gpu_ids:
nvidia_visible_devices = ""
for gpu_id in gpu_ids:
nvidia_visible_devices += "{},".format(gpu_id)
environment["NVIDIA_VISIBLE_DEVICES"] = nvidia_visible_devices | python | def set_nvidia_environment_variables(environment, gpu_ids):
"""
Updates a dictionary containing environment variables to setup Nvidia-GPUs.
:param environment: The environment variables to update
:param gpu_ids: A list of GPU ids
"""
if gpu_ids:
nvidia_visible_devices = ""
for gpu_id in gpu_ids:
nvidia_visible_devices += "{},".format(gpu_id)
environment["NVIDIA_VISIBLE_DEVICES"] = nvidia_visible_devices | [
"def",
"set_nvidia_environment_variables",
"(",
"environment",
",",
"gpu_ids",
")",
":",
"if",
"gpu_ids",
":",
"nvidia_visible_devices",
"=",
"\"\"",
"for",
"gpu_id",
"in",
"gpu_ids",
":",
"nvidia_visible_devices",
"+=",
"\"{},\"",
".",
"format",
"(",
"gpu_id",
")",
"environment",
"[",
"\"NVIDIA_VISIBLE_DEVICES\"",
"]",
"=",
"nvidia_visible_devices"
] | Updates a dictionary containing environment variables to setup Nvidia-GPUs.
:param environment: The environment variables to update
:param gpu_ids: A list of GPU ids | [
"Updates",
"a",
"dictionary",
"containing",
"environment",
"variables",
"to",
"setup",
"Nvidia",
"-",
"GPUs",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/gpu_info.py#L166-L178 |
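A minimal call showing the produced variable; note that the concatenation loop above leaves a trailing comma in the value.

```python
from cc_core.commons.gpu_info import set_nvidia_environment_variables

environment = {}
set_nvidia_environment_variables(environment, [0, 2])
print(environment['NVIDIA_VISIBLE_DEVICES'])   # '0,2,' (trailing comma from the loop)
```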
curious-containers/cc-core | cc_core/commons/gpu_info.py | GPURequirement.is_sufficient | def is_sufficient(self, device):
"""
Returns whether the device is sufficient for this requirement.
:param device: A GPUDevice instance.
:type device: GPUDevice
:return: True if the requirement is fulfilled otherwise False
"""
sufficient = True
if (self.min_vram is not None) and (device.vram < self.min_vram):
sufficient = False
return sufficient | python | def is_sufficient(self, device):
"""
Returns whether the device is sufficient for this requirement.
:param device: A GPUDevice instance.
:type device: GPUDevice
:return: True if the requirement is fulfilled otherwise False
"""
sufficient = True
if (self.min_vram is not None) and (device.vram < self.min_vram):
sufficient = False
return sufficient | [
"def",
"is_sufficient",
"(",
"self",
",",
"device",
")",
":",
"sufficient",
"=",
"True",
"if",
"(",
"self",
".",
"min_vram",
"is",
"not",
"None",
")",
"and",
"(",
"device",
".",
"vram",
"<",
"self",
".",
"min_vram",
")",
":",
"sufficient",
"=",
"False",
"return",
"sufficient"
] | Returns whether the device is sufficient for this requirement.
:param device: A GPUDevice instance.
:type device: GPUDevice
:return: True if the requirement is fulfilled otherwise False | [
"Returns",
"whether",
"the",
"device",
"is",
"sufficient",
"for",
"this",
"requirement",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/gpu_info.py#L28-L41 |
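A minimal check of two devices against one requirement; the numbers are illustrative.

```python
from cc_core.commons.gpu_info import GPUDevice, GPURequirement

requirement = GPURequirement(min_vram=4096)
print(requirement.is_sufficient(GPUDevice(0, 8192)))   # True  (enough VRAM)
print(requirement.is_sufficient(GPUDevice(1, 2048)))   # False (below min_vram)
```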
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | cache_location | def cache_location():
'''Cross-platform placement of cached files'''
plat = platform.platform()
log.debug('Platform read as: {0}'.format(plat))
if plat.startswith('Windows'):
log.debug('Windows platform detected')
return os.path.join(os.environ['APPDATA'], 'OpenAccess_EPUB')
elif plat.startswith('Darwin'):
log.debug('Mac platform detected')
elif plat.startswith('Linux'):
log.debug('Linux platform detected')
else:
log.warning('Unhandled platform for cache_location')
#This code is written for Linux and Mac, don't expect success for others
path = os.path.expanduser('~')
if path == '~':
path = os.path.expanduser('~user')
if path == '~user':
log.critical('Could not resolve the correct cache location')
sys.exit('Could not resolve the correct cache location')
cache_loc = os.path.join(path, '.OpenAccess_EPUB')
log.debug('Cache located: {0}'.format(cache_loc))
return cache_loc | python | def cache_location():
'''Cross-platform placement of cached files'''
plat = platform.platform()
log.debug('Platform read as: {0}'.format(plat))
if plat.startswith('Windows'):
log.debug('Windows platform detected')
return os.path.join(os.environ['APPDATA'], 'OpenAccess_EPUB')
elif plat.startswith('Darwin'):
log.debug('Mac platform detected')
elif plat.startswith('Linux'):
log.debug('Linux platform detected')
else:
log.warning('Unhandled platform for cache_location')
#This code is written for Linux and Mac, don't expect success for others
path = os.path.expanduser('~')
if path == '~':
path = os.path.expanduser('~user')
if path == '~user':
log.critical('Could not resolve the correct cache location')
sys.exit('Could not resolve the correct cache location')
cache_loc = os.path.join(path, '.OpenAccess_EPUB')
log.debug('Cache located: {0}'.format(cache_loc))
return cache_loc | [
"def",
"cache_location",
"(",
")",
":",
"plat",
"=",
"platform",
".",
"platform",
"(",
")",
"log",
".",
"debug",
"(",
"'Platform read as: {0}'",
".",
"format",
"(",
"plat",
")",
")",
"if",
"plat",
".",
"startswith",
"(",
"'Windows'",
")",
":",
"log",
".",
"debug",
"(",
"'Windows platform detected'",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"'APPDATA'",
"]",
",",
"'OpenAccess_EPUB'",
")",
"elif",
"plat",
".",
"startswith",
"(",
"'Darwin'",
")",
":",
"log",
".",
"debug",
"(",
"'Mac platform detected'",
")",
"elif",
"plat",
".",
"startswith",
"(",
"'Linux'",
")",
":",
"log",
".",
"debug",
"(",
"'Linux platform detected'",
")",
"else",
":",
"log",
".",
"warning",
"(",
"'Unhandled platform for cache_location'",
")",
"#This code is written for Linux and Mac, don't expect success for others",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
"if",
"path",
"==",
"'~'",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~user'",
")",
"if",
"path",
"==",
"'~user'",
":",
"log",
".",
"critical",
"(",
"'Could not resolve the correct cache location'",
")",
"sys",
".",
"exit",
"(",
"'Could not resolve the correct cache location'",
")",
"cache_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'.OpenAccess_EPUB'",
")",
"log",
".",
"debug",
"(",
"'Cache located: {0}'",
".",
"format",
"(",
"cache_loc",
")",
")",
"return",
"cache_loc"
] | Cross-platform placement of cached files | [
"Cross",
"-",
"platform",
"placement",
"of",
"cached",
"files"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L86-L109 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | load_config_module | def load_config_module():
"""
If the config.py file exists, import it as a module. If it does not exist,
call sys.exit() with a request to run oaepub configure.
"""
import imp
config_path = config_location()
try:
config = imp.load_source('config', config_path)
except IOError:
log.critical('Config file not found. oaepub exiting...')
sys.exit('Config file not found. Please run \'oaepub configure\'')
else:
log.debug('Config file loaded from {0}'.format(config_path))
return config | python | def load_config_module():
"""
If the config.py file exists, import it as a module. If it does not exist,
call sys.exit() with a request to run oaepub configure.
"""
import imp
config_path = config_location()
try:
config = imp.load_source('config', config_path)
except IOError:
log.critical('Config file not found. oaepub exiting...')
sys.exit('Config file not found. Please run \'oaepub configure\'')
else:
log.debug('Config file loaded from {0}'.format(config_path))
return config | [
"def",
"load_config_module",
"(",
")",
":",
"import",
"imp",
"config_path",
"=",
"config_location",
"(",
")",
"try",
":",
"config",
"=",
"imp",
".",
"load_source",
"(",
"'config'",
",",
"config_path",
")",
"except",
"IOError",
":",
"log",
".",
"critical",
"(",
"'Config file not found. oaepub exiting...'",
")",
"sys",
".",
"exit",
"(",
"'Config file not found. Please run \\'oaepub configure\\''",
")",
"else",
":",
"log",
".",
"debug",
"(",
"'Config file loaded from {0}'",
".",
"format",
"(",
"config_path",
")",
")",
"return",
"config"
] | If the config.py file exists, import it as a module. If it does not exist,
call sys.exit() with a request to run oaepub configure. | [
"If",
"the",
"config",
".",
"py",
"file",
"exists",
"import",
"it",
"as",
"a",
"module",
".",
"If",
"it",
"does",
"not",
"exist",
"call",
"sys",
".",
"exit",
"()",
"with",
"a",
"request",
"to",
"run",
"oaepub",
"configure",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L133-L147 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | evaluate_relative_path | def evaluate_relative_path(working=os.getcwd(), relative=''):
"""
This function receives two strings representing system paths. The first is
the working directory and it should be an absolute path. The second is the
relative path and it should not be absolute. This function will render an
OS-appropriate absolute path, which is the normalized path from working
to relative.
"""
return os.path.normpath(os.path.join(working, relative)) | python | def evaluate_relative_path(working=os.getcwd(), relative=''):
"""
This function receives two strings representing system paths. The first is
the working directory and it should be an absolute path. The second is the
relative path and it should not be absolute. This function will render an
OS-appropriate absolute path, which is the normalized path from working
to relative.
"""
return os.path.normpath(os.path.join(working, relative)) | [
"def",
"evaluate_relative_path",
"(",
"working",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"relative",
"=",
"''",
")",
":",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"working",
",",
"relative",
")",
")"
] | This function receives two strings representing system paths. The first is
the working directory and it should be an absolute path. The second is the
relative path and it should not be absolute. This function will render an
OS-appropriate absolute path, which is the normalized path from working
to relative. | [
"This",
"function",
"receives",
"two",
"strings",
"representing",
"system",
"paths",
".",
"The",
"first",
"is",
"the",
"working",
"directory",
"and",
"it",
"should",
"be",
"an",
"absolute",
"path",
".",
"The",
"second",
"is",
"the",
"relative",
"path",
"and",
"it",
"should",
"not",
"be",
"absolute",
".",
"This",
"function",
"will",
"render",
"an",
"OS",
"-",
"appropriate",
"absolute",
"path",
"which",
"is",
"the",
"normalized",
"path",
"from",
"working",
"to",
"relative",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L156-L164 |
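Pure path arithmetic, so this sketch runs anywhere openaccess_epub is importable. Note that the `working` default (os.getcwd()) is evaluated once, when the module is imported, not at call time.

```python
from openaccess_epub.utils import evaluate_relative_path

print(evaluate_relative_path('/home/user/articles', '../output'))
# -> '/home/user/output' on POSIX systems
```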
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | get_absolute_path | def get_absolute_path(some_path):
"""
This function will return an appropriate absolute path for the path it is
given. If the input is absolute, it will return unmodified; if the input is
relative, it will be rendered as relative to the current working directory.
"""
if os.path.isabs(some_path):
return some_path
else:
return evaluate_relative_path(os.getcwd(), some_path) | python | def get_absolute_path(some_path):
"""
This function will return an appropriate absolute path for the path it is
given. If the input is absolute, it will return unmodified; if the input is
relative, it will be rendered as relative to the current working directory.
"""
if os.path.isabs(some_path):
return some_path
else:
return evaluate_relative_path(os.getcwd(), some_path) | [
"def",
"get_absolute_path",
"(",
"some_path",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"some_path",
")",
":",
"return",
"some_path",
"else",
":",
"return",
"evaluate_relative_path",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"some_path",
")"
] | This function will return an appropriate absolute path for the path it is
given. If the input is absolute, it will return unmodified; if the input is
relative, it will be rendered as relative to the current working directory. | [
"This",
"function",
"will",
"return",
"an",
"appropriate",
"absolute",
"path",
"for",
"the",
"path",
"it",
"is",
"given",
".",
"If",
"the",
"input",
"is",
"absolute",
"it",
"will",
"return",
"unmodified",
";",
"if",
"the",
"input",
"is",
"relative",
"it",
"will",
"be",
"rendered",
"as",
"relative",
"to",
"the",
"current",
"working",
"directory",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L167-L176 |
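Companion sketch for the helper above: absolute inputs pass through unchanged, relative ones are resolved against the current working directory.

```python
from openaccess_epub.utils import get_absolute_path

print(get_absolute_path('/tmp/article.xml'))   # already absolute, returned as-is
print(get_absolute_path('article.xml'))        # normalized against os.getcwd()
```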
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | get_output_directory | def get_output_directory(args):
"""
Determination of the directory for output placement involves possibilities
for explicit user instruction (absolute path or relative to execution) and
implicit default configuration (absolute path or relative to input) from
the system global configuration file. This function is responsible for
reliably returning the appropriate output directory which will contain any
log(s), ePub(s), and unzipped output of OpenAccess_EPUB.
It utilizes the parsed args, passed as an object, and is self-sufficient in
accessing the config file.
All paths returned by this function are absolute.
"""
#Import the global config file as a module
import imp
config_path = os.path.join(cache_location(), 'config.py')
try:
config = imp.load_source('config', config_path)
except IOError:
print('Could not find {0}, please run oae-quickstart'.format(config_path))
sys.exit()
#args.output is the explicit user instruction, None if unspecified
if args.output:
#args.output may be an absolute path
if os.path.isabs(args.output):
return args.output # return as is
#or args.output may be a relative path, relative to cwd
else:
return evaluate_relative_path(relative=args.output)
#config.default_output for default behavior without explicit instruction
else:
#config.default_output may be an absolute_path
if os.path.isabs(config.default_output):
return config.default_output
#or config.default_output may be a relative path, relative to input
else:
if args.input: # The case of single input
if 'http://www' in args.input:
#Fetched from internet by URL
raw_name = url_input(args.input, download=False)
abs_input_path = os.path.join(os.getcwd(), raw_name+'.xml')
elif args.input[:4] == 'doi:':
#Fetched from internet by DOI
raw_name = doi_input(args.input, download=False)
abs_input_path = os.path.join(os.getcwd(), raw_name+'.xml')
else:
#Local option, could be anywhere
abs_input_path = get_absolute_path(args.input)
abs_input_parent = os.path.split(abs_input_path)[0]
return evaluate_relative_path(abs_input_parent, config.default_output)
elif args.batch: # The case of Batch Mode
#Batch should only work on a supplied directory
abs_batch_path = get_absolute_path(args.batch)
return abs_batch_path
elif args.zip:
#Zip is a local-only option, behaves just like local xml
abs_input_path = get_absolute_path(args.zip)
abs_input_parent = os.path.split(abs_input_path)[0]
return evaluate_relative_path(abs_input_parent, config.default_output)
elif args.collection:
return os.getcwd()
else: # Un-handled or currently unsupported options
print('The output location could not be determined...')
sys.exit() | python | def get_output_directory(args):
"""
Determination of the directory for output placement involves possibilities
for explicit user instruction (absolute path or relative to execution) and
implicit default configuration (absolute path or relative to input) from
the system global configuration file. This function is responsible for
reliably returning the appropriate output directory which will contain any
log(s), ePub(s), and unzipped output of OpenAccess_EPUB.
It utilizes the parsed args, passed as an object, and is self-sufficient in
accessing the config file.
All paths returned by this function are absolute.
"""
#Import the global config file as a module
import imp
config_path = os.path.join(cache_location(), 'config.py')
try:
config = imp.load_source('config', config_path)
except IOError:
print('Could not find {0}, please run oae-quickstart'.format(config_path))
sys.exit()
#args.output is the explicit user instruction, None if unspecified
if args.output:
#args.output may be an absolute path
if os.path.isabs(args.output):
return args.output # return as is
#or args.output may be a relative path, relative to cwd
else:
return evaluate_relative_path(relative=args.output)
#config.default_output for default behavior without explicit instruction
else:
#config.default_output may be an absolute_path
if os.path.isabs(config.default_output):
return config.default_output
#or config.default_output may be a relative path, relative to input
else:
if args.input: # The case of single input
if 'http://www' in args.input:
#Fetched from internet by URL
raw_name = url_input(args.input, download=False)
abs_input_path = os.path.join(os.getcwd(), raw_name+'.xml')
elif args.input[:4] == 'doi:':
#Fetched from internet by DOI
raw_name = doi_input(args.input, download=False)
abs_input_path = os.path.join(os.getcwd(), raw_name+'.xml')
else:
#Local option, could be anywhere
abs_input_path = get_absolute_path(args.input)
abs_input_parent = os.path.split(abs_input_path)[0]
return evaluate_relative_path(abs_input_parent, config.default_output)
elif args.batch: # The case of Batch Mode
#Batch should only work on a supplied directory
abs_batch_path = get_absolute_path(args.batch)
return abs_batch_path
elif args.zip:
#Zip is a local-only option, behaves just like local xml
abs_input_path = get_absolute_path(args.zip)
abs_input_parent = os.path.split(abs_input_path)[0]
return evaluate_relative_path(abs_input_parent, config.default_output)
elif args.collection:
return os.getcwd()
else: # Un-handled or currently unsupported options
print('The output location could not be determined...')
sys.exit() | [
"def",
"get_output_directory",
"(",
"args",
")",
":",
"#Import the global config file as a module",
"import",
"imp",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cache_location",
"(",
")",
",",
"'config.py'",
")",
"try",
":",
"config",
"=",
"imp",
".",
"load_source",
"(",
"'config'",
",",
"config_path",
")",
"except",
"IOError",
":",
"print",
"(",
"'Could not find {0}, please run oae-quickstart'",
".",
"format",
"(",
"config_path",
")",
")",
"sys",
".",
"exit",
"(",
")",
"#args.output is the explicit user instruction, None if unspecified",
"if",
"args",
".",
"output",
":",
"#args.output may be an absolute path",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"args",
".",
"output",
")",
":",
"return",
"args",
".",
"output",
"# return as is",
"#or args.output may be a relative path, relative to cwd",
"else",
":",
"return",
"evaluate_relative_path",
"(",
"relative",
"=",
"args",
".",
"output",
")",
"#config.default_output for default behavior without explicit instruction",
"else",
":",
"#config.default_output may be an absolute_path",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"config",
".",
"default_output",
")",
":",
"return",
"config",
".",
"default_output",
"#or config.default_output may be a relative path, relative to input",
"else",
":",
"if",
"args",
".",
"input",
":",
"# The case of single input",
"if",
"'http://www'",
"in",
"args",
".",
"input",
":",
"#Fetched from internet by URL",
"raw_name",
"=",
"url_input",
"(",
"args",
".",
"input",
",",
"download",
"=",
"False",
")",
"abs_input_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"raw_name",
"+",
"'.xml'",
")",
"elif",
"args",
".",
"input",
"[",
":",
"4",
"]",
"==",
"'doi:'",
":",
"#Fetched from internet by DOI",
"raw_name",
"=",
"doi_input",
"(",
"args",
".",
"input",
",",
"download",
"=",
"False",
")",
"abs_input_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"raw_name",
"+",
"'.xml'",
")",
"else",
":",
"#Local option, could be anywhere",
"abs_input_path",
"=",
"get_absolute_path",
"(",
"args",
".",
"input",
")",
"abs_input_parent",
"=",
"os",
".",
"path",
".",
"split",
"(",
"abs_input_path",
")",
"[",
"0",
"]",
"return",
"evaluate_relative_path",
"(",
"abs_input_parent",
",",
"config",
".",
"default_output",
")",
"elif",
"args",
".",
"batch",
":",
"# The case of Batch Mode",
"#Batch should only work on a supplied directory",
"abs_batch_path",
"=",
"get_absolute_path",
"(",
"args",
".",
"batch",
")",
"return",
"abs_batch_path",
"elif",
"args",
".",
"zip",
":",
"#Zip is a local-only option, behaves just like local xml",
"abs_input_path",
"=",
"get_absolute_path",
"(",
"args",
".",
"zip",
")",
"abs_input_parent",
"=",
"os",
".",
"path",
".",
"split",
"(",
"abs_input_path",
")",
"[",
"0",
"]",
"return",
"evaluate_relative_path",
"(",
"abs_input_parent",
",",
"config",
".",
"default_output",
")",
"elif",
"args",
".",
"collection",
":",
"return",
"os",
".",
"getcwd",
"(",
")",
"else",
":",
"# Un-handled or currently unsupported options",
"print",
"(",
"'The output location could not be determined...'",
")",
"sys",
".",
"exit",
"(",
")"
] | Determination of the directory for output placement involves possibilities
for explicit user instruction (absolute path or relative to execution) and
implicit default configuration (absolute path or relative to input) from
the system global configuration file. This function is responsible for
reliably returning the appropriate output directory which will contain any
log(s), ePub(s), and unzipped output of OpenAccess_EPUB.
It utilizes the parsed args, passed as an object, and is self-sufficient in
accessing the config file.
All paths returned by this function are absolute. | [
"Determination",
"of",
"the",
"directory",
"for",
"output",
"placement",
"involves",
"possibilities",
"for",
"explicit",
"user",
"instruction",
"(",
"absolute",
"path",
"or",
"relative",
"to",
"execution",
")",
"and",
"implicit",
"default",
"configuration",
"(",
"absolute",
"path",
"or",
"relative",
"to",
"input",
")",
"from",
"the",
"system",
"global",
"configuration",
"file",
".",
"This",
"function",
"is",
"responsible",
"for",
"reliably",
"returning",
"the",
"appropriate",
"output",
"directory",
"which",
"will",
"contain",
"any",
"log",
"(",
"s",
")",
"ePub",
"(",
"s",
")",
"and",
"unzipped",
"output",
"of",
"OpenAccess_EPUB",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L179-L243 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | file_root_name | def file_root_name(name):
"""
Returns the root name of a file from a full file path.
It will not raise an error if the result is empty, but a warning will be
issued.
"""
base = os.path.basename(name)
root = os.path.splitext(base)[0]
if not root:
warning = 'file_root_name returned an empty root name from \"{0}\"'
log.warning(warning.format(name))
return root | python | def file_root_name(name):
"""
Returns the root name of a file from a full file path.
It will not raise an error if the result is empty, but a warning will be
issued.
"""
base = os.path.basename(name)
root = os.path.splitext(base)[0]
if not root:
warning = 'file_root_name returned an empty root name from \"{0}\"'
log.warning(warning.format(name))
return root | [
"def",
"file_root_name",
"(",
"name",
")",
":",
"base",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"root",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"base",
")",
"[",
"0",
"]",
"if",
"not",
"root",
":",
"warning",
"=",
"'file_root_name returned an empty root name from \\\"{0}\\\"'",
"log",
".",
"warning",
"(",
"warning",
".",
"format",
"(",
"name",
")",
")",
"return",
"root"
] | Returns the root name of a file from a full file path.
It will not raise an error if the result is empty, but a warning will be
issued. | [
"Returns",
"the",
"root",
"name",
"of",
"a",
"file",
"from",
"a",
"full",
"file",
"path",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L246-L258 |
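A quick illustration; only the final extension is stripped, so dotted article names keep their inner dots.

```python
from openaccess_epub.utils import file_root_name

print(file_root_name('/data/articles/journal.pone.0123456.xml'))
# -> 'journal.pone.0123456'
print(file_root_name('/data/articles/'))   # empty root; a warning is logged
```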
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | files_with_ext | def files_with_ext(extension, directory='.', recursive=False):
"""
Generator function that will iterate over all files in the specified
directory and return a path to the files which possess a matching extension.
You should include the period in your extension, and matching is not case
sensitive: '.xml' will also match '.XML' and vice versa.
An empty string passed to extension will match extensionless files.
"""
if recursive:
log.info('Recursively searching {0} for files with extension "{1}"'.format(directory, extension))
for dirname, subdirnames, filenames in os.walk(directory):
for filename in filenames:
filepath = os.path.join(dirname, filename)
_root, ext = os.path.splitext(filepath)
if extension.lower() == ext.lower():
yield filepath
else:
log.info('Looking in {0} for files with extension: "{1}"'.format(directory, extension))
for name in os.listdir(directory):
filepath = os.path.join(directory, name)
if not os.path.isfile(filepath): # Skip non-files
continue
_root, ext = os.path.splitext(filepath)
if extension.lower() == ext.lower():
yield filepath | python | def files_with_ext(extension, directory='.', recursive=False):
"""
Generator function that will iterate over all files in the specified
directory and return a path to the files which possess a matching extension.
You should include the period in your extension, and matching is not case
sensitive: '.xml' will also match '.XML' and vice versa.
An empty string passed to extension will match extensionless files.
"""
if recursive:
log.info('Recursively searching {0} for files with extension "{1}"'.format(directory, extension))
for dirname, subdirnames, filenames in os.walk(directory):
for filename in filenames:
filepath = os.path.join(dirname, filename)
_root, ext = os.path.splitext(filepath)
if extension.lower() == ext.lower():
yield filepath
else:
log.info('Looking in {0} for files with extension: "{1}"'.format(directory, extension))
for name in os.listdir(directory):
filepath = os.path.join(directory, name)
if not os.path.isfile(filepath): # Skip non-files
continue
_root, ext = os.path.splitext(filepath)
if extension.lower() == ext.lower():
yield filepath | [
"def",
"files_with_ext",
"(",
"extension",
",",
"directory",
"=",
"'.'",
",",
"recursive",
"=",
"False",
")",
":",
"if",
"recursive",
":",
"log",
".",
"info",
"(",
"'Recursively searching {0} for files with extension \"{1}\"'",
".",
"format",
"(",
"directory",
",",
"extension",
")",
")",
"for",
"dirname",
",",
"subdirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"directory",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"filename",
")",
"_root",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filepath",
")",
"if",
"extension",
".",
"lower",
"(",
")",
"==",
"ext",
".",
"lower",
"(",
")",
":",
"yield",
"filepath",
"else",
":",
"log",
".",
"info",
"(",
"'Looking in {0} for files with extension: \"{1}\"'",
".",
"format",
"(",
"directory",
",",
"extension",
")",
")",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"name",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"# Skip non-files",
"continue",
"_root",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filepath",
")",
"if",
"extension",
".",
"lower",
"(",
")",
"==",
"ext",
".",
"lower",
"(",
")",
":",
"yield",
"filepath"
] | Generator function that will iterate over all files in the specified
directory and return a path to the files which possess a matching extension.
You should include the period in your extension, and matching is not case
sensitive: '.xml' will also match '.XML' and vice versa.
An empty string passed to extension will match extensionless files. | [
"Generator",
"function",
"that",
"will",
"iterate",
"over",
"all",
"files",
"in",
"the",
"specified",
"directory",
"and",
"return",
"a",
"path",
"to",
"the",
"files",
"which",
"possess",
"a",
"matching",
"extension",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L261-L288 |
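A usage sketch; the directory is hypothetical. The generator yields paths lazily, and matching is case-insensitive as described above.

```python
from openaccess_epub.utils import files_with_ext

for xml_path in files_with_ext('.xml', directory='/data/articles', recursive=True):
    print(xml_path)
```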
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | epubcheck | def epubcheck(epubname, config=None):
"""
This method takes the name of an epub file as an argument. This name is
the input for the java execution of a locally installed epubcheck-.jar. The
location of this .jar file is configured in config.py.
"""
if config is None:
config = load_config_module()
r, e = os.path.splitext(epubname)
if not e:
log.warning('Missing file extension, appending ".epub"')
e = '.epub'
epubname = r + e
elif not e == '.epub':
log.warning('File does not have ".epub" extension, appending it')
epubname += '.epub'
subprocess.call(['java', '-jar', config.epubcheck_jarfile, epubname]) | python | def epubcheck(epubname, config=None):
"""
This method takes the name of an epub file as an argument. This name is
the input for the java execution of a locally installed epubcheck-.jar. The
location of this .jar file is configured in config.py.
"""
if config is None:
config = load_config_module()
r, e = os.path.splitext(epubname)
if not e:
log.warning('Missing file extension, appending ".epub"')
e = '.epub'
epubname = r + e
elif not e == '.epub':
log.warning('File does not have ".epub" extension, appending it')
epubname += '.epub'
subprocess.call(['java', '-jar', config.epubcheck_jarfile, epubname]) | [
"def",
"epubcheck",
"(",
"epubname",
",",
"config",
"=",
"None",
")",
":",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"load_config_module",
"(",
")",
"r",
",",
"e",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"epubname",
")",
"if",
"not",
"e",
":",
"log",
".",
"warning",
"(",
"'Missing file extension, appending \".epub\"'",
")",
"e",
"=",
"'.epub'",
"epubname",
"=",
"r",
"+",
"e",
"elif",
"not",
"e",
"==",
"'.epub'",
":",
"log",
".",
"warning",
"(",
"'File does not have \".epub\" extension, appending it'",
")",
"epubname",
"+=",
"'.epub'",
"subprocess",
".",
"call",
"(",
"[",
"'java'",
",",
"'-jar'",
",",
"config",
".",
"epubcheck_jarfile",
",",
"epubname",
"]",
")"
] | This method takes the name of an epub file as an argument. This name is
the input for the java execution of a locally installed epubcheck-.jar. The
location of this .jar file is configured in config.py. | [
"This",
"method",
"takes",
"the",
"name",
"of",
"an",
"epub",
"file",
"as",
"an",
"argument",
".",
"This",
"name",
"is",
"the",
"input",
"for",
"the",
"java",
"execution",
"of",
"a",
"locally",
"installed",
"epubcheck",
"-",
".",
"jar",
".",
"The",
"location",
"of",
"this",
".",
"jar",
"file",
"is",
"configured",
"in",
"config",
".",
"py",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L291-L307 |
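A short usage sketch for epubcheck above; it assumes the package is installed, a Java runtime is available, and the user's config module points at a local epubcheck .jar file.

```python
# Sketch only: requires Java and a configured jar path (config.epubcheck_jarfile).
from openaccess_epub.utils import epubcheck

# The ".epub" extension is appended automatically if it is missing.
epubcheck('output/my_article')
```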
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | dir_exists | def dir_exists(directory):
"""
If a directory already exists that will be overwritten by some action, this
will ask the user whether or not to continue with the deletion.
If the user responds affirmatively, then the directory will be removed. If
the user responds negatively, then the process will abort.
"""
log.info('Directory exists! Asking the user')
reply = input('''The directory {0} already exists.
It will be overwritten if the operation continues.
Replace? [Y/n]'''.format(directory))
if reply.lower() in ['y', 'yes', '']:
shutil.rmtree(directory)
os.makedirs(directory)
else:
log.critical('Aborting process, user declined overwriting {0}'.format(directory))
sys.exit('Aborting process!') | python | def dir_exists(directory):
"""
If a directory already exists that will be overwritten by some action, this
will ask the user whether or not to continue with the deletion.
If the user responds affirmatively, then the directory will be removed. If
the user responds negatively, then the process will abort.
"""
log.info('Directory exists! Asking the user')
reply = input('''The directory {0} already exists.
It will be overwritten if the operation continues.
Replace? [Y/n]'''.format(directory))
if reply.lower() in ['y', 'yes', '']:
shutil.rmtree(directory)
os.makedirs(directory)
else:
log.critical('Aborting process, user declined overwriting {0}'.format(directory))
sys.exit('Aborting process!') | [
"def",
"dir_exists",
"(",
"directory",
")",
":",
"log",
".",
"info",
"(",
"'Directory exists! Asking the user'",
")",
"reply",
"=",
"input",
"(",
"'''The directory {0} already exists.\nIt will be overwritten if the operation continues.\nReplace? [Y/n]'''",
".",
"format",
"(",
"directory",
")",
")",
"if",
"reply",
".",
"lower",
"(",
")",
"in",
"[",
"'y'",
",",
"'yes'",
",",
"''",
"]",
":",
"shutil",
".",
"rmtree",
"(",
"directory",
")",
"os",
".",
"makedirs",
"(",
"directory",
")",
"else",
":",
"log",
".",
"critical",
"(",
"'Aborting process, user declined overwriting {0}'",
".",
"format",
"(",
"directory",
")",
")",
"sys",
".",
"exit",
"(",
"'Aborting process!'",
")"
] | If a directory already exists that will be overwritten by some action, this
will ask the user whether or not to continue with the deletion.
If the user responds affirmatively, then the directory will be removed. If
the user responds negatively, then the process will abort. | [
"If",
"a",
"directory",
"already",
"exists",
"that",
"will",
"be",
"overwritten",
"by",
"some",
"action",
"this",
"will",
"ask",
"the",
"user",
"whether",
"or",
"not",
"to",
"continue",
"with",
"the",
"deletion",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L310-L327 |
bachya/pyflunearyou | pyflunearyou/helpers/geo.py | get_nearest_by_coordinates | def get_nearest_by_coordinates(
data: list, latitude_key: str, longitude_key: str,
target_latitude: float, target_longitude: float) -> Any:
"""Get the closest dict entry based on latitude/longitude."""
return min(
data,
key=lambda p: haversine(
target_latitude,
target_longitude,
float(p[latitude_key]),
float(p[longitude_key])
)) | python | def get_nearest_by_coordinates(
data: list, latitude_key: str, longitude_key: str,
target_latitude: float, target_longitude: float) -> Any:
"""Get the closest dict entry based on latitude/longitude."""
return min(
data,
key=lambda p: haversine(
target_latitude,
target_longitude,
float(p[latitude_key]),
float(p[longitude_key])
)) | [
"def",
"get_nearest_by_coordinates",
"(",
"data",
":",
"list",
",",
"latitude_key",
":",
"str",
",",
"longitude_key",
":",
"str",
",",
"target_latitude",
":",
"float",
",",
"target_longitude",
":",
"float",
")",
"->",
"Any",
":",
"return",
"min",
"(",
"data",
",",
"key",
"=",
"lambda",
"p",
":",
"haversine",
"(",
"target_latitude",
",",
"target_longitude",
",",
"float",
"(",
"p",
"[",
"latitude_key",
"]",
")",
",",
"float",
"(",
"p",
"[",
"longitude_key",
"]",
")",
")",
")"
] | Get the closest dict entry based on latitude/longitude. | [
"Get",
"the",
"closest",
"dict",
"entry",
"based",
"on",
"latitude",
"/",
"longitude",
"."
] | train | https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/helpers/geo.py#L7-L18 |
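An illustrative call of get_nearest_by_coordinates above; the station records and the target coordinates are made-up sample data, and the import path follows the file location shown in the row.

```python
# Sketch only: two fake stations; the target sits near station "A".
from pyflunearyou.helpers.geo import get_nearest_by_coordinates

stations = [
    {"name": "A", "lat": "39.74", "lon": "-104.99"},
    {"name": "B", "lat": "40.71", "lon": "-74.00"},
]

nearest = get_nearest_by_coordinates(stations, "lat", "lon", 39.73, -104.98)
print(nearest["name"])  # -> "A", the record closest to the target point
```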
bachya/pyflunearyou | pyflunearyou/helpers/geo.py | haversine | def haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
"""Determine the distance between two latitude/longitude pairs."""
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
calc_a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
calc_c = 2 * asin(sqrt(calc_a))
return 6371 * calc_c | python | def haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
"""Determine the distance between two latitude/longitude pairs."""
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
calc_a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
calc_c = 2 * asin(sqrt(calc_a))
return 6371 * calc_c | [
"def",
"haversine",
"(",
"lat1",
":",
"float",
",",
"lon1",
":",
"float",
",",
"lat2",
":",
"float",
",",
"lon2",
":",
"float",
")",
"->",
"float",
":",
"lon1",
",",
"lat1",
",",
"lon2",
",",
"lat2",
"=",
"map",
"(",
"radians",
",",
"[",
"lon1",
",",
"lat1",
",",
"lon2",
",",
"lat2",
"]",
")",
"# haversine formula",
"dlon",
"=",
"lon2",
"-",
"lon1",
"dlat",
"=",
"lat2",
"-",
"lat1",
"calc_a",
"=",
"sin",
"(",
"dlat",
"/",
"2",
")",
"**",
"2",
"+",
"cos",
"(",
"lat1",
")",
"*",
"cos",
"(",
"lat2",
")",
"*",
"sin",
"(",
"dlon",
"/",
"2",
")",
"**",
"2",
"calc_c",
"=",
"2",
"*",
"asin",
"(",
"sqrt",
"(",
"calc_a",
")",
")",
"return",
"6371",
"*",
"calc_c"
] | Determine the distance between two latitude/longitude pairs. | [
"Determine",
"the",
"distance",
"between",
"two",
"latitude",
"/",
"longitude",
"pairs",
"."
] | train | https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/helpers/geo.py#L21-L31 |
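A standalone restatement of the haversine formula above with one worked check; the city coordinates are approximate.

```python
from math import asin, cos, radians, sin, sqrt

def haversine(lat1, lon1, lat2, lon2):
    # Same great-circle formula as above, Earth radius fixed at 6371 km.
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    dlon, dlat = lon2 - lon1, lat2 - lat1
    calc_a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    return 6371 * 2 * asin(sqrt(calc_a))

# Berlin (52.52 N, 13.405 E) to Paris (48.8566 N, 2.3522 E): about 877 km.
print(round(haversine(52.52, 13.405, 48.8566, 2.3522)))
```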
ChrisTimperley/Kaskara | python/kaskara/insertions.py | InsertionPointDB.in_file | def in_file(self, fn: str) -> Iterator[InsertionPoint]:
"""
Returns an iterator over all of the insertion points in a given file.
"""
logger.debug("finding insertion points in file: %s", fn)
yield from self.__file_insertions.get(fn, []) | python | def in_file(self, fn: str) -> Iterator[InsertionPoint]:
"""
Returns an iterator over all of the insertion points in a given file.
"""
logger.debug("finding insertion points in file: %s", fn)
yield from self.__file_insertions.get(fn, []) | [
"def",
"in_file",
"(",
"self",
",",
"fn",
":",
"str",
")",
"->",
"Iterator",
"[",
"InsertionPoint",
"]",
":",
"logger",
".",
"debug",
"(",
"\"finding insertion points in file: %s\"",
",",
"fn",
")",
"yield",
"from",
"self",
".",
"__file_insertions",
".",
"get",
"(",
"fn",
",",
"[",
"]",
")"
] | Returns an iterator over all of the insertion points in a given file. | [
"Returns",
"an",
"iterator",
"over",
"all",
"of",
"the",
"insertion",
"points",
"in",
"a",
"given",
"file",
"."
] | train | https://github.com/ChrisTimperley/Kaskara/blob/3d182d95b2938508e5990eddd30321be15e2f2ef/python/kaskara/insertions.py#L91-L96 |
ChrisTimperley/Kaskara | python/kaskara/insertions.py | InsertionPointDB.at_line | def at_line(self, line: FileLine) -> Iterator[InsertionPoint]:
"""
Returns an iterator over all of the insertion points located at a
given line.
"""
logger.debug("finding insertion points at line: %s", str(line))
filename = line.filename # type: str
line_num = line.num # type: int
for ins in self.in_file(filename):
if line_num == ins.location.line:
logger.debug("found insertion point at line [%s]: %s",
str(line), ins)
yield ins | python | def at_line(self, line: FileLine) -> Iterator[InsertionPoint]:
"""
Returns an iterator over all of the insertion points located at a
given line.
"""
logger.debug("finding insertion points at line: %s", str(line))
filename = line.filename # type: str
line_num = line.num # type: int
for ins in self.in_file(filename):
if line_num == ins.location.line:
logger.debug("found insertion point at line [%s]: %s",
str(line), ins)
yield ins | [
"def",
"at_line",
"(",
"self",
",",
"line",
":",
"FileLine",
")",
"->",
"Iterator",
"[",
"InsertionPoint",
"]",
":",
"logger",
".",
"debug",
"(",
"\"finding insertion points at line: %s\"",
",",
"str",
"(",
"line",
")",
")",
"filename",
"=",
"line",
".",
"filename",
"# type: str",
"line_num",
"=",
"line",
".",
"num",
"# type: int",
"for",
"ins",
"in",
"self",
".",
"in_file",
"(",
"filename",
")",
":",
"if",
"line_num",
"==",
"ins",
".",
"location",
".",
"line",
":",
"logger",
".",
"debug",
"(",
"\"found insertion point at line [%s]: %s\"",
",",
"str",
"(",
"line",
")",
",",
"ins",
")",
"yield",
"ins"
] | Returns an iterator over all of the insertion points located at a
given line. | [
"Returns",
"an",
"iterator",
"over",
"all",
"of",
"the",
"insertion",
"points",
"located",
"at",
"a",
"given",
"line",
"."
] | train | https://github.com/ChrisTimperley/Kaskara/blob/3d182d95b2938508e5990eddd30321be15e2f2ef/python/kaskara/insertions.py#L98-L110 |
linkhub-sdk/popbill.py | popbill/closedownService.py | ClosedownService.getUnitCost | def getUnitCost(self, CorpNum):
""" ν΄νμ
μ‘°ν λ¨κ° νμΈ.
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
return
λ°νλ¨κ° by float
raise
PopbillException
"""
result = self._httpget('/CloseDown/UnitCost', CorpNum)
return float(result.unitCost) | python | def getUnitCost(self, CorpNum):
""" ν΄νμ
μ‘°ν λ¨κ° νμΈ.
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
return
λ°νλ¨κ° by float
raise
PopbillException
"""
result = self._httpget('/CloseDown/UnitCost', CorpNum)
return float(result.unitCost) | [
"def",
"getUnitCost",
"(",
"self",
",",
"CorpNum",
")",
":",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/CloseDown/UnitCost'",
",",
"CorpNum",
")",
"return",
"float",
"(",
"result",
".",
"unitCost",
")"
] | Check the unit cost for a closedown lookup.
args
CorpNum : Popbill member business registration number
return
unit cost as float
raise
PopbillException | [
"Check",
"the",
"unit",
"cost",
"for",
"a",
"closedown",
"lookup",
".",
"args",
"CorpNum",
":",
"Popbill",
"member",
"business",
"registration",
"number",
"return",
"unit",
"cost",
"as",
"float",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/closedownService.py#L39-L51 |
linkhub-sdk/popbill.py | popbill/closedownService.py | ClosedownService.checkCorpNum | def checkCorpNum(self, MemberCorpNum, CheckCorpNum):
""" ν΄νμ
μ‘°ν - λ¨κ±΄
args
MemberCorpNum : νλΉνμ μ¬μ
μλ²νΈ
CorpNum : μ‘°νν μ¬μ
μλ²νΈ
MgtKey : λ¬Έμκ΄λ¦¬λ²νΈ
return
ν΄νμ
μ 보 object
raise
PopbillException
"""
if MemberCorpNum == None or MemberCorpNum == "" :
raise PopbillException(-99999999,"νλΉνμ μ¬μ
μλ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if CheckCorpNum == None or CheckCorpNum == "" :
raise PopbillException(-99999999,"μ‘°νν μ¬μ
μλ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
return self._httpget('/CloseDown?CN=' +CheckCorpNum, MemberCorpNum) | python | def checkCorpNum(self, MemberCorpNum, CheckCorpNum):
""" ν΄νμ
μ‘°ν - λ¨κ±΄
args
MemberCorpNum : νλΉνμ μ¬μ
μλ²νΈ
CorpNum : μ‘°νν μ¬μ
μλ²νΈ
MgtKey : λ¬Έμκ΄λ¦¬λ²νΈ
return
ν΄νμ
μ 보 object
raise
PopbillException
"""
if MemberCorpNum == None or MemberCorpNum == "" :
raise PopbillException(-99999999,"νλΉνμ μ¬μ
μλ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if CheckCorpNum == None or CheckCorpNum == "" :
raise PopbillException(-99999999,"μ‘°νν μ¬μ
μλ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
return self._httpget('/CloseDown?CN=' +CheckCorpNum, MemberCorpNum) | [
"def",
"checkCorpNum",
"(",
"self",
",",
"MemberCorpNum",
",",
"CheckCorpNum",
")",
":",
"if",
"MemberCorpNum",
"==",
"None",
"or",
"MemberCorpNum",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"νλΉνμ μ¬μ
μλ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.\")",
"",
"if",
"CheckCorpNum",
"==",
"None",
"or",
"CheckCorpNum",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"μ‘°νν μ¬μ
μλ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/CloseDown?CN='",
"+",
"CheckCorpNum",
",",
"MemberCorpNum",
")"
] | Closedown lookup - single business number.
args
MemberCorpNum : Popbill member business registration number
CorpNum : business registration number to check
MgtKey : document management key
return
closedown information object
raise
PopbillException | [
"Closedown",
"lookup",
"-",
"single",
"business",
"number",
".",
"args",
"MemberCorpNum",
":",
"Popbill",
"member",
"business",
"registration",
"number",
"CorpNum",
":",
"business",
"registration",
"number",
"to",
"check",
"MgtKey",
":",
"document",
"management",
"key",
"return",
"closedown",
"information",
"object",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/closedownService.py#L53-L71 |
linkhub-sdk/popbill.py | popbill/closedownService.py | ClosedownService.checkCorpNums | def checkCorpNums(self, MemberCorpNum, CorpNumList):
""" ν΄νμ
μ‘°ν λλ νμΈ, μ΅λ 1000건
args
MemberCorpNum : νλΉνμ μ¬μ
μλ²νΈ
CorpNumList : μ‘°νν μ¬μ
μλ²νΈ λ°°μ΄
return
ν΄νμ
μ 보 Object as List
raise
PopbillException
"""
if CorpNumList == None or len(CorpNumList) < 1:
raise PopbillException(-99999999,"μ‘°μ£ν μ¬μ
μλ²νΈ λͺ©λ‘μ΄ μ
λ ₯λμ§ μμμ΅λλ€.")
postData = self._stringtify(CorpNumList)
return self._httppost('/CloseDown',postData,MemberCorpNum) | python | def checkCorpNums(self, MemberCorpNum, CorpNumList):
""" ν΄νμ
μ‘°ν λλ νμΈ, μ΅λ 1000건
args
MemberCorpNum : νλΉνμ μ¬μ
μλ²νΈ
CorpNumList : μ‘°νν μ¬μ
μλ²νΈ λ°°μ΄
return
ν΄νμ
μ 보 Object as List
raise
PopbillException
"""
if CorpNumList == None or len(CorpNumList) < 1:
raise PopbillException(-99999999,"μ‘°μ£ν μ¬μ
μλ²νΈ λͺ©λ‘μ΄ μ
λ ₯λμ§ μμμ΅λλ€.")
postData = self._stringtify(CorpNumList)
return self._httppost('/CloseDown',postData,MemberCorpNum) | [
"def",
"checkCorpNums",
"(",
"self",
",",
"MemberCorpNum",
",",
"CorpNumList",
")",
":",
"if",
"CorpNumList",
"==",
"None",
"or",
"len",
"(",
"CorpNumList",
")",
"<",
"1",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"μ‘°μ£ν μ¬μ
μλ²νΈ λͺ©λ‘μ΄ μ
λ ₯λμ§ μμμ΅λλ€.\")",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"CorpNumList",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/CloseDown'",
",",
"postData",
",",
"MemberCorpNum",
")"
] | Closedown lookup in bulk, up to 1000 entries.
args
MemberCorpNum : Popbill member business registration number
CorpNumList : list of business registration numbers to check
return
closedown information Object as List
raise
PopbillException | [
"Closedown",
"lookup",
"in",
"bulk",
",",
"up",
"to",
"1000",
"entries",
".",
"args",
"MemberCorpNum",
":",
"Popbill",
"member",
"business",
"registration",
"number",
"CorpNumList",
":",
"list",
"of",
"business",
"registration",
"numbers",
"to",
"check",
"return",
"closedown",
"information",
"Object",
"as",
"List",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/closedownService.py#L73-L88 |
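A hedged usage sketch for the two lookup methods above. The import path and the ClosedownService constructor arguments (LinkID, SecretKey) are assumptions based on typical Popbill SDK usage and are not shown in these rows; the business registration numbers are placeholders. Only the method signatures come from the code itself.

```python
# Sketch only: constructor arguments and import path are assumed, not confirmed here.
from popbill import ClosedownService

service = ClosedownService("YOUR_LINK_ID", "YOUR_SECRET_KEY")

# Single lookup: member corp number first, then the number to check.
info = service.checkCorpNum("1234567890", "4108600477")

# Bulk lookup, up to 1000 numbers per call.
infos = service.checkCorpNums("1234567890", ["4108600477", "1234567890"])
```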
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_mdlCrt.py | loadPng | def loadPng(varNumVol, tplPngSize, strPathPng):
"""Load PNG files.
Parameters
----------
varNumVol : float
Number of volumes, i.e. number of time points in all runs.
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
strPathPng: str
Path to the folder containing the png files.
Returns
-------
aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
Stack of stimulus data.
"""
print('------Load PNGs')
# Create list of png files to load:
lstPngPaths = [None] * varNumVol
for idx01 in range(0, varNumVol):
lstPngPaths[idx01] = (strPathPng + str(idx01) + '.png')
# Load png files. The png data will be saved in a numpy array of the
# following order: aryPngData[x-pixel, y-pixel, PngNumber]. The
# sp.misc.imread function actually contains three values per pixel (RGB),
# but since the stimuli are black-and-white, any one of these is sufficient
# and we discard the others.
aryPngData = np.zeros((tplPngSize[0],
tplPngSize[1],
varNumVol))
for idx01 in range(0, varNumVol):
aryPngData[:, :, idx01] = np.array(Image.open(lstPngPaths[idx01]))
# Convert RGB values (0 to 255) to integer ones and zeros:
aryPngData = (aryPngData > 0).astype(int)
return aryPngData | python | def loadPng(varNumVol, tplPngSize, strPathPng):
"""Load PNG files.
Parameters
----------
varNumVol : float
Number of volumes, i.e. number of time points in all runs.
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
strPathPng: str
Path to the folder containing the png files.
Returns
-------
aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
Stack of stimulus data.
"""
print('------Load PNGs')
# Create list of png files to load:
lstPngPaths = [None] * varNumVol
for idx01 in range(0, varNumVol):
lstPngPaths[idx01] = (strPathPng + str(idx01) + '.png')
# Load png files. The png data will be saved in a numpy array of the
# following order: aryPngData[x-pixel, y-pixel, PngNumber]. The
# sp.misc.imread function actually contains three values per pixel (RGB),
# but since the stimuli are black-and-white, any one of these is sufficient
# and we discard the others.
aryPngData = np.zeros((tplPngSize[0],
tplPngSize[1],
varNumVol))
for idx01 in range(0, varNumVol):
aryPngData[:, :, idx01] = np.array(Image.open(lstPngPaths[idx01]))
# Convert RGB values (0 to 255) to integer ones and zeros:
aryPngData = (aryPngData > 0).astype(int)
return aryPngData | [
"def",
"loadPng",
"(",
"varNumVol",
",",
"tplPngSize",
",",
"strPathPng",
")",
":",
"print",
"(",
"'------Load PNGs'",
")",
"# Create list of png files to load:",
"lstPngPaths",
"=",
"[",
"None",
"]",
"*",
"varNumVol",
"for",
"idx01",
"in",
"range",
"(",
"0",
",",
"varNumVol",
")",
":",
"lstPngPaths",
"[",
"idx01",
"]",
"=",
"(",
"strPathPng",
"+",
"str",
"(",
"idx01",
")",
"+",
"'.png'",
")",
"# Load png files. The png data will be saved in a numpy array of the",
"# following order: aryPngData[x-pixel, y-pixel, PngNumber]. The",
"# sp.misc.imread function actually contains three values per pixel (RGB),",
"# but since the stimuli are black-and-white, any one of these is sufficient",
"# and we discard the others.",
"aryPngData",
"=",
"np",
".",
"zeros",
"(",
"(",
"tplPngSize",
"[",
"0",
"]",
",",
"tplPngSize",
"[",
"1",
"]",
",",
"varNumVol",
")",
")",
"for",
"idx01",
"in",
"range",
"(",
"0",
",",
"varNumVol",
")",
":",
"aryPngData",
"[",
":",
",",
":",
",",
"idx01",
"]",
"=",
"np",
".",
"array",
"(",
"Image",
".",
"open",
"(",
"lstPngPaths",
"[",
"idx01",
"]",
")",
")",
"# Convert RGB values (0 to 255) to integer ones and zeros:",
"aryPngData",
"=",
"(",
"aryPngData",
">",
"0",
")",
".",
"astype",
"(",
"int",
")",
"return",
"aryPngData"
] | Load PNG files.
Parameters
----------
varNumVol : float
Number of volumes, i.e. number of time points in all runs.
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
strPathPng: str
Path to the folder containing the png files.
Returns
-------
aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
Stack of stimulus data. | [
"Load",
"PNG",
"files",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_mdlCrt.py#L29-L66 |
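An illustrative call of loadPng above; the frame count, image size, and stimulus folder are made-up example values, and the import assumes the module is importable from the path shown in the row.

```python
# Sketch only: assumes ./stimuli/ holds frames named 0.png, 1.png, ... 399.png.
from pyprf_feature.analysis.old.pRF_mdlCrt import loadPng

aryPngData = loadPng(varNumVol=400,
                     tplPngSize=(128, 128),
                     strPathPng='./stimuli/')
print(aryPngData.shape)  # (128, 128, 400), binary 0/1 entries
```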
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_mdlCrt.py | loadPrsOrd | def loadPrsOrd(vecRunLngth, strPathPresOrd, vecVslStim):
"""Load presentation order of motion directions.
Parameters
----------
vecRunLngth : list
Number of volumes in every run
strPathPresOrd : str
Path to the npy vector containing order of presented motion directions.
vecVslStim: list
Key of (stimulus) condition presented in every run
Returns
-------
aryPresOrdAprt : 1d numpy array, shape [n_vols]
Presentation order of aperture position.
aryPresOrdMtn : 1d numpy array, shape [n_vols]
Presentation order of motion direction.
"""
print('------Load presentation order of motion directions')
aryPresOrd = np.empty((0, 2))
for idx01 in range(0, len(vecRunLngth)):
# reconstruct file name
# ---> consider: some runs were shorter than others(replace next row)
filename1 = (strPathPresOrd + str(vecVslStim[idx01]) +
'.pickle')
# filename1 = (strPathPresOrd + str(idx01+1) + '.pickle')
# load array
with open(filename1, 'rb') as handle:
array1 = pickle.load(handle)
tempCond = array1["Conditions"]
tempCond = tempCond[:vecRunLngth[idx01], :]
# add temp array to aryPresOrd
aryPresOrd = np.concatenate((aryPresOrd, tempCond), axis=0)
aryPresOrdAprt = aryPresOrd[:, 0].astype(int)
aryPresOrdMtn = aryPresOrd[:, 1].astype(int)
return aryPresOrdAprt, aryPresOrdMtn | python | def loadPrsOrd(vecRunLngth, strPathPresOrd, vecVslStim):
"""Load presentation order of motion directions.
Parameters
----------
vecRunLngth : list
Number of volumes in every run
strPathPresOrd : str
Path to the npy vector containing order of presented motion directions.
vecVslStim: list
Key of (stimulus) condition presented in every run
Returns
-------
aryPresOrdAprt : 1d numpy array, shape [n_vols]
Presentation order of aperture position.
aryPresOrdMtn : 1d numpy array, shape [n_vols]
Presentation order of motion direction.
"""
print('------Load presentation order of motion directions')
aryPresOrd = np.empty((0, 2))
for idx01 in range(0, len(vecRunLngth)):
# reconstruct file name
# ---> consider: some runs were shorter than others(replace next row)
filename1 = (strPathPresOrd + str(vecVslStim[idx01]) +
'.pickle')
# filename1 = (strPathPresOrd + str(idx01+1) + '.pickle')
# load array
with open(filename1, 'rb') as handle:
array1 = pickle.load(handle)
tempCond = array1["Conditions"]
tempCond = tempCond[:vecRunLngth[idx01], :]
# add temp array to aryPresOrd
aryPresOrd = np.concatenate((aryPresOrd, tempCond), axis=0)
aryPresOrdAprt = aryPresOrd[:, 0].astype(int)
aryPresOrdMtn = aryPresOrd[:, 1].astype(int)
return aryPresOrdAprt, aryPresOrdMtn | [
"def",
"loadPrsOrd",
"(",
"vecRunLngth",
",",
"strPathPresOrd",
",",
"vecVslStim",
")",
":",
"print",
"(",
"'------Load presentation order of motion directions'",
")",
"aryPresOrd",
"=",
"np",
".",
"empty",
"(",
"(",
"0",
",",
"2",
")",
")",
"for",
"idx01",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"vecRunLngth",
")",
")",
":",
"# reconstruct file name",
"# ---> consider: some runs were shorter than others(replace next row)",
"filename1",
"=",
"(",
"strPathPresOrd",
"+",
"str",
"(",
"vecVslStim",
"[",
"idx01",
"]",
")",
"+",
"'.pickle'",
")",
"# filename1 = (strPathPresOrd + str(idx01+1) + '.pickle')",
"# load array",
"with",
"open",
"(",
"filename1",
",",
"'rb'",
")",
"as",
"handle",
":",
"array1",
"=",
"pickle",
".",
"load",
"(",
"handle",
")",
"tempCond",
"=",
"array1",
"[",
"\"Conditions\"",
"]",
"tempCond",
"=",
"tempCond",
"[",
":",
"vecRunLngth",
"[",
"idx01",
"]",
",",
":",
"]",
"# add temp array to aryPresOrd",
"aryPresOrd",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryPresOrd",
",",
"tempCond",
")",
",",
"axis",
"=",
"0",
")",
"aryPresOrdAprt",
"=",
"aryPresOrd",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"aryPresOrdMtn",
"=",
"aryPresOrd",
"[",
":",
",",
"1",
"]",
".",
"astype",
"(",
"int",
")",
"return",
"aryPresOrdAprt",
",",
"aryPresOrdMtn"
] | Load presentation order of motion directions.
Parameters
----------
vecRunLngth : list
Number of volumes in every run
strPathPresOrd : str
Path to the npy vector containing order of presented motion directions.
vecVslStim: list
Key of (stimulus) condition presented in every run
Returns
-------
aryPresOrdAprt : 1d numpy array, shape [n_vols]
Presentation order of aperture position.
aryPresOrdMtn : 1d numpy array, shape [n_vols]
Presentation order of motion direction. | [
"Load",
"presentation",
"order",
"of",
"motion",
"directions",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_mdlCrt.py#L69-L105 |
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_mdlCrt.py | crtPwBoxCarFn | def crtPwBoxCarFn(varNumVol, aryPngData, aryPresOrd, vecMtDrctn):
"""Create pixel-wise boxcar functions.
Parameters
----------
varNumVol : int, positive
Number of volumes, i.e. number of time points in all runs.
aryPngData : 3d numpy array, shape [png_x, png_y, n_vols]
Stack of binary stimulus images.
aryPresOrd : 1d numpy array, shape [n_vols]
Presentation order of the motion direction conditions.
vecMtDrctn : list
Motion direction conditions to be modelled.
Returns
-------
aryBoxCar : 4d numpy array, shape [png_x, png_y, n_conditions, n_vols]
Pixel-wise boxcar functions, one set per motion direction.
Reference
---------
[1]
"""
print('------Create pixel-wise boxcar functions')
aryBoxCar = np.empty(aryPngData.shape[0:2] + (len(vecMtDrctn),) +
(varNumVol,), dtype='int64')
for ind, num in enumerate(vecMtDrctn):
aryCondTemp = np.zeros((aryPngData.shape), dtype='int64')
lgcTempMtDrctn = [aryPresOrd == num][0]
aryCondTemp[:, :, lgcTempMtDrctn] = np.copy(
aryPngData[:, :, lgcTempMtDrctn])
aryBoxCar[:, :, ind, :] = aryCondTemp
return aryBoxCar | python | def crtPwBoxCarFn(varNumVol, aryPngData, aryPresOrd, vecMtDrctn):
"""Create pixel-wise boxcar functions.
Parameters
----------
varNumVol : int, positive
Number of volumes, i.e. number of time points in all runs.
aryPngData : 3d numpy array, shape [png_x, png_y, n_vols]
Stack of binary stimulus images.
aryPresOrd : 1d numpy array, shape [n_vols]
Presentation order of the motion direction conditions.
vecMtDrctn : list
Motion direction conditions to be modelled.
Returns
-------
aryBoxCar : 4d numpy array, shape [png_x, png_y, n_conditions, n_vols]
Pixel-wise boxcar functions, one set per motion direction.
Reference
---------
[1]
"""
print('------Create pixel-wise boxcar functions')
aryBoxCar = np.empty(aryPngData.shape[0:2] + (len(vecMtDrctn),) +
(varNumVol,), dtype='int64')
for ind, num in enumerate(vecMtDrctn):
aryCondTemp = np.zeros((aryPngData.shape), dtype='int64')
lgcTempMtDrctn = [aryPresOrd == num][0]
aryCondTemp[:, :, lgcTempMtDrctn] = np.copy(
aryPngData[:, :, lgcTempMtDrctn])
aryBoxCar[:, :, ind, :] = aryCondTemp
return aryBoxCar | [
"def",
"crtPwBoxCarFn",
"(",
"varNumVol",
",",
"aryPngData",
",",
"aryPresOrd",
",",
"vecMtDrctn",
")",
":",
"print",
"(",
"'------Create pixel-wise boxcar functions'",
")",
"aryBoxCar",
"=",
"np",
".",
"empty",
"(",
"aryPngData",
".",
"shape",
"[",
"0",
":",
"2",
"]",
"+",
"(",
"len",
"(",
"vecMtDrctn",
")",
",",
")",
"+",
"(",
"varNumVol",
",",
")",
",",
"dtype",
"=",
"'int64'",
")",
"for",
"ind",
",",
"num",
"in",
"enumerate",
"(",
"vecMtDrctn",
")",
":",
"aryCondTemp",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryPngData",
".",
"shape",
")",
",",
"dtype",
"=",
"'int64'",
")",
"lgcTempMtDrctn",
"=",
"[",
"aryPresOrd",
"==",
"num",
"]",
"[",
"0",
"]",
"aryCondTemp",
"[",
":",
",",
":",
",",
"lgcTempMtDrctn",
"]",
"=",
"np",
".",
"copy",
"(",
"aryPngData",
"[",
":",
",",
":",
",",
"lgcTempMtDrctn",
"]",
")",
"aryBoxCar",
"[",
":",
",",
":",
",",
"ind",
",",
":",
"]",
"=",
"aryCondTemp",
"return",
"aryBoxCar"
] | Create pixel-wise boxcar functions.
Parameters
----------
varNumVol : int, positive
Number of volumes, i.e. number of time points in all runs.
aryPngData : 3d numpy array, shape [png_x, png_y, n_vols]
Stack of binary stimulus images.
aryPresOrd : 1d numpy array, shape [n_vols]
Presentation order of the motion direction conditions.
vecMtDrctn : list
Motion direction conditions to be modelled.
Returns
-------
aryBoxCar : 4d numpy array, shape [png_x, png_y, n_conditions, n_vols]
Pixel-wise boxcar functions, one set per motion direction.
Reference
---------
[1] | [
"Create",
"pixel",
"-",
"wise",
"boxcar",
"functions",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_mdlCrt.py#L108-L135 |
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_mdlCrt.py | crtGauss2D | def crtGauss2D(varSizeX, varSizeY, varPosX, varPosY, varSd):
"""Create 2D Gaussian kernel.
Parameters
----------
varSizeX : int, positive
Width of the visual field.
varSizeY : int, positive
Height of the visual field.
varPosX : int, positive
X position of centre of 2D Gauss.
varPosY : int, positive
Y position of centre of 2D Gauss.
varSd : float, positive
Standard deviation of 2D Gauss.
Returns
-------
aryGauss : 2d numpy array, shape [varSizeX, varSizeY]
2d Gaussian.
Reference
---------
[1]
"""
varSizeX = int(varSizeX)
varSizeY = int(varSizeY)
# aryX and aryY are in reversed order, this seems to be necessary:
aryY, aryX = sp.mgrid[0:varSizeX,
0:varSizeY]
# The actual creation of the Gaussian array:
aryGauss = (
(np.square((aryX - varPosX)) + np.square((aryY - varPosY))) /
(2.0 * np.square(varSd))
)
aryGauss = np.exp(-aryGauss) / (2 * np.pi * np.square(varSd))
return aryGauss | python | def crtGauss2D(varSizeX, varSizeY, varPosX, varPosY, varSd):
"""Create 2D Gaussian kernel.
Parameters
----------
varSizeX : int, positive
Width of the visual field.
varSizeY : int, positive
Height of the visual field.
varPosX : int, positive
X position of centre of 2D Gauss.
varPosY : int, positive
Y position of centre of 2D Gauss.
varSd : float, positive
Standard deviation of 2D Gauss.
Returns
-------
aryGauss : 2d numpy array, shape [varSizeX, varSizeY]
2d Gaussian.
Reference
---------
[1]
"""
varSizeX = int(varSizeX)
varSizeY = int(varSizeY)
# aryX and aryY are in reversed order, this seems to be necessary:
aryY, aryX = sp.mgrid[0:varSizeX,
0:varSizeY]
# The actual creation of the Gaussian array:
aryGauss = (
(np.square((aryX - varPosX)) + np.square((aryY - varPosY))) /
(2.0 * np.square(varSd))
)
aryGauss = np.exp(-aryGauss) / (2 * np.pi * np.square(varSd))
return aryGauss | [
"def",
"crtGauss2D",
"(",
"varSizeX",
",",
"varSizeY",
",",
"varPosX",
",",
"varPosY",
",",
"varSd",
")",
":",
"varSizeX",
"=",
"int",
"(",
"varSizeX",
")",
"varSizeY",
"=",
"int",
"(",
"varSizeY",
")",
"# aryX and aryY are in reversed order, this seems to be necessary:",
"aryY",
",",
"aryX",
"=",
"sp",
".",
"mgrid",
"[",
"0",
":",
"varSizeX",
",",
"0",
":",
"varSizeY",
"]",
"# The actual creation of the Gaussian array:",
"aryGauss",
"=",
"(",
"(",
"np",
".",
"square",
"(",
"(",
"aryX",
"-",
"varPosX",
")",
")",
"+",
"np",
".",
"square",
"(",
"(",
"aryY",
"-",
"varPosY",
")",
")",
")",
"/",
"(",
"2.0",
"*",
"np",
".",
"square",
"(",
"varSd",
")",
")",
")",
"aryGauss",
"=",
"np",
".",
"exp",
"(",
"-",
"aryGauss",
")",
"/",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"square",
"(",
"varSd",
")",
")",
"return",
"aryGauss"
] | Create 2D Gaussian kernel.
Parameters
----------
varSizeX : int, positive
Width of the visual field.
varSizeY : int, positive
Height of the visual field.
varPosX : int, positive
X position of centre of 2D Gauss.
varPosY : int, positive
Y position of centre of 2D Gauss.
varSd : float, positive
Standard deviation of 2D Gauss.
Returns
-------
aryGauss : 2d numpy array, shape [varSizeX, varSizeY]
2d Gaussian.
Reference
---------
[1] | [
"Create",
"2D",
"Gaussian",
"kernel",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_mdlCrt.py#L138-L175 |
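A standalone numeric check of the 2D Gaussian construction above; np.mgrid stands in for the sp.mgrid alias used in the original, and the grid size, centre, and sd are arbitrary example values.

```python
import numpy as np

# 64 x 64 kernel centred at (32, 32) with sd = 8; with the centre well away
# from the border the kernel should sum to roughly 1 over the pixel grid.
aryY, aryX = np.mgrid[0:64, 0:64]
varSd = 8.0
aryGauss = np.exp(-((aryX - 32.0) ** 2 + (aryY - 32.0) ** 2)
                  / (2.0 * varSd ** 2)) / (2 * np.pi * varSd ** 2)
print(round(float(aryGauss.sum()), 3))  # ~1.0
```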
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_mdlCrt.py | cnvlGauss2D | def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol,
queOut):
"""Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : int, positive
Process index, used to re-order the results of the parallel processes.
aryBoxCar : 4d numpy array, shape [png_x, png_y, n_conditions, n_vols]
Pixel-wise boxcar functions.
aryMdlParamsChnk : 2d numpy array, shape [n_models, 4]
Combinations of pRF model parameters in this chunk; columns 1 to 3 hold x position, y position and size (sd).
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
varNumVol : int, positive
Number of volumes, i.e. number of time points in all runs.
queOut : multiprocessing.Queue
Queue for returning the results to the parent process.
Returns
-------
lstOut : list
Process index and pRF model time courses (put on the output queue).
Reference
---------
[1]
"""
# Number of combinations of model parameters in the current chunk:
varChnkSze = np.size(aryMdlParamsChnk, axis=0)
# Determine number of motion directions
varNumMtnDrtn = aryBoxCar.shape[2]
# Output array with pRF model time courses:
aryOut = np.zeros([varChnkSze, varNumMtnDrtn, varNumVol])
# Loop through different motion directions:
for idxMtn in range(0, varNumMtnDrtn):
# Loop through combinations of model parameters:
for idxMdl in range(0, varChnkSze):
# Spatial parameters of current model:
varTmpX = aryMdlParamsChnk[idxMdl, 1]
varTmpY = aryMdlParamsChnk[idxMdl, 2]
varTmpSd = aryMdlParamsChnk[idxMdl, 3]
# Create pRF model (2D):
aryGauss = crtGauss2D(tplPngSize[0],
tplPngSize[1],
varTmpX,
varTmpY,
varTmpSd)
# Multiply pixel-time courses with Gaussian pRF models:
aryPrfTcTmp = np.multiply(aryBoxCar[:, :, idxMtn, :],
aryGauss[:, :, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# pRF time course model (i.e. not yet scaled for size of the pRF).
aryPrfTcTmp = np.sum(aryPrfTcTmp, axis=(0, 1))
# Put model time courses into function's output with 2d Gaussian
# arrray:
aryOut[idxMdl, idxMtn, :] = aryPrfTcTmp
# Put column with the indicies of model-parameter-combinations into the
# output array (in order to be able to put the pRF model time courses into
# the correct order after the parallelised function):
lstOut = [idxPrc,
aryOut]
# Put output to queue:
queOut.put(lstOut) | python | def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol,
queOut):
"""Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : int, positive
Process index, used to re-order the results of the parallel processes.
aryBoxCar : 4d numpy array, shape [png_x, png_y, n_conditions, n_vols]
Pixel-wise boxcar functions.
aryMdlParamsChnk : 2d numpy array, shape [n_models, 4]
Combinations of pRF model parameters in this chunk; columns 1 to 3 hold x position, y position and size (sd).
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
varNumVol : int, positive
Number of volumes, i.e. number of time points in all runs.
queOut : multiprocessing.Queue
Queue for returning the results to the parent process.
Returns
-------
lstOut : list
Process index and pRF model time courses (put on the output queue).
Reference
---------
[1]
"""
# Number of combinations of model parameters in the current chunk:
varChnkSze = np.size(aryMdlParamsChnk, axis=0)
# Determine number of motion directions
varNumMtnDrtn = aryBoxCar.shape[2]
# Output array with pRF model time courses:
aryOut = np.zeros([varChnkSze, varNumMtnDrtn, varNumVol])
# Loop through different motion directions:
for idxMtn in range(0, varNumMtnDrtn):
# Loop through combinations of model parameters:
for idxMdl in range(0, varChnkSze):
# Spatial parameters of current model:
varTmpX = aryMdlParamsChnk[idxMdl, 1]
varTmpY = aryMdlParamsChnk[idxMdl, 2]
varTmpSd = aryMdlParamsChnk[idxMdl, 3]
# Create pRF model (2D):
aryGauss = crtGauss2D(tplPngSize[0],
tplPngSize[1],
varTmpX,
varTmpY,
varTmpSd)
# Multiply pixel-time courses with Gaussian pRF models:
aryPrfTcTmp = np.multiply(aryBoxCar[:, :, idxMtn, :],
aryGauss[:, :, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# pRF time course model (i.e. not yet scaled for size of the pRF).
aryPrfTcTmp = np.sum(aryPrfTcTmp, axis=(0, 1))
# Put model time courses into function's output with 2d Gaussian
# arrray:
aryOut[idxMdl, idxMtn, :] = aryPrfTcTmp
# Put column with the indicies of model-parameter-combinations into the
# output array (in order to be able to put the pRF model time courses into
# the correct order after the parallelised function):
lstOut = [idxPrc,
aryOut]
# Put output to queue:
queOut.put(lstOut) | [
"def",
"cnvlGauss2D",
"(",
"idxPrc",
",",
"aryBoxCar",
",",
"aryMdlParamsChnk",
",",
"tplPngSize",
",",
"varNumVol",
",",
"queOut",
")",
":",
"# Number of combinations of model parameters in the current chunk:",
"varChnkSze",
"=",
"np",
".",
"size",
"(",
"aryMdlParamsChnk",
",",
"axis",
"=",
"0",
")",
"# Determine number of motion directions",
"varNumMtnDrtn",
"=",
"aryBoxCar",
".",
"shape",
"[",
"2",
"]",
"# Output array with pRF model time courses:",
"aryOut",
"=",
"np",
".",
"zeros",
"(",
"[",
"varChnkSze",
",",
"varNumMtnDrtn",
",",
"varNumVol",
"]",
")",
"# Loop through different motion directions:",
"for",
"idxMtn",
"in",
"range",
"(",
"0",
",",
"varNumMtnDrtn",
")",
":",
"# Loop through combinations of model parameters:",
"for",
"idxMdl",
"in",
"range",
"(",
"0",
",",
"varChnkSze",
")",
":",
"# Spatial parameters of current model:",
"varTmpX",
"=",
"aryMdlParamsChnk",
"[",
"idxMdl",
",",
"1",
"]",
"varTmpY",
"=",
"aryMdlParamsChnk",
"[",
"idxMdl",
",",
"2",
"]",
"varTmpSd",
"=",
"aryMdlParamsChnk",
"[",
"idxMdl",
",",
"3",
"]",
"# Create pRF model (2D):",
"aryGauss",
"=",
"crtGauss2D",
"(",
"tplPngSize",
"[",
"0",
"]",
",",
"tplPngSize",
"[",
"1",
"]",
",",
"varTmpX",
",",
"varTmpY",
",",
"varTmpSd",
")",
"# Multiply pixel-time courses with Gaussian pRF models:",
"aryPrfTcTmp",
"=",
"np",
".",
"multiply",
"(",
"aryBoxCar",
"[",
":",
",",
":",
",",
"idxMtn",
",",
":",
"]",
",",
"aryGauss",
"[",
":",
",",
":",
",",
"None",
"]",
")",
"# Calculate sum across x- and y-dimensions - the 'area under the",
"# Gaussian surface'. This is essentially an unscaled version of the",
"# pRF time course model (i.e. not yet scaled for size of the pRF).",
"aryPrfTcTmp",
"=",
"np",
".",
"sum",
"(",
"aryPrfTcTmp",
",",
"axis",
"=",
"(",
"0",
",",
"1",
")",
")",
"# Put model time courses into function's output with 2d Gaussian",
"# arrray:",
"aryOut",
"[",
"idxMdl",
",",
"idxMtn",
",",
":",
"]",
"=",
"aryPrfTcTmp",
"# Put column with the indicies of model-parameter-combinations into the",
"# output array (in order to be able to put the pRF model time courses into",
"# the correct order after the parallelised function):",
"lstOut",
"=",
"[",
"idxPrc",
",",
"aryOut",
"]",
"# Put output to queue:",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : int, positive
Process index, used to re-order the results of the parallel processes.
aryBoxCar : 4d numpy array, shape [png_x, png_y, n_conditions, n_vols]
Pixel-wise boxcar functions.
aryMdlParamsChnk : 2d numpy array, shape [n_models, 4]
Combinations of pRF model parameters in this chunk; columns 1 to 3 hold x position, y position and size (sd).
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
varNumVol : int, positive
Number of volumes, i.e. number of time points in all runs.
queOut : multiprocessing.Queue
Queue for returning the results to the parent process.
Returns
-------
lstOut : list
Process index and pRF model time courses (put on the output queue).
Reference
---------
[1] | [
"Spatially",
"convolve",
"boxcar",
"functions",
"with",
"2D",
"Gaussian",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_mdlCrt.py#L178-L250 |
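A hedged driver sketch for cnvlGauss2D above, showing how the process index and output queue are meant to be used; the array sizes and model parameters are small dummies, and the import assumes the module is importable from the path shown in the row.

```python
import multiprocessing as mp
import numpy as np
from pyprf_feature.analysis.old.pRF_mdlCrt import cnvlGauss2D  # assumed importable

if __name__ == "__main__":
    # Dummy inputs: 2 motion directions, 10 volumes, 2 candidate pRF models.
    tplPngSize = (64, 64)
    varNumVol = 10
    aryBoxCar = np.random.randint(0, 2, size=(64, 64, 2, varNumVol))
    aryMdlParamsChnk = np.array([[0.0, 32.0, 32.0, 8.0],
                                 [1.0, 16.0, 48.0, 4.0]])

    queOut = mp.Queue()
    prc = mp.Process(target=cnvlGauss2D,
                     args=(0, aryBoxCar, aryMdlParamsChnk, tplPngSize,
                           varNumVol, queOut))
    prc.start()
    idxPrc, aryPrfTc = queOut.get()  # read before join to avoid blocking
    prc.join()
    print(aryPrfTc.shape)  # (2 models, 2 motion directions, 10 volumes)
```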