repository_name (string, 7-55 chars) | func_path_in_repository (string, 4-223 chars) | func_name (string, 1-134 chars) | whole_func_string (string, 75-104k chars) | language (string, 1 class) | func_code_string (string, 75-104k chars) | func_code_tokens (sequence, 19-28.4k tokens) | func_documentation_string (string, 1-46.9k chars) | func_documentation_tokens (sequence, 1-1.97k tokens) | split_name (string, 1 class) | func_code_url (string, 87-315 chars)
---|---|---|---|---|---|---|---|---|---|---|
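For orientation, here is a minimal sketch of how a corpus with this schema could be loaded and inspected with the `datasets` library; the dataset path below is a placeholder, not the real repository id.

```python
from datasets import load_dataset

# Placeholder dataset id -- substitute the actual repository for this corpus.
ds = load_dataset("someuser/code-docstring-corpus", split="train")

row = ds[0]
print(row["func_name"])                   # e.g. "PLoS.convert_def_list_elements"
print(row["func_documentation_string"])   # the paired docstring
print(len(row["func_code_tokens"]))       # tokenized length of the function
```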
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_def_list_elements | def convert_def_list_elements(self):
"""
A list in which each item consists of two parts: a word, phrase, term,
graphic, chemical structure, or equation paired with one or more
descriptions, discussions, explanations, or definitions of it.
<def-list> elements are lists of <def-item> elements which are in turn
composed of a pair of term (<term>) and definition (<def>). This method
will convert the <def-list> to a classed <div> with a styled format
for the terms and definitions.
"""
for def_list in self.main.getroot().findall('.//def-list'):
#Remove the attributes, excepting id
remove_all_attributes(def_list, exclude=['id'])
#Modify the def-list element
def_list.tag = 'div'
def_list.attrib['class'] = 'def-list'
for def_item in def_list.findall('def-item'):
#Get the term being defined, modify it
term = def_item.find('term')
term.tag = 'p'
term.attrib['class']= 'def-item-term'
#Insert it before its parent def_item
insert_before(def_item, term)
#Get the definition, handle missing with a warning
definition = def_item.find('def')
if definition is None:
log.warning('Missing def element in def-item')
remove(def_item)
continue
#PLoS appears to consistently place all definition text in a
#paragraph subelement of the def element
def_para = definition.find('p')
def_para.attrib['class'] = 'def-item-def'
#Replace the def-item element with the p element
replace(def_item, def_para) | python | def convert_def_list_elements(self):
"""
A list in which each item consists of two parts: a word, phrase, term,
graphic, chemical structure, or equation paired with one or more
descriptions, discussions, explanations, or definitions of it.
<def-list> elements are lists of <def-item> elements which are in turn
composed of a pair of term (<term>) and definition (<def>). This method
will convert the <def-list> to a classed <div> with a styled format
for the terms and definitions.
"""
for def_list in self.main.getroot().findall('.//def-list'):
#Remove the attributes, excepting id
remove_all_attributes(def_list, exclude=['id'])
#Modify the def-list element
def_list.tag = 'div'
def_list.attrib['class'] = 'def-list'
for def_item in def_list.findall('def-item'):
#Get the term being defined, modify it
term = def_item.find('term')
term.tag = 'p'
term.attrib['class']= 'def-item-term'
#Insert it before its parent def_item
insert_before(def_item, term)
#Get the definition, handle missing with a warning
definition = def_item.find('def')
if definition is None:
log.warning('Missing def element in def-item')
remove(def_item)
continue
#PLoS appears to consistently place all definition text in a
#paragraph subelement of the def element
def_para = definition.find('p')
def_para.attrib['class'] = 'def-item-def'
#Replace the def-item element with the p element
replace(def_item, def_para) | [
"def",
"convert_def_list_elements",
"(",
"self",
")",
":",
"for",
"def_list",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//def-list'",
")",
":",
"#Remove the attributes, excepting id",
"remove_all_attributes",
"(",
"def_list",
",",
"exclude",
"=",
"[",
"'id'",
"]",
")",
"#Modify the def-list element",
"def_list",
".",
"tag",
"=",
"'div'",
"def_list",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'def-list'",
"for",
"def_item",
"in",
"def_list",
".",
"findall",
"(",
"'def-item'",
")",
":",
"#Get the term being defined, modify it",
"term",
"=",
"def_item",
".",
"find",
"(",
"'term'",
")",
"term",
".",
"tag",
"=",
"'p'",
"term",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'def-item-term'",
"#Insert it before its parent def_item",
"insert_before",
"(",
"def_item",
",",
"term",
")",
"#Get the definition, handle missing with a warning",
"definition",
"=",
"def_item",
".",
"find",
"(",
"'def'",
")",
"if",
"definition",
"is",
"None",
":",
"log",
".",
"warning",
"(",
"'Missing def element in def-item'",
")",
"remove",
"(",
"def_item",
")",
"continue",
"#PLoS appears to consistently place all definition text in a",
"#paragraph subelement of the def element",
"def_para",
"=",
"definition",
".",
"find",
"(",
"'p'",
")",
"def_para",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'def-item-def'",
"#Replace the def-item element with the p element",
"replace",
"(",
"def_item",
",",
"def_para",
")"
] | A list in which each item consists of two parts: a word, phrase, term,
graphic, chemical structure, or equation paired with one or more
descriptions, discussions, explanations, or definitions of it.
<def-list> elements are lists of <def-item> elements which are in turn
composed of a pair of term (<term>) and definition (<def>). This method
will convert the <def-list> to a classed <div> with a styled format
for the terms and definitions. | [
"A",
"list",
"in",
"which",
"each",
"item",
"consists",
"of",
"two",
"parts",
":",
"a",
"word",
"phrase",
"term",
"graphic",
"chemical",
"structure",
"or",
"equation",
"paired",
"with",
"one",
"of",
"more",
"descriptions",
"discussions",
"explanations",
"or",
"definitions",
"of",
"it",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L1153-L1188 |
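The helpers used above (`remove_all_attributes`, `insert_before`, `replace`) are utilities from this package rather than lxml builtins, so here is a self-contained sketch of the same def-list transformation written against plain lxml on a toy JATS fragment; it illustrates the algorithm and is not the project's own code.

```python
from lxml import etree

src = etree.fromstring(
    '<def-list id="d1"><def-item>'
    '<term>EPUB</term><def><p>An e-book format.</p></def>'
    '</def-item></def-list>')

for def_item in src.findall('def-item'):
    term = def_item.find('term')
    term.tag = 'p'
    term.attrib['class'] = 'def-item-term'
    def_para = def_item.find('def').find('p')
    def_para.attrib['class'] = 'def-item-def'
    def_item.addprevious(term)                        # stands in for insert_before()
    def_item.getparent().replace(def_item, def_para)  # stands in for replace()

src.tag = 'div'
src.attrib['class'] = 'def-list'
print(etree.tostring(src, pretty_print=True).decode())
# -> <div id="d1" class="def-list">
#      <p class="def-item-term">EPUB</p>
#      <p class="def-item-def">An e-book format.</p>
#    </div>
```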
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_ref_list_elements | def convert_ref_list_elements(self):
"""
List of references (citations) for an article, which is often called
“References”, “Bibliography”, or “Additional Reading”.
No distinction is made between lists of cited references and lists of
suggested references.
This method should not be confused with the method(s) employed for the
formatting of a proper bibliography, though they are related.
Similarly, this is an area of major openness in development; I lack
access to PLOS' algorithm for proper citation formatting.
"""
#TODO: Handle nested ref-lists
for ref_list in self.main.getroot().findall('.//ref-list'):
remove_all_attributes(ref_list)
ref_list.tag = 'div'
ref_list.attrib['class'] = 'ref-list'
label = ref_list.find('label')
if label is not None:
label.tag = 'h3'
for ref in ref_list.findall('ref'):
ref_p = etree.Element('p')
ref_p.text = str(etree.tostring(ref, method='text', encoding='utf-8'), encoding='utf-8')
replace(ref, ref_p) | python | def convert_ref_list_elements(self):
"""
List of references (citations) for an article, which is often called
“References”, “Bibliography”, or “Additional Reading”.
No distinction is made between lists of cited references and lists of
suggested references.
This method should not be confused with the method(s) employed for the
formatting of a proper bibliography, though they are related.
Similarly, this is an area of major openness in development; I lack
access to PLOS' algorithm for proper citation formatting.
"""
#TODO: Handle nested ref-lists
for ref_list in self.main.getroot().findall('.//ref-list'):
remove_all_attributes(ref_list)
ref_list.tag = 'div'
ref_list.attrib['class'] = 'ref-list'
label = ref_list.find('label')
if label is not None:
label.tag = 'h3'
for ref in ref_list.findall('ref'):
ref_p = etree.Element('p')
ref_p.text = str(etree.tostring(ref, method='text', encoding='utf-8'), encoding='utf-8')
replace(ref, ref_p) | [
"def",
"convert_ref_list_elements",
"(",
"self",
")",
":",
"#TODO: Handle nested ref-lists",
"for",
"ref_list",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//ref-list'",
")",
":",
"remove_all_attributes",
"(",
"ref_list",
")",
"ref_list",
".",
"tag",
"=",
"'div'",
"ref_list",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'ref-list'",
"label",
"=",
"ref_list",
".",
"find",
"(",
"'label'",
")",
"if",
"label",
"is",
"not",
"None",
":",
"label",
".",
"tag",
"=",
"'h3'",
"for",
"ref",
"in",
"ref_list",
".",
"findall",
"(",
"'ref'",
")",
":",
"ref_p",
"=",
"etree",
".",
"Element",
"(",
"'p'",
")",
"ref_p",
".",
"text",
"=",
"str",
"(",
"etree",
".",
"tostring",
"(",
"ref",
",",
"method",
"=",
"'text'",
",",
"encoding",
"=",
"'utf-8'",
")",
",",
"encoding",
"=",
"'utf-8'",
")",
"replace",
"(",
"ref",
",",
"ref_p",
")"
] | List of references (citations) for an article, which is often called
“References”, “Bibliography”, or “Additional Reading”.
No distinction is made between lists of cited references and lists of
suggested references.
This method should not be confused with the method(s) employed for the
formatting of a proper bibliography, though they are related.
Similarly, this is an area of major openness in development, I lack
access to PLOS' algorithm for proper citation formatting. | [
"List",
"of",
"references",
"(",
"citations",
")",
"for",
"an",
"article",
"which",
"is",
"often",
"called",
"“References”",
"“Bibliography”",
"or",
"“Additional",
"Reading”",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L1192-L1216 |
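One detail worth calling out in `convert_ref_list_elements` above: `etree.tostring(ref, method='text', ...)` discards all markup and concatenates the text nodes, which is why each reference collapses to a single plain `<p>` (and why the `<label>` text fuses directly onto the citation). A small demonstration:

```python
from lxml import etree

ref = etree.fromstring(
    '<ref><label>1</label><mixed-citation>Doe J. '
    '<article-title>A Title</article-title>. 2010.</mixed-citation></ref>')

flat = str(etree.tostring(ref, method='text', encoding='utf-8'), encoding='utf-8')
print(flat)  # -> "1Doe J. A Title. 2010."  (markup gone, label fused to the text)
```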
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_table_wrap_elements | def convert_table_wrap_elements(self):
"""
Responsible for the correct conversion of JPTS 3.0 <table-wrap>
elements to EPUB content.
The 'id' attribute is treated as mandatory by this method.
"""
for table_wrap in self.main.getroot().findall('.//table-wrap'):
table_div = etree.Element('div', {'id': table_wrap.attrib['id']})
label = table_wrap.find('label')
caption = table_wrap.find('caption')
alternatives = table_wrap.find('alternatives')
graphic = table_wrap.find('graphic')
table = table_wrap.find('table')
if graphic is None:
if alternatives is not None:
graphic = alternatives.find('graphic')
if table is None:
if alternatives is not None:
table = alternatives.find('table')
#Handling the label and caption
if label is not None and caption is not None:
caption_div = etree.Element('div', {'class': 'table-caption'})
caption_div_b = etree.SubElement(caption_div, 'b')
if label is not None:
append_all_below(caption_div_b, label)
if caption is not None:
#Find, optional, title element and paragraph elements
caption_title = caption.find('title')
if caption_title is not None:
append_all_below(caption_div_b, caption_title)
caption_ps = caption.findall('p')
#For title and each paragraph, give children to the div
for caption_p in caption_ps:
append_all_below(caption_div, caption_p)
#Add this to the table div
table_div.append(caption_div)
### Practical Description ###
#A table may have both, one of, or neither of graphic and table
#The different combinations should be handled, but a table-wrap
#with neither should fail with an error
#
#If there is both an image and a table, the image should be placed
#in the text flow with a link to the html table
#
#If there is an image and no table, the image should be placed in
#the text flow without a link to an html table
#
#If there is a table with no image, then the table should be placed
#in the text flow.
if graphic is not None:
#Create the image path for the graphic
xlink_href = ns_format(graphic, 'xlink:href')
graphic_xlink_href = graphic.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the new img element
img_element = etree.Element('img', {'alt': 'A Table',
'src': img_path,
'class': 'table'})
#Add this to the table div
table_div.append(img_element)
#If table, add it to the list, and link to it
if table is not None: # Both graphic and table
#The label attribute is just a means of transmitting some
#plaintext which will be used for the labeling in the html
#tables file
div = etree.SubElement(self.tables.find('body'),
'div',
{'id': table_wrap.attrib['id']})
if label is not None:
bold_label = etree.SubElement(div, 'b')
append_all_below(bold_label, label)
#Add the table to the tables list
div.append(deepcopy(table))
#Also add the table's foot if it exists
table_wrap_foot = table_wrap.find('table-wrap-foot')
if table_wrap_foot is not None:
table_wrap_foot.tag = 'div'
table_wrap_foot.attrib['class'] = 'table-wrap-foot'
div.append(table_wrap_foot)
#Create a link to the html version of the table
html_table_link = etree.Element('a')
html_table_link.attrib['href'] = self.tables_fragment.format(table_wrap.attrib['id'])
html_table_link.text = 'Go to HTML version of this table'
#Add this to the table div
table_div.append(html_table_link)
remove(table)
elif table is not None: # Table only
#Simply append the table to the table div
table_div.append(table)
elif graphic is None and table is None:
sys.exit('Encountered table-wrap element with neither graphic nor table. Exiting.')
#Replace the original table-wrap with the newly constructed div
replace(table_wrap, table_div) | python | def convert_table_wrap_elements(self):
"""
Responsible for the correct conversion of JPTS 3.0 <table-wrap>
elements to EPUB content.
The 'id' attribute is treated as mandatory by this method.
"""
for table_wrap in self.main.getroot().findall('.//table-wrap'):
table_div = etree.Element('div', {'id': table_wrap.attrib['id']})
label = table_wrap.find('label')
caption = table_wrap.find('caption')
alternatives = table_wrap.find('alternatives')
graphic = table_wrap.find('graphic')
table = table_wrap.find('table')
if graphic is None:
if alternatives is not None:
graphic = alternatives.find('graphic')
if table is None:
if alternatives is not None:
table = alternatives.find('table')
#Handling the label and caption
if label is not None and caption is not None:
caption_div = etree.Element('div', {'class': 'table-caption'})
caption_div_b = etree.SubElement(caption_div, 'b')
if label is not None:
append_all_below(caption_div_b, label)
if caption is not None:
#Find, optional, title element and paragraph elements
caption_title = caption.find('title')
if caption_title is not None:
append_all_below(caption_div_b, caption_title)
caption_ps = caption.findall('p')
#For title and each paragraph, give children to the div
for caption_p in caption_ps:
append_all_below(caption_div, caption_p)
#Add this to the table div
table_div.append(caption_div)
### Practical Description ###
#A table may have both, one of, or neither of graphic and table
#The different combinations should be handled, but a table-wrap
#with neither should fail with an error
#
#If there is both an image and a table, the image should be placed
#in the text flow with a link to the html table
#
#If there is an image and no table, the image should be placed in
#the text flow without a link to an html table
#
#If there is a table with no image, then the table should be placed
#in the text flow.
if graphic is not None:
#Create the image path for the graphic
xlink_href = ns_format(graphic, 'xlink:href')
graphic_xlink_href = graphic.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the new img element
img_element = etree.Element('img', {'alt': 'A Table',
'src': img_path,
'class': 'table'})
#Add this to the table div
table_div.append(img_element)
#If table, add it to the list, and link to it
if table is not None: # Both graphic and table
#The label attribute is just a means of transmitting some
#plaintext which will be used for the labeling in the html
#tables file
div = etree.SubElement(self.tables.find('body'),
'div',
{'id': table_wrap.attrib['id']})
if label is not None:
bold_label = etree.SubElement(div, 'b')
append_all_below(bold_label, label)
#Add the table to the tables list
div.append(deepcopy(table))
#Also add the table's foot if it exists
table_wrap_foot = table_wrap.find('table-wrap-foot')
if table_wrap_foot is not None:
table_wrap_foot.tag = 'div'
table_wrap_foot.attrib['class'] = 'table-wrap-foot'
div.append(table_wrap_foot)
#Create a link to the html version of the table
html_table_link = etree.Element('a')
html_table_link.attrib['href'] = self.tables_fragment.format(table_wrap.attrib['id'])
html_table_link.text = 'Go to HTML version of this table'
#Add this to the table div
table_div.append(html_table_link)
remove(table)
elif table is not None: # Table only
#Simply append the table to the table div
table_div.append(table)
elif graphic is None and table is None:
sys.exit('Encountered table-wrap element with neither graphic nor table. Exiting.')
#Replace the original table-wrap with the newly constructed div
replace(table_wrap, table_div) | [
"def",
"convert_table_wrap_elements",
"(",
"self",
")",
":",
"for",
"table_wrap",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//table-wrap'",
")",
":",
"table_div",
"=",
"etree",
".",
"Element",
"(",
"'div'",
",",
"{",
"'id'",
":",
"table_wrap",
".",
"attrib",
"[",
"'id'",
"]",
"}",
")",
"label",
"=",
"table_wrap",
".",
"find",
"(",
"'label'",
")",
"caption",
"=",
"table_wrap",
".",
"find",
"(",
"'caption'",
")",
"alternatives",
"=",
"table_wrap",
".",
"find",
"(",
"'alternatives'",
")",
"graphic",
"=",
"table_wrap",
".",
"find",
"(",
"'graphic'",
")",
"table",
"=",
"table_wrap",
".",
"find",
"(",
"'table'",
")",
"if",
"graphic",
"is",
"None",
":",
"if",
"alternatives",
"is",
"not",
"None",
":",
"graphic",
"=",
"alternatives",
".",
"find",
"(",
"'graphic'",
")",
"if",
"table",
"is",
"None",
":",
"if",
"alternatives",
"is",
"not",
"None",
":",
"table",
"=",
"alternatives",
".",
"find",
"(",
"'table'",
")",
"#Handling the label and caption",
"if",
"label",
"is",
"not",
"None",
"and",
"caption",
"is",
"not",
"None",
":",
"caption_div",
"=",
"etree",
".",
"Element",
"(",
"'div'",
",",
"{",
"'class'",
":",
"'table-caption'",
"}",
")",
"caption_div_b",
"=",
"etree",
".",
"SubElement",
"(",
"caption_div",
",",
"'b'",
")",
"if",
"label",
"is",
"not",
"None",
":",
"append_all_below",
"(",
"caption_div_b",
",",
"label",
")",
"if",
"caption",
"is",
"not",
"None",
":",
"#Find, optional, title element and paragraph elements",
"caption_title",
"=",
"caption",
".",
"find",
"(",
"'title'",
")",
"if",
"caption_title",
"is",
"not",
"None",
":",
"append_all_below",
"(",
"caption_div_b",
",",
"caption_title",
")",
"caption_ps",
"=",
"caption",
".",
"findall",
"(",
"'p'",
")",
"#For title and each paragraph, give children to the div",
"for",
"caption_p",
"in",
"caption_ps",
":",
"append_all_below",
"(",
"caption_div",
",",
"caption_p",
")",
"#Add this to the table div",
"table_div",
".",
"append",
"(",
"caption_div",
")",
"### Practical Description ###",
"#A table may have both, one of, or neither of graphic and table",
"#The different combinations should be handled, but a table-wrap",
"#with neither should fail with an error",
"#",
"#If there is both an image and a table, the image should be placed",
"#in the text flow with a link to the html table",
"#",
"#If there is an image and no table, the image should be placed in",
"#the text flow without a link to an html table",
"#",
"#If there is a table with no image, then the table should be placed",
"#in the text flow.",
"if",
"graphic",
"is",
"not",
"None",
":",
"#Create the image path for the graphic",
"xlink_href",
"=",
"ns_format",
"(",
"graphic",
",",
"'xlink:href'",
")",
"graphic_xlink_href",
"=",
"graphic",
".",
"attrib",
"[",
"xlink_href",
"]",
"file_name",
"=",
"graphic_xlink_href",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"+",
"'.png'",
"img_dir",
"=",
"'images-'",
"+",
"self",
".",
"doi_suffix",
"(",
")",
"img_path",
"=",
"'/'",
".",
"join",
"(",
"[",
"img_dir",
",",
"file_name",
"]",
")",
"#Create the new img element",
"img_element",
"=",
"etree",
".",
"Element",
"(",
"'img'",
",",
"{",
"'alt'",
":",
"'A Table'",
",",
"'src'",
":",
"img_path",
",",
"'class'",
":",
"'table'",
"}",
")",
"#Add this to the table div",
"table_div",
".",
"append",
"(",
"img_element",
")",
"#If table, add it to the list, and link to it",
"if",
"table",
"is",
"not",
"None",
":",
"# Both graphic and table",
"#The label attribute is just a means of transmitting some",
"#plaintext which will be used for the labeling in the html",
"#tables file",
"div",
"=",
"etree",
".",
"SubElement",
"(",
"self",
".",
"tables",
".",
"find",
"(",
"'body'",
")",
",",
"'div'",
",",
"{",
"'id'",
":",
"table_wrap",
".",
"attrib",
"[",
"'id'",
"]",
"}",
")",
"if",
"label",
"is",
"not",
"None",
":",
"bold_label",
"=",
"etree",
".",
"SubElement",
"(",
"div",
",",
"'b'",
")",
"append_all_below",
"(",
"bold_label",
",",
"label",
")",
"#Add the table to the tables list",
"div",
".",
"append",
"(",
"deepcopy",
"(",
"table",
")",
")",
"#Also add the table's foot if it exists",
"table_wrap_foot",
"=",
"table_wrap",
".",
"find",
"(",
"'table-wrap-foot'",
")",
"if",
"table_wrap_foot",
"is",
"not",
"None",
":",
"table_wrap_foot",
".",
"tag",
"=",
"'div'",
"table_wrap_foot",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'table-wrap-foot'",
"div",
".",
"append",
"(",
"table_wrap_foot",
")",
"#Create a link to the html version of the table",
"html_table_link",
"=",
"etree",
".",
"Element",
"(",
"'a'",
")",
"html_table_link",
".",
"attrib",
"[",
"'href'",
"]",
"=",
"self",
".",
"tables_fragment",
".",
"format",
"(",
"table_wrap",
".",
"attrib",
"[",
"'id'",
"]",
")",
"html_table_link",
".",
"text",
"=",
"'Go to HTML version of this table'",
"#Add this to the table div",
"table_div",
".",
"append",
"(",
"html_table_link",
")",
"remove",
"(",
"table",
")",
"elif",
"table",
"is",
"not",
"None",
":",
"# Table only",
"#Simply append the table to the table div",
"table_div",
".",
"append",
"(",
"table",
")",
"elif",
"graphic",
"is",
"None",
"and",
"table",
"is",
"None",
":",
"sys",
".",
"exit",
"(",
"'Encountered table-wrap element with neither graphic nor table. Exiting.'",
")",
"#Replace the original table-wrap with the newly constructed div",
"replace",
"(",
"table_wrap",
",",
"table_div",
")"
] | Responsible for the correct conversion of JPTS 3.0 <table-wrap>
elements to EPUB content.
The 'id' attribute is treated as mandatory by this method. | [
"Responsible",
"for",
"the",
"correct",
"conversion",
"of",
"JPTS",
"3",
".",
"0",
"<table",
"-",
"wrap",
">",
"elements",
"to",
"EPUB",
"content",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L1220-L1323 |
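The image-path derivation in this method (repeated in `convert_graphic_elements` below) keeps only the final dot-separated segment of the `xlink:href` and assumes a PNG extension. A worked example with a made-up but plausible PLoS-style href and DOI suffix; both values are hypothetical:

```python
# Hypothetical values for illustration only.
graphic_xlink_href = '10.1371/journal.pone.0012345.t001'
doi_suffix = 'journal.pone.0012345'            # what self.doi_suffix() might return

file_name = graphic_xlink_href.split('.')[-1] + '.png'   # 't001.png'
img_path = '/'.join(['images-' + doi_suffix, file_name])
print(img_path)  # -> images-journal.pone.0012345/t001.png
```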
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_graphic_elements | def convert_graphic_elements(self):
"""
This is a method for the odd special cases where <graphic> elements are
standalone, or rather, not a part of a standard graphical element such
as a figure or a table. This method should always be employed after the
standard cases have already been handled.
"""
for graphic in self.main.getroot().findall('.//graphic'):
graphic.tag = 'img'
graphic.attrib['alt'] = 'unowned-graphic'
ns_xlink_href = ns_format(graphic, 'xlink:href')
if ns_xlink_href in graphic.attrib:
xlink_href = graphic.attrib[ns_xlink_href]
file_name = xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
graphic.attrib['src'] = img_path
remove_all_attributes(graphic, exclude=['id', 'class', 'alt', 'src']) | python | def convert_graphic_elements(self):
"""
This is a method for the odd special cases where <graphic> elements are
standalone, or rather, not a part of a standard graphical element such
as a figure or a table. This method should always be employed after the
standard cases have already been handled.
"""
for graphic in self.main.getroot().findall('.//graphic'):
graphic.tag = 'img'
graphic.attrib['alt'] = 'unowned-graphic'
ns_xlink_href = ns_format(graphic, 'xlink:href')
if ns_xlink_href in graphic.attrib:
xlink_href = graphic.attrib[ns_xlink_href]
file_name = xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
graphic.attrib['src'] = img_path
remove_all_attributes(graphic, exclude=['id', 'class', 'alt', 'src']) | [
"def",
"convert_graphic_elements",
"(",
"self",
")",
":",
"for",
"graphic",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//graphic'",
")",
":",
"graphic",
".",
"tag",
"=",
"'img'",
"graphic",
".",
"attrib",
"[",
"'alt'",
"]",
"=",
"'unowned-graphic'",
"ns_xlink_href",
"=",
"ns_format",
"(",
"graphic",
",",
"'xlink:href'",
")",
"if",
"ns_xlink_href",
"in",
"graphic",
".",
"attrib",
":",
"xlink_href",
"=",
"graphic",
".",
"attrib",
"[",
"ns_xlink_href",
"]",
"file_name",
"=",
"xlink_href",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"+",
"'.png'",
"img_dir",
"=",
"'images-'",
"+",
"self",
".",
"doi_suffix",
"(",
")",
"img_path",
"=",
"'/'",
".",
"join",
"(",
"[",
"img_dir",
",",
"file_name",
"]",
")",
"graphic",
".",
"attrib",
"[",
"'src'",
"]",
"=",
"img_path",
"remove_all_attributes",
"(",
"graphic",
",",
"exclude",
"=",
"[",
"'id'",
",",
"'class'",
",",
"'alt'",
",",
"'src'",
"]",
")"
] | This is a method for the odd special cases where <graphic> elements are
standalone, or rather, not a part of a standard graphical element such
as a figure or a table. This method should always be employed after the
standard cases have already been handled. | [
"This",
"is",
"a",
"method",
"for",
"the",
"odd",
"special",
"cases",
"where",
"<graphic",
">",
"elements",
"are",
"standalone",
"or",
"rather",
"not",
"a",
"part",
"of",
"a",
"standard",
"graphical",
"element",
"such",
"as",
"a",
"figure",
"or",
"a",
"table",
".",
"This",
"method",
"should",
"always",
"be",
"employed",
"after",
"the",
"standard",
"cases",
"have",
"already",
"been",
"handled",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L1344-L1361 |
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.getChargeInfo | def getChargeInfo(self, CorpNum, MsgType, UserID=None):
""" 과금정보 확인
args
CorpNum : 회원 사업자번호
MsgType : 문자전송 유형
UserID : 팝빌 회원아이디
return
과금정보 객체
raise
PopbillException
"""
if MsgType == None or MsgType == "":
raise PopbillException(-99999999, "전송유형이 입력되지 않았습니다.")
return self._httpget('/Message/ChargeInfo?Type=' + MsgType, CorpNum, UserID) | python | def getChargeInfo(self, CorpNum, MsgType, UserID=None):
""" 과금정보 확인
args
CorpNum : 회원 사업자번호
MsgType : 문자전송 유형
UserID : 팝빌 회원아이디
return
과금정보 객체
raise
PopbillException
"""
if MsgType == None or MsgType == "":
raise PopbillException(-99999999, "전송유형이 입력되지 않았습니다.")
return self._httpget('/Message/ChargeInfo?Type=' + MsgType, CorpNum, UserID) | [
"def",
"getChargeInfo",
"(",
"self",
",",
"CorpNum",
",",
"MsgType",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MsgType",
"==",
"None",
"or",
"MsgType",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"전송유형이 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Message/ChargeInfo?Type='",
"+",
"MsgType",
",",
"CorpNum",
",",
"UserID",
")"
] | Check charge information
args
CorpNum : member's business registration number
MsgType : message type
UserID : Popbill member ID
return
charge information object
raise
PopbillException | [
"과금정보",
"확인",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"MsgType",
":",
"문자전송",
"유형",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"과금정보",
"객체",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L30-L44 |
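A hypothetical usage sketch for `getChargeInfo`, following the service-construction pattern the Popbill SDK normally uses; the credentials and corporate registration number are placeholders, and the attribute names on the returned object are assumptions:

```python
from popbill import MessageService, PopbillException

messageService = MessageService("LINK_ID", "SECRET_KEY")  # placeholder credentials

try:
    info = messageService.getChargeInfo("1234567890", "SMS")
    print(info.unitCost, info.chargeMethod)  # assumed field names
except PopbillException as pe:
    print(pe.code, pe.message)
```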
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.sendSMS_multi | def sendSMS_multi(self, CorpNum, Sender, Contents, Messages, reserveDT, adsYN=False, UserID=None, RequestNum=None):
""" 단문 문자메시지 다량전송
args
CorpNum : 팝빌회원 사업자번호
Sender : 발신자번호 (동보전송용)
Contents : 문자 내용 (동보전송용)
Messages : 개별전송정보 배열
reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
RequestNum : 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
return self.sendMessage("SMS", CorpNum, Sender, '', '', Contents, Messages, reserveDT, adsYN, UserID,
RequestNum) | python | def sendSMS_multi(self, CorpNum, Sender, Contents, Messages, reserveDT, adsYN=False, UserID=None, RequestNum=None):
""" 단문 문자메시지 다량전송
args
CorpNum : 팝빌회원 사업자번호
Sender : 발신자번호 (동보전송용)
Contents : 문자 내용 (동보전송용)
Messages : 개별전송정보 배열
reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
RequestNum : 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
return self.sendMessage("SMS", CorpNum, Sender, '', '', Contents, Messages, reserveDT, adsYN, UserID,
RequestNum) | [
"def",
"sendSMS_multi",
"(",
"self",
",",
"CorpNum",
",",
"Sender",
",",
"Contents",
",",
"Messages",
",",
"reserveDT",
",",
"adsYN",
"=",
"False",
",",
"UserID",
"=",
"None",
",",
"RequestNum",
"=",
"None",
")",
":",
"return",
"self",
".",
"sendMessage",
"(",
"\"SMS\"",
",",
"CorpNum",
",",
"Sender",
",",
"''",
",",
"''",
",",
"Contents",
",",
"Messages",
",",
"reserveDT",
",",
"adsYN",
",",
"UserID",
",",
"RequestNum",
")"
] | Send short text messages (SMS) to multiple recipients
args
CorpNum : Popbill member's business registration number
Sender : sender number (used for bulk sending)
Contents : message body (used for bulk sending)
Messages : array of per-recipient message records
reserveDT : reserved send time (format: yyyyMMddHHmmss)
UserID : Popbill member ID
RequestNum : send request number
return
receipt number (receiptNum)
raise
PopbillException | [
"단문",
"문자메시지",
"다량전송",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"Sender",
":",
"발신자번호",
"(",
"동보전송용",
")",
"Contents",
":",
"문자",
"내용",
"(",
"동보전송용",
")",
"Messages",
":",
"개별전송정보",
"배열",
"reserveDT",
":",
"예약전송시간",
"(",
"형식",
".",
"yyyyMMddHHmmss",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"RequestNum",
":",
"전송요청번호",
"return",
"접수번호",
"(",
"receiptNum",
")",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L104-L121 |
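A hypothetical bulk-send sketch for `sendSMS_multi`, reusing the `messageService` object from the previous sketch. The `MessageReceiver` keyword names (`snd`, `rcv`, `rcvnm`, `msg`) are grounded in the `sendLMS` implementation below; treating an empty `reserveDT` as "send immediately" is an assumption about the API:

```python
from popbill import MessageReceiver

messages = [
    MessageReceiver(snd="07012341234", rcv="01011112222", rcvnm="Alice", msg="Hello"),
    MessageReceiver(snd="07012341234", rcv="01033334444", rcvnm="Bob", msg="Hi"),
]

receiptNum = messageService.sendSMS_multi(
    "1234567890",    # CorpNum (placeholder)
    "07012341234",   # Sender, used for records without their own snd
    "",              # Contents fallback body
    messages,
    "",              # reserveDT: empty string assumed to mean "send now"
)
```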
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.sendLMS | def sendLMS(self, CorpNum, Sender, Receiver, ReceiverName, Subject, Contents, reserveDT, adsYN=False, UserID=None,
SenderName=None, RequestNum=None):
""" 장문 문자메시지 단건 전송
args
CorpNum : 팝빌회원 사업자번호
Sender : 발신번호
Receiver : 수신번호
ReceiverName : 수신자명
Subject : 메시지 제목
Contents : 메시지 내용(2000Byte 초과시 길이가 조정되어 전송됨)
reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
SenderName : 발신자명
RequestNum = 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
Messages = []
Messages.append(MessageReceiver(
snd=Sender,
sndnm=SenderName,
rcv=Receiver,
rcvnm=ReceiverName,
msg=Contents,
sjt=Subject)
)
return self.sendMessage("LMS", CorpNum, Sender, '', Subject, Contents, Messages, reserveDT, adsYN, UserID,
RequestNum) | python | def sendLMS(self, CorpNum, Sender, Receiver, ReceiverName, Subject, Contents, reserveDT, adsYN=False, UserID=None,
SenderName=None, RequestNum=None):
""" 장문 문자메시지 단건 전송
args
CorpNum : 팝빌회원 사업자번호
Sender : 발신번호
Receiver : 수신번호
ReceiverName : 수신자명
Subject : 메시지 제목
Contents : 메시지 내용(2000Byte 초과시 길이가 조정되어 전송됨)
reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
SenderName : 발신자명
RequestNum = 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
Messages = []
Messages.append(MessageReceiver(
snd=Sender,
sndnm=SenderName,
rcv=Receiver,
rcvnm=ReceiverName,
msg=Contents,
sjt=Subject)
)
return self.sendMessage("LMS", CorpNum, Sender, '', Subject, Contents, Messages, reserveDT, adsYN, UserID,
RequestNum) | [
"def",
"sendLMS",
"(",
"self",
",",
"CorpNum",
",",
"Sender",
",",
"Receiver",
",",
"ReceiverName",
",",
"Subject",
",",
"Contents",
",",
"reserveDT",
",",
"adsYN",
"=",
"False",
",",
"UserID",
"=",
"None",
",",
"SenderName",
"=",
"None",
",",
"RequestNum",
"=",
"None",
")",
":",
"Messages",
"=",
"[",
"]",
"Messages",
".",
"append",
"(",
"MessageReceiver",
"(",
"snd",
"=",
"Sender",
",",
"sndnm",
"=",
"SenderName",
",",
"rcv",
"=",
"Receiver",
",",
"rcvnm",
"=",
"ReceiverName",
",",
"msg",
"=",
"Contents",
",",
"sjt",
"=",
"Subject",
")",
")",
"return",
"self",
".",
"sendMessage",
"(",
"\"LMS\"",
",",
"CorpNum",
",",
"Sender",
",",
"''",
",",
"Subject",
",",
"Contents",
",",
"Messages",
",",
"reserveDT",
",",
"adsYN",
",",
"UserID",
",",
"RequestNum",
")"
] | Send a single long text message (LMS)
args
CorpNum : Popbill member's business registration number
Sender : sender number
Receiver : recipient number
ReceiverName : recipient name
Subject : message subject
Contents : message body (trimmed if it exceeds 2,000 bytes)
reserveDT : reserved send time (format: yyyyMMddHHmmss)
UserID : Popbill member ID
SenderName : sender name
RequestNum : send request number
return
receipt number (receiptNum)
raise
PopbillException | [
"장문",
"문자메시지",
"단건",
"전송",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"Sender",
":",
"발신번호",
"Receiver",
":",
"수신번호",
"ReceiverName",
":",
"수신자명",
"Subject",
":",
"메시지",
"제목",
"Contents",
":",
"메시지",
"내용",
"(",
"2000Byte",
"초과시",
"길이가",
"조정되어",
"전송됨",
")",
"reserveDT",
":",
"예약전송시간",
"(",
"형식",
".",
"yyyyMMddHHmmss",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"SenderName",
":",
"발신자명",
"RequestNum",
"=",
"전송요청번호",
"return",
"접수번호",
"(",
"receiptNum",
")",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L123-L154 |
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.sendLMS_multi | def sendLMS_multi(self, CorpNum, Sender, Subject, Contents, Messages, reserveDT, adsYN=False, UserID=None,
RequestNum=None):
""" 장문 문자메시지 다량전송
args
CorpNum : 팝빌회원 사업자번호
Sender : 발신자번호 (동보전송용)
Subject : 장문 메시지 제목 (동보전송용)
Contents : 장문 문자 내용 (동보전송용)
Messages : 개별전송정보 배열
reserveDT : 예약시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
RequestNum = 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
return self.sendMessage("LMS", CorpNum, Sender, '', Subject, Contents, Messages, reserveDT, adsYN, UserID,
RequestNum) | python | def sendLMS_multi(self, CorpNum, Sender, Subject, Contents, Messages, reserveDT, adsYN=False, UserID=None,
RequestNum=None):
""" 장문 문자메시지 다량전송
args
CorpNum : 팝빌회원 사업자번호
Sender : 발신자번호 (동보전송용)
Subject : 장문 메시지 제목 (동보전송용)
Contents : 장문 문자 내용 (동보전송용)
Messages : 개별전송정보 배열
reserveDT : 예약시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
RequestNum = 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
return self.sendMessage("LMS", CorpNum, Sender, '', Subject, Contents, Messages, reserveDT, adsYN, UserID,
RequestNum) | [
"def",
"sendLMS_multi",
"(",
"self",
",",
"CorpNum",
",",
"Sender",
",",
"Subject",
",",
"Contents",
",",
"Messages",
",",
"reserveDT",
",",
"adsYN",
"=",
"False",
",",
"UserID",
"=",
"None",
",",
"RequestNum",
"=",
"None",
")",
":",
"return",
"self",
".",
"sendMessage",
"(",
"\"LMS\"",
",",
"CorpNum",
",",
"Sender",
",",
"''",
",",
"Subject",
",",
"Contents",
",",
"Messages",
",",
"reserveDT",
",",
"adsYN",
",",
"UserID",
",",
"RequestNum",
")"
] | Send long text messages (LMS) to multiple recipients
args
CorpNum : Popbill member's business registration number
Sender : sender number (used for bulk sending)
Subject : message subject (used for bulk sending)
Contents : message body (used for bulk sending)
Messages : array of per-recipient message records
reserveDT : reserved send time (format: yyyyMMddHHmmss)
UserID : Popbill member ID
RequestNum : send request number
return
receipt number (receiptNum)
raise
PopbillException | [
"장문",
"문자메시지",
"다량전송",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"Sender",
":",
"발신자번호",
"(",
"동보전송용",
")",
"Subject",
":",
"장문",
"메시지",
"제목",
"(",
"동보전송용",
")",
"Contents",
":",
"장문",
"문자",
"내용",
"(",
"동보전송용",
")",
"Messages",
":",
"개별전송정보",
"배열",
"reserveDT",
":",
"예약시간",
"(",
"형식",
".",
"yyyyMMddHHmmss",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"RequestNum",
"=",
"전송요청번호",
"return",
"접수번호",
"(",
"receiptNum",
")",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L156-L175 |
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.sendMMS_Multi | def sendMMS_Multi(self, CorpNum, Sender, Subject, Contents, Messages, FilePath, reserveDT, adsYN=False, UserID=None,
RequestNum=None):
""" 멀티 문자메시지 다량 전송
args
CorpNum : 팝빌회원 사업자번호
Sender : 발신자번호 (동보전송용)
Subject : 장문 메시지 제목 (동보전송용)
Contents : 장문 문자 내용 (동보전송용)
Messages : 개별전송정보 배열
FilePath : 전송하고자 하는 파일 경로
reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
RequestNum = 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
if Messages == None or len(Messages) < 1:
raise PopbillException(-99999999, "전송할 메시지가 입력되지 않았습니다.")
req = {}
if Sender != None or Sender != '':
req['snd'] = Sender
if Contents != None or Contents != '':
req['content'] = Contents
if Subject != None or Subject != '':
req['subject'] = Subject
if reserveDT != None or reserveDT != '':
req['sndDT'] = reserveDT
if Messages != None or Messages != '':
req['msgs'] = Messages
if RequestNum != None or RequestNum != '':
req['requestNum'] = RequestNum
if adsYN:
req['adsYN'] = True
postData = self._stringtify(req)
files = []
try:
with open(FilePath, "rb") as F:
files = [File(fieldName='file',
fileName=F.name,
fileData=F.read())]
except IOError:
raise PopbillException(-99999999, "해당경로에 파일이 없거나 읽을 수 없습니다.")
result = self._httppost_files('/MMS', postData, files, CorpNum, UserID)
return result.receiptNum | python | def sendMMS_Multi(self, CorpNum, Sender, Subject, Contents, Messages, FilePath, reserveDT, adsYN=False, UserID=None,
RequestNum=None):
""" 멀티 문자메시지 다량 전송
args
CorpNum : 팝빌회원 사업자번호
Sender : 발신자번호 (동보전송용)
Subject : 장문 메시지 제목 (동보전송용)
Contents : 장문 문자 내용 (동보전송용)
Messages : 개별전송정보 배열
FilePath : 전송하고자 하는 파일 경로
reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
RequestNum = 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
if Messages == None or len(Messages) < 1:
raise PopbillException(-99999999, "전송할 메시지가 입력되지 않았습니다.")
req = {}
if Sender != None or Sender != '':
req['snd'] = Sender
if Contents != None or Contents != '':
req['content'] = Contents
if Subject != None or Subject != '':
req['subject'] = Subject
if reserveDT != None or reserveDT != '':
req['sndDT'] = reserveDT
if Messages != None or Messages != '':
req['msgs'] = Messages
if RequestNum != None or RequestNum != '':
req['requestNum'] = RequestNum
if adsYN:
req['adsYN'] = True
postData = self._stringtify(req)
files = []
try:
with open(FilePath, "rb") as F:
files = [File(fieldName='file',
fileName=F.name,
fileData=F.read())]
except IOError:
raise PopbillException(-99999999, "해당경로에 파일이 없거나 읽을 수 없습니다.")
result = self._httppost_files('/MMS', postData, files, CorpNum, UserID)
return result.receiptNum | [
"def",
"sendMMS_Multi",
"(",
"self",
",",
"CorpNum",
",",
"Sender",
",",
"Subject",
",",
"Contents",
",",
"Messages",
",",
"FilePath",
",",
"reserveDT",
",",
"adsYN",
"=",
"False",
",",
"UserID",
"=",
"None",
",",
"RequestNum",
"=",
"None",
")",
":",
"if",
"Messages",
"==",
"None",
"or",
"len",
"(",
"Messages",
")",
"<",
"1",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"전송할 메시지가 입력되지 않았습니다.\")\r",
"",
"req",
"=",
"{",
"}",
"if",
"Sender",
"!=",
"None",
"or",
"Sender",
"!=",
"''",
":",
"req",
"[",
"'snd'",
"]",
"=",
"Sender",
"if",
"Contents",
"!=",
"None",
"or",
"Contents",
"!=",
"''",
":",
"req",
"[",
"'content'",
"]",
"=",
"Contents",
"if",
"Subject",
"!=",
"None",
"or",
"Subject",
"!=",
"''",
":",
"req",
"[",
"'subject'",
"]",
"=",
"Subject",
"if",
"reserveDT",
"!=",
"None",
"or",
"reserveDT",
"!=",
"''",
":",
"req",
"[",
"'sndDT'",
"]",
"=",
"reserveDT",
"if",
"Messages",
"!=",
"None",
"or",
"Messages",
"!=",
"''",
":",
"req",
"[",
"'msgs'",
"]",
"=",
"Messages",
"if",
"RequestNum",
"!=",
"None",
"or",
"RequestNum",
"!=",
"''",
":",
"req",
"[",
"'requestNum'",
"]",
"=",
"RequestNum",
"if",
"adsYN",
":",
"req",
"[",
"'adsYN'",
"]",
"=",
"True",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"req",
")",
"files",
"=",
"[",
"]",
"try",
":",
"with",
"open",
"(",
"FilePath",
",",
"\"rb\"",
")",
"as",
"F",
":",
"files",
"=",
"[",
"File",
"(",
"fieldName",
"=",
"'file'",
",",
"fileName",
"=",
"F",
".",
"name",
",",
"fileData",
"=",
"F",
".",
"read",
"(",
")",
")",
"]",
"except",
"IOError",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"해당경로에 파일이 없거나 읽을 수 없습니다.\")\r",
"",
"result",
"=",
"self",
".",
"_httppost_files",
"(",
"'/MMS'",
",",
"postData",
",",
"files",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"result",
".",
"receiptNum"
] | Send multimedia messages (MMS) to multiple recipients
args
CorpNum : Popbill member's business registration number
Sender : sender number (used for bulk sending)
Subject : message subject (used for bulk sending)
Contents : message body (used for bulk sending)
Messages : array of per-recipient message records
FilePath : path of the file to attach
reserveDT : reserved send time (format: yyyyMMddHHmmss)
UserID : Popbill member ID
RequestNum : send request number
return
receipt number (receiptNum)
raise
PopbillException | [
"멀티",
"문자메시지",
"다량",
"전송",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"Sender",
":",
"발신자번호",
"(",
"동보전송용",
")",
"Subject",
":",
"장문",
"메시지",
"제목",
"(",
"동보전송용",
")",
"Contents",
":",
"장문",
"문자",
"내용",
"(",
"동보전송용",
")",
"Messages",
":",
"개별전송정보",
"배열",
"FilePath",
":",
"전송하고자",
"하는",
"파일",
"경로",
"reserveDT",
":",
"예약전송시간",
"(",
"형식",
".",
"yyyyMMddHHmmss",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"RequestNum",
"=",
"전송요청번호",
"return",
"접수번호",
"(",
"receiptNum",
")",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L210-L261 |
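Unlike the text-only senders, `sendMMS_Multi` posts a multipart request. A minimal sketch of preparing the attachment with the same `File` container the method uses; the import location is an assumption:

```python
from popbill import File  # assumed import location for the multipart container

with open("promo.jpg", "rb") as f:
    files = [File(fieldName='file', fileName=f.name, fileData=f.read())]
# `files` then rides alongside the JSON payload in _httppost_files('/MMS', ...).
```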
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.sendMessage | def sendMessage(self, MsgType, CorpNum, Sender, SenderName, Subject, Contents, Messages, reserveDT, adsYN=False,
UserID=None, RequestNum=None):
""" 문자 메시지 전송
args
MsgType : 문자 전송 유형(단문:SMS, 장문:LMS, 단/장문:XMS)
CorpNum : 팝빌회원 사업자번호
Sender : 발신자번호 (동보전송용)
Subject : 장문 메시지 제목 (동보전송용)
Contents : 장문 문자 내용 (동보전송용)
Messages : 개별전송정보 배열
reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
RequestNum : 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
if MsgType == None or MsgType == '':
raise PopbillException(-99999999, "문자 전송 유형이 입력되지 않았습니다.")
if Messages == None or len(Messages) < 1:
raise PopbillException(-99999999, "전송할 메시지가 입력되지 않았습니다.")
req = {}
if Sender != None or Sender != '':
req['snd'] = Sender
if SenderName != None or SenderName != '':
req['sndnm'] = SenderName
if Contents != None or Contents != '':
req['content'] = Contents
if Subject != None or Subject != '':
req['subject'] = Subject
if reserveDT != None or reserveDT != '':
req['sndDT'] = reserveDT
if Messages != None or Messages != '':
req['msgs'] = Messages
if RequestNum != None or RequestNum != '':
req['requestnum'] = RequestNum
if adsYN:
req['adsYN'] = True
postData = self._stringtify(req)
result = self._httppost('/' + MsgType, postData, CorpNum, UserID)
return result.receiptNum | python | def sendMessage(self, MsgType, CorpNum, Sender, SenderName, Subject, Contents, Messages, reserveDT, adsYN=False,
UserID=None, RequestNum=None):
""" 문자 메시지 전송
args
MsgType : 문자 전송 유형(단문:SMS, 장문:LMS, 단/장문:XMS)
CorpNum : 팝빌회원 사업자번호
Sender : 발신자번호 (동보전송용)
Subject : 장문 메시지 제목 (동보전송용)
Contents : 장문 문자 내용 (동보전송용)
Messages : 개별전송정보 배열
reserveDT : 예약전송시간 (형식. yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
RequestNum : 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
if MsgType == None or MsgType == '':
raise PopbillException(-99999999, "문자 전송 유형이 입력되지 않았습니다.")
if Messages == None or len(Messages) < 1:
raise PopbillException(-99999999, "전송할 메시지가 입력되지 않았습니다.")
req = {}
if Sender != None or Sender != '':
req['snd'] = Sender
if SenderName != None or SenderName != '':
req['sndnm'] = SenderName
if Contents != None or Contents != '':
req['content'] = Contents
if Subject != None or Subject != '':
req['subject'] = Subject
if reserveDT != None or reserveDT != '':
req['sndDT'] = reserveDT
if Messages != None or Messages != '':
req['msgs'] = Messages
if RequestNum != None or RequestNum != '':
req['requestnum'] = RequestNum
if adsYN:
req['adsYN'] = True
postData = self._stringtify(req)
result = self._httppost('/' + MsgType, postData, CorpNum, UserID)
return result.receiptNum | [
"def",
"sendMessage",
"(",
"self",
",",
"MsgType",
",",
"CorpNum",
",",
"Sender",
",",
"SenderName",
",",
"Subject",
",",
"Contents",
",",
"Messages",
",",
"reserveDT",
",",
"adsYN",
"=",
"False",
",",
"UserID",
"=",
"None",
",",
"RequestNum",
"=",
"None",
")",
":",
"if",
"MsgType",
"==",
"None",
"or",
"MsgType",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"문자 전송 유형이 입력되지 않았습니다.\")\r",
"",
"if",
"Messages",
"==",
"None",
"or",
"len",
"(",
"Messages",
")",
"<",
"1",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"전송할 메시지가 입력되지 않았습니다.\")\r",
"",
"req",
"=",
"{",
"}",
"if",
"Sender",
"!=",
"None",
"or",
"Sender",
"!=",
"''",
":",
"req",
"[",
"'snd'",
"]",
"=",
"Sender",
"if",
"SenderName",
"!=",
"None",
"or",
"SenderName",
"!=",
"''",
":",
"req",
"[",
"'sndnm'",
"]",
"=",
"SenderName",
"if",
"Contents",
"!=",
"None",
"or",
"Contents",
"!=",
"''",
":",
"req",
"[",
"'content'",
"]",
"=",
"Contents",
"if",
"Subject",
"!=",
"None",
"or",
"Subject",
"!=",
"''",
":",
"req",
"[",
"'subject'",
"]",
"=",
"Subject",
"if",
"reserveDT",
"!=",
"None",
"or",
"reserveDT",
"!=",
"''",
":",
"req",
"[",
"'sndDT'",
"]",
"=",
"reserveDT",
"if",
"Messages",
"!=",
"None",
"or",
"Messages",
"!=",
"''",
":",
"req",
"[",
"'msgs'",
"]",
"=",
"Messages",
"if",
"RequestNum",
"!=",
"None",
"or",
"RequestNum",
"!=",
"''",
":",
"req",
"[",
"'requestnum'",
"]",
"=",
"RequestNum",
"if",
"adsYN",
":",
"req",
"[",
"'adsYN'",
"]",
"=",
"True",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"req",
")",
"result",
"=",
"self",
".",
"_httppost",
"(",
"'/'",
"+",
"MsgType",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"result",
".",
"receiptNum"
] | Send text messages
args
MsgType : message type (short: SMS, long: LMS, short/long: XMS)
CorpNum : Popbill member's business registration number
Sender : sender number (used for bulk sending)
Subject : message subject (used for bulk sending)
Contents : message body (used for bulk sending)
Messages : array of per-recipient message records
reserveDT : reserved send time (format: yyyyMMddHHmmss)
UserID : Popbill member ID
RequestNum : send request number
return
receipt number (receiptNum)
raise
PopbillException | [
"문자",
"메시지",
"전송",
"args",
"MsgType",
":",
"문자",
"전송",
"유형",
"(",
"단문",
":",
"SMS",
"장문",
":",
"LMS",
"단",
"/",
"장문",
":",
"XMS",
")",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"Sender",
":",
"발신자번호",
"(",
"동보전송용",
")",
"Subject",
":",
"장문",
"메시지",
"제목",
"(",
"동보전송용",
")",
"Contents",
":",
"장문",
"문자",
"내용",
"(",
"동보전송용",
")",
"Messages",
":",
"개별전송정보",
"배열",
"reserveDT",
":",
"예약전송시간",
"(",
"형식",
".",
"yyyyMMddHHmmss",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"RequestNum",
":",
"전송요청번호",
"return",
"접수번호",
"(",
"receiptNum",
")",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L316-L363 |
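Two things stand out when reading this dispatcher closely. First, guards of the form `if X != None or X != '':` are always true (any value differs from at least one of `None` and `''`), so every key is set unconditionally; the sketch below shows the presumably intended test. Second, the request number is posted here under the lowercase key `'requestnum'`, while `sendMMS_Multi` above uses `'requestNum'`; whichever spelling the service expects, the two paths are inconsistent.

```python
def set_if_present(req, key, value):
    """Presumably intended guard logic (note `and`, not `or`)."""
    if value is not None and value != '':
        req[key] = value

req = {}
set_if_present(req, 'snd', '07012341234')
set_if_present(req, 'content', None)   # skipped, unlike the always-true original
print(req)                             # -> {'snd': '07012341234'}
```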
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.getMessages | def getMessages(self, CorpNum, ReceiptNum, UserID=None):
""" 문자 전송결과 조회
args
CorpNum : 팝빌회원 사업자번호
ReceiptNum : 전송요청시 발급받은 접수번호
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
"""
if ReceiptNum == None or len(ReceiptNum) != 18:
raise PopbillException(-99999999, "접수번호가 올바르지 않습니다.")
return self._httpget('/Message/' + ReceiptNum, CorpNum, UserID) | python | def getMessages(self, CorpNum, ReceiptNum, UserID=None):
""" 문자 전송결과 조회
args
CorpNum : 팝빌회원 사업자번호
ReceiptNum : 전송요청시 발급받은 접수번호
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
"""
if ReceiptNum == None or len(ReceiptNum) != 18:
raise PopbillException(-99999999, "접수번호가 올바르지 않습니다.")
return self._httpget('/Message/' + ReceiptNum, CorpNum, UserID) | [
"def",
"getMessages",
"(",
"self",
",",
"CorpNum",
",",
"ReceiptNum",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"ReceiptNum",
"==",
"None",
"or",
"len",
"(",
"ReceiptNum",
")",
"!=",
"18",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"접수번호가 올바르지 않습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Message/'",
"+",
"ReceiptNum",
",",
"CorpNum",
",",
"UserID",
")"
] | Check message transmission results
args
CorpNum : Popbill member's business registration number
ReceiptNum : receipt number issued when the send was requested
UserID : Popbill member ID
return
transmission information as list
raise
PopbillException | [
"문자",
"전송결과",
"조회",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ReceiptNum",
":",
"전송요청시",
"발급받은",
"접수번호",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"전송정보",
"as",
"list",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L365-L379 |
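A hypothetical polling sketch for `getMessages`, continuing the earlier examples; the attribute names on the returned records are assumptions, not confirmed by this file:

```python
results = messageService.getMessages('1234567890', receiptNum)
for r in results:
    print(r.receiveNum, r.result)   # assumed field names on the state objects
```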
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.getMessagesRN | def getMessagesRN(self, CorpNum, RequestNum, UserID=None):
""" 문자 전송결과 조회
args
CorpNum : 팝빌회원 사업자번호
RequestNum : 전송요청시 할당한 전송요청번호
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
"""
if RequestNum == None or RequestNum == '':
raise PopbillException(-99999999, "요청번호가 입력되지 않았습니다.")
return self._httpget('/Message/Get/' + RequestNum, CorpNum, UserID) | python | def getMessagesRN(self, CorpNum, RequestNum, UserID=None):
""" 문자 전송결과 조회
args
CorpNum : 팝빌회원 사업자번호
RequestNum : 전송요청시 할당한 전송요청번호
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
"""
if RequestNum == None or RequestNum == '':
raise PopbillException(-99999999, "요청번호가 입력되지 않았습니다.")
return self._httpget('/Message/Get/' + RequestNum, CorpNum, UserID) | [
"def",
"getMessagesRN",
"(",
"self",
",",
"CorpNum",
",",
"RequestNum",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"RequestNum",
"==",
"None",
"or",
"RequestNum",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"요청번호가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Message/Get/'",
"+",
"RequestNum",
",",
"CorpNum",
",",
"UserID",
")"
] | Check message transmission results
args
CorpNum : Popbill member's business registration number
RequestNum : request number assigned when the send was requested
UserID : Popbill member ID
return
transmission information as list
raise
PopbillException | [
"문자",
"전송결과",
"조회",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"RequestNum",
":",
"전송요청시",
"할당한",
"전송요청번호",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"전송정보",
"as",
"list",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L381-L395 |
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.cancelReserveRN | def cancelReserveRN(self, CorpNum, RequestNum, UserID=None):
""" 문자 예약전송 취소
args
CorpNum : 팝빌회원 사업자번호
RequestNum : 전송요청시 할당한 전송요청번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if RequestNum == None or RequestNum == '':
raise PopbillException(-99999999, "요청번호가 입력되지 않았습니다.")
return self._httpget('/Message/Cancel/' + RequestNum, CorpNum, UserID) | python | def cancelReserveRN(self, CorpNum, RequestNum, UserID=None):
""" 문자 예약전송 취소
args
CorpNum : 팝빌회원 사업자번호
RequestNum : 전송요청시 할당한 전송요청번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if RequestNum == None or RequestNum == '':
raise PopbillException(-99999999, "요청번호가 입력되지 않았습니다.")
return self._httpget('/Message/Cancel/' + RequestNum, CorpNum, UserID) | [
"def",
"cancelReserveRN",
"(",
"self",
",",
"CorpNum",
",",
"RequestNum",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"RequestNum",
"==",
"None",
"or",
"RequestNum",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"요청번호가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Message/Cancel/'",
"+",
"RequestNum",
",",
"CorpNum",
",",
"UserID",
")"
] | Cancel a reserved message transmission
args
CorpNum : Popbill member's business registration number
RequestNum : request number assigned when the send was requested
UserID : Popbill member ID
return
processing result, consisting of code and message
raise
PopbillException | [
"문자",
"예약전송",
"취소",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"RequestNum",
":",
"전송요청시",
"할당한",
"전송요청번호",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L413-L427 |
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.getURL | def getURL(self, CorpNum, UserID, ToGo):
""" 문자 관련 팝빌 URL
args
CorpNum : 팝빌회원 사업자번호
UserID : 팝빌회원 아이디
TOGO : BOX (전송내역조회 팝업)
return
팝빌 URL
raise
PopbillException
"""
if ToGo == None or ToGo == '':
raise PopbillException(-99999999, "TOGO값이 입력되지 않았습니다.")
result = self._httpget('/Message/?TG=' + ToGo, CorpNum, UserID)
return result.url | python | def getURL(self, CorpNum, UserID, ToGo):
""" 문자 관련 팝빌 URL
args
CorpNum : 팝빌회원 사업자번호
UserID : 팝빌회원 아이디
TOGO : BOX (전송내역조회 팝업)
return
팝빌 URL
raise
PopbillException
"""
if ToGo == None or ToGo == '':
raise PopbillException(-99999999, "TOGO값이 입력되지 않았습니다.")
result = self._httpget('/Message/?TG=' + ToGo, CorpNum, UserID)
return result.url | [
"def",
"getURL",
"(",
"self",
",",
"CorpNum",
",",
"UserID",
",",
"ToGo",
")",
":",
"if",
"ToGo",
"==",
"None",
"or",
"ToGo",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"TOGO값이 입력되지 않았습니다.\")\r",
"",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/Message/?TG='",
"+",
"ToGo",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"result",
".",
"url"
] | Popbill URL for message-related pages
args
CorpNum : Popbill member's business registration number
UserID : Popbill member ID
ToGo : BOX (send-history inquiry popup)
return
Popbill URL
raise
PopbillException | [
"문자",
"관련",
"팝빌",
"URL",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"UserID",
":",
"팝빌회원",
"아이디",
"TOGO",
":",
"BOX",
"(",
"전송내역조회",
"팝업",
")",
"return",
"팝빌",
"URL",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L468-L484 |
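A one-line usage sketch for `getURL`, again with placeholder member details; `'BOX'` is the only TOGO value the docstring names:

```python
url = messageService.getURL('1234567890', 'testuser', 'BOX')  # placeholders
print(url)  # Popbill popup URL for the send-history page
```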
linkhub-sdk/popbill.py | popbill/messageService.py | MessageService.getStates | def getStates(self, Corpnum, reciptNumList, UserID=None):
""" 전송내역 요약정보 확인
args
CorpNum : 팝빌회원 사업자번호
reciptNumList : 문자전송 접수번호 배열
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
"""
if reciptNumList == None or len(reciptNumList) < 1:
raise PopbillException(-99999999, "접수번호가 입력되지 않았습니다.")
postData = self._stringtify(reciptNumList)
return self._httppost('/Message/States', postData, Corpnum, UserID) | python | def getStates(self, Corpnum, reciptNumList, UserID=None):
""" 전송내역 요약정보 확인
args
CorpNum : 팝빌회원 사업자번호
reciptNumList : 문자전송 접수번호 배열
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
"""
if reciptNumList == None or len(reciptNumList) < 1:
raise PopbillException(-99999999, "접수번호가 입력되지 않았습니다.")
postData = self._stringtify(reciptNumList)
return self._httppost('/Message/States', postData, Corpnum, UserID) | [
"def",
"getStates",
"(",
"self",
",",
"Corpnum",
",",
"reciptNumList",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"reciptNumList",
"==",
"None",
"or",
"len",
"(",
"reciptNumList",
")",
"<",
"1",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"접수번호가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"reciptNumList",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Message/States'",
",",
"postData",
",",
"Corpnum",
",",
"UserID",
")"
] | Check summary information for sent messages
args
CorpNum : Popbill member's business registration number
reciptNumList : array of message receipt numbers
UserID : Popbill member ID
return
transmission information as list
raise
PopbillException | [
"전송내역",
"요약정보",
"확인",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"reciptNumList",
":",
"문자전송",
"접수번호",
"배열",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"전송정보",
"as",
"list",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L524-L540 |
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_fitBetaCurve.py | funcGauss1D | def funcGauss1D(x, mu, sig):
""" Create 1D Gaussian. Source:
http://mathworld.wolfram.com/GaussianFunction.html
"""
arrOut = np.exp(-np.power((x - mu)/sig, 2.)/2)
# normalize
arrOut = arrOut/(np.sqrt(2.*np.pi)*sig)
return arrOut | python | def funcGauss1D(x, mu, sig):
""" Create 1D Gaussian. Source:
http://mathworld.wolfram.com/GaussianFunction.html
"""
arrOut = np.exp(-np.power((x - mu)/sig, 2.)/2)
# normalize
arrOut = arrOut/(np.sqrt(2.*np.pi)*sig)
return arrOut | [
"def",
"funcGauss1D",
"(",
"x",
",",
"mu",
",",
"sig",
")",
":",
"arrOut",
"=",
"np",
".",
"exp",
"(",
"-",
"np",
".",
"power",
"(",
"(",
"x",
"-",
"mu",
")",
"/",
"sig",
",",
"2.",
")",
"/",
"2",
")",
"# normalize",
"arrOut",
"=",
"arrOut",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2.",
"*",
"np",
".",
"pi",
")",
"*",
"sig",
")",
"return",
"arrOut"
] | Create 1D Gaussian. Source:
http://mathworld.wolfram.com/GaussianFunction.html | [
"Create",
"1D",
"Gaussian",
".",
"Source",
":",
"http",
":",
"//",
"mathworld",
".",
"wolfram",
".",
"com",
"/",
"GaussianFunction",
".",
"html"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_fitBetaCurve.py#L77-L85 |
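The body of `funcGauss1D` returns the normalized density exp(-(x - mu)^2 / (2 sig^2)) / (sig * sqrt(2 pi)). A quick, self-contained numerical check that it integrates to one:

```python
import numpy as np

mu, sig = 0.0, 2.0
x = np.linspace(-10.0, 10.0, 10001)
y = np.exp(-np.power((x - mu) / sig, 2.) / 2) / (np.sqrt(2. * np.pi) * sig)
print(np.trapz(y, x))  # ~1.0, confirming the normalization
```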
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/commands/configure.py | absolute_path | def absolute_path(user_path):
"""
Some paths must be made absolute, this will attempt to convert them.
"""
if os.path.abspath(user_path):
return unix_path_coercion(user_path)
else:
try:
openaccess_epub.utils.evaluate_relative_path(relative=user_path)
except:
raise ValidationError('This path could not be rendered as absolute') | python | def absolute_path(user_path):
"""
Some paths must be made absolute, this will attempt to convert them.
"""
if os.path.abspath(user_path):
return unix_path_coercion(user_path)
else:
try:
openaccess_epub.utils.evaluate_relative_path(relative=user_path)
except:
raise ValidationError('This path could not be rendered as absolute') | [
"def",
"absolute_path",
"(",
"user_path",
")",
":",
"if",
"os",
".",
"path",
".",
"abspath",
"(",
"user_path",
")",
":",
"return",
"unix_path_coercion",
"(",
"user_path",
")",
"else",
":",
"try",
":",
"openaccess_epub",
".",
"utils",
".",
"evaluate_relative_path",
"(",
"relative",
"=",
"user_path",
")",
"except",
":",
"raise",
"ValidationError",
"(",
"'This path could not be rendered as absolute'",
")"
] | Some paths must be made absolute, this will attempt to convert them. | [
"Some",
"paths",
"must",
"be",
"made",
"absolute",
"this",
"will",
"attempt",
"to",
"convert",
"them",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/commands/configure.py#L131-L141 |
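One thing worth flagging in the row above: `os.path.abspath` returns a string, which is truthy for any input (even `''` resolves to the current directory), so the `if` branch always wins and the relative-path fallback is dead code; `os.path.isabs` is presumably the intended test, and the `try` branch would also fall through to an implicit `None` on success. A simplified, corrected sketch outside the package:

```python
import os

def absolute_path_checked(user_path):
    # isabs actually tests absoluteness; abspath merely normalizes a path.
    if os.path.isabs(user_path):
        return user_path
    raise ValueError('This path could not be rendered as absolute')

print(bool(os.path.abspath('relative/x')))  # True; why the original check misfires
print(os.path.isabs('relative/x'))          # False
print(os.path.isabs('/tmp/x'))              # True
```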
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/commands/configure.py | configure | def configure(default=None, dev=None):
"""
The inner control loops for user interaction during quickstart
configuration.
"""
cache_loc = openaccess_epub.utils.cache_location()
config_loc = openaccess_epub.utils.config_location()
#Make the cache directory
openaccess_epub.utils.mkdir_p(cache_loc)
defaults = {'now': time.asctime(),
'oae-version': openaccess_epub.__version__,
'cache-location': unix_path_coercion(cache_loc),
'input-relative-images': 'images-*',
'use-input-relative-images': 'y',
'image-cache': os.path.join(cache_loc, 'img_cache'),
'use-image-cache': 'n',
'use-image-fetching': 'y',
'default-output': '.',
'input-relative-css': '.',
'epubcheck-jarfile': os.path.join(cache_loc,
'epubcheck-3.0',
'epubcheck-3.0.jar')}
if default or dev: # Skip interactive and apply defaults
#Pass through the validation/modification steps
if dev: # The only current difference between dev and default
defaults['use-image-cache'] = 'y'
defaults['input-relative-images'] = list_opts(defaults['input-relative-images'])
defaults['use-input-relative-images'] = boolean(defaults['use-input-relative-images'])
defaults['image-cache'] = absolute_path(defaults['image-cache'])
defaults['use-image-cache'] = boolean(defaults['use-image-cache'])
defaults['use-image-fetching'] = boolean(defaults['use-image-fetching'])
defaults['default-output'] = nonempty(defaults['default-output'])
defaults['input-relative-css'] = nonempty(defaults['input-relative-css'])
defaults['epubcheck-jarfile'] = absolute_path(defaults['epubcheck-jarfile'])
config = config_formatter(CONFIG_TEXT, defaults)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8'))
print('The config file has been written to {0}'.format(config_loc))
return
config_dict = {'now': time.asctime(),
'oae-version': openaccess_epub.__version__,
'cache-location': unix_path_coercion(cache_loc)}
print('''\nWelcome to the interactive configuration for OpenAccess_EPUB''')
print('''
Please enter values for the following settings. To accept the default value
for the settings, shown in brackets, just push Enter.
-------------------------------------------------------------------------------\
''')
print('''
OpenAccess_EPUB defines a default cache location for the storage of various
data (and the global config.py file), this location is:\n\n{0}
'''.format(cache_loc))
input('Press Enter to start...')
#Image Configuration
print('''
-- Configure Image Behavior --
When OpenAccess_EPUB is executed using the oaepub script, it can find the
images for the input articles using the following strategies (in order of
preference):
Input-Relative: a path relative to the input file
Cached Images: locate the images in a cache
Fetched Online: attempts to download from the Internet (may fail)
We'll configure some values for each of these, and you\'ll also have the option
to turn them off.''')
#Input-relative image details
print('''
Where should OpenAccess_EPUB look for images relative to the input file?
A star "*" may be used as a wildcard to match the name of the input file.
Multiple path values may be specified if separated by commas.''')
user_prompt(config_dict, 'input-relative-images', 'Input-relative images?:',
default=defaults['input-relative-images'], validator=list_opts)
print('''
Should OpenAccess_EPUB look for images relative to the input file by default?\
''')
user_prompt(config_dict, 'use-input-relative-images',
'Use input-relative images?: (Y/n)',
default=defaults['use-input-relative-images'],
validator=boolean)
#Image cache details
print('''
Where should OpenAccess_EPUB place the image cache?''')
user_prompt(config_dict, 'image-cache', 'Image cache?:',
default=defaults['image-cache'],
validator=absolute_path)
print('''
Should OpenAccess_EPUB use the image cache by default? This feature is intended
for developers and testers without local access to the image files and will
consume extra disk space for storage.''')
user_prompt(config_dict, 'use-image-cache', 'Use image cache?: (y/N)',
default=defaults['use-image-cache'],
validator=boolean)
#Image fetching online details
print('''
Should OpenAccess_EPUB attempt to download the images from the Internet? This
is not supported for all publishers and not 100% guaranteed to succeed, you may
need to download them manually if this does not work.''')
user_prompt(config_dict, 'use-image-fetching', 'Attempt image download?: (Y/n)',
default=defaults['use-image-fetching'],
validator=boolean)
#Output configuration
print('''
-- Configure Output Behavior --
OpenAccess_EPUB produces ePub and log files as output. The following options
will determine what is done with these.
Where should OpenAccess_EPUB place the output ePub and log files? If you supply
a relative path, the output path will be relative to the input; if you supply
an absolute path, the output will always be placed there. The default behavior
is to place them in the same directory as the input.''')
user_prompt(config_dict, 'default-output', 'Output path?:',
default=defaults['default-output'],
validator=nonempty)
print('''
-- Configure CSS Behavior --
ePub files use CSS for improved styling, and ePub-readers must support a basic
subset of CSS functions. OpenAccess_EPUB provides a default CSS file, but a
manual one may be supplied, relative to the input. Please define an
appropriate input-relative path.''')
user_prompt(config_dict, 'input-relative-css', 'Input-relative CSS path?:',
default=defaults['input-relative-css'],
validator=nonempty)
print('''
-- Configure EpubCheck --
EpubCheck is a program written and maintained by the IDPF as a tool to validate
ePub. In order to use it, your system must have Java installed and it is
recommended to use the latest version. Downloads of this program are found here:
https://github.com/IDPF/epubcheck/releases
Once you have downloaded the zip file for the program, unzip the archive and
write a path to the .jar file here.''')
user_prompt(config_dict, 'epubcheck-jarfile', 'Absolute path to epubcheck?:',
default=defaults['epubcheck-jarfile'], validator=absolute_path)
#Write the config.py file
config = config_formatter(CONFIG_TEXT, config_dict)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8'))
print('''
Done configuring OpenAccess_EPUB!''') | python | def configure(default=None, dev=None):
"""
The inner control loops for user interaction during quickstart
configuration.
"""
cache_loc = openaccess_epub.utils.cache_location()
config_loc = openaccess_epub.utils.config_location()
#Make the cache directory
openaccess_epub.utils.mkdir_p(cache_loc)
defaults = {'now': time.asctime(),
'oae-version': openaccess_epub.__version__,
'cache-location': unix_path_coercion(cache_loc),
'input-relative-images': 'images-*',
'use-input-relative-images': 'y',
'image-cache': os.path.join(cache_loc, 'img_cache'),
'use-image-cache': 'n',
'use-image-fetching': 'y',
'default-output': '.',
'input-relative-css': '.',
'epubcheck-jarfile': os.path.join(cache_loc,
'epubcheck-3.0',
'epubcheck-3.0.jar')}
if default or dev: # Skip interactive and apply defaults
#Pass through the validation/modification steps
if dev: # The only current difference between dev and default
defaults['use-image-cache'] = 'y'
defaults['input-relative-images'] = list_opts(defaults['input-relative-images'])
defaults['use-input-relative-images'] = boolean(defaults['use-input-relative-images'])
defaults['image-cache'] = absolute_path(defaults['image-cache'])
defaults['use-image-cache'] = boolean(defaults['use-image-cache'])
defaults['use-image-fetching'] = boolean(defaults['use-image-fetching'])
defaults['default-output'] = nonempty(defaults['default-output'])
defaults['input-relative-css'] = nonempty(defaults['input-relative-css'])
defaults['epubcheck-jarfile'] = absolute_path(defaults['epubcheck-jarfile'])
config = config_formatter(CONFIG_TEXT, defaults)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8'))
print('The config file has been written to {0}'.format(config_loc))
return
config_dict = {'now': time.asctime(),
'oae-version': openaccess_epub.__version__,
'cache-location': unix_path_coercion(cache_loc)}
print('''\nWelcome to the interactive configuration for OpenAccess_EPUB''')
print('''
Please enter values for the following settings. To accept the default value
for the settings, shown in brackets, just push Enter.
-------------------------------------------------------------------------------\
''')
print('''
OpenAccess_EPUB defines a default cache location for the storage of various
data (and the global config.py file), this location is:\n\n{0}
'''.format(cache_loc))
input('Press Enter to start...')
#Image Configuration
print('''
-- Configure Image Behavior --
When OpenAccess_EPUB is executed using the oaepub script, it can find the
images for the input articles using the following strategies (in order of
preference):
Input-Relative: a path relative to the input file
Cached Images: locate the images in a cache
Fetched Online: attempts to download from the Internet (may fail)
We'll configure some values for each of these, and you\'ll also have the option
to turn them off.''')
#Input-relative image details
print('''
Where should OpenAccess_EPUB look for images relative to the input file?
A star "*" may be used as a wildcard to match the name of the input file.
Multiple path values may be specified if separated by commas.''')
user_prompt(config_dict, 'input-relative-images', 'Input-relative images?:',
default=defaults['input-relative-images'], validator=list_opts)
print('''
Should OpenAccess_EPUB look for images relative to the input file by default?\
''')
user_prompt(config_dict, 'use-input-relative-images',
'Use input-relative images?: (Y/n)',
default=defaults['use-input-relative-images'],
validator=boolean)
#Image cache details
print('''
Where should OpenAccess_EPUB place the image cache?''')
user_prompt(config_dict, 'image-cache', 'Image cache?:',
default=defaults['image-cache'],
validator=absolute_path)
print('''
Should OpenAccess_EPUB use the image cache by default? This feature is intended
for developers and testers without local access to the image files and will
consume extra disk space for storage.''')
user_prompt(config_dict, 'use-image-cache', 'Use image cache?: (y/N)',
default=defaults['use-image-cache'],
validator=boolean)
#Image fetching online details
print('''
Should OpenAccess_EPUB attempt to download the images from the Internet? This
is not supported for all publishers and not 100% guaranteed to succeed, you may
need to download them manually if this does not work.''')
user_prompt(config_dict, 'use-image-fetching', 'Attempt image download?: (Y/n)',
default=defaults['use-image-fetching'],
validator=boolean)
#Output configuration
print('''
-- Configure Output Behavior --
OpenAccess_EPUB produces ePub and log files as output. The following options
will determine what is done with these.
Where should OpenAccess_EPUB place the output ePub and log files? If you supply
a relative path, the output path will be relative to the input; if you supply
an absolute path, the output will always be placed there. The default behavior
is to place them in the same directory as the input.''')
user_prompt(config_dict, 'default-output', 'Output path?:',
default=defaults['default-output'],
validator=nonempty)
print('''
-- Configure CSS Behavior --
ePub files use CSS for improved styling, and ePub-readers must support a basic
subset of CSS functions. OpenAccess_EPUB provides a default CSS file, but a
manual one may be supplied, relative to the input. Please define an
appropriate input-relative path.''')
user_prompt(config_dict, 'input-relative-css', 'Input-relative CSS path?:',
default=defaults['input-relative-css'],
validator=nonempty)
print('''
-- Configure EpubCheck --
EpubCheck is a program written and maintained by the IDPF as a tool to validate
ePub. In order to use it, your system must have Java installed and it is
recommended to use the latest version. Downloads of this program are found here:
https://github.com/IDPF/epubcheck/releases
Once you have downloaded the zip file for the program, unzip the archive and
write a path to the .jar file here.''')
user_prompt(config_dict, 'epubcheck-jarfile', 'Absolute path to epubcheck?:',
default=defaults['epubcheck-jarfile'], validator=absolute_path)
#Write the config.py file
config = config_formatter(CONFIG_TEXT, config_dict)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8'))
print('''
Done configuring OpenAccess_EPUB!''') | [
"def",
"configure",
"(",
"default",
"=",
"None",
",",
"dev",
"=",
"None",
")",
":",
"cache_loc",
"=",
"openaccess_epub",
".",
"utils",
".",
"cache_location",
"(",
")",
"config_loc",
"=",
"openaccess_epub",
".",
"utils",
".",
"config_location",
"(",
")",
"#Make the cache directory",
"openaccess_epub",
".",
"utils",
".",
"mkdir_p",
"(",
"cache_loc",
")",
"defaults",
"=",
"{",
"'now'",
":",
"time",
".",
"asctime",
"(",
")",
",",
"'oae-version'",
":",
"openaccess_epub",
".",
"__version__",
",",
"'cache-location'",
":",
"unix_path_coercion",
"(",
"cache_loc",
")",
",",
"'input-relative-images'",
":",
"'images-*'",
",",
"'use-input-relative-images'",
":",
"'y'",
",",
"'image-cache'",
":",
"os",
".",
"path",
".",
"join",
"(",
"cache_loc",
",",
"'img_cache'",
")",
",",
"'use-image-cache'",
":",
"'n'",
",",
"'use-image-fetching'",
":",
"'y'",
",",
"'default-output'",
":",
"'.'",
",",
"'input-relative-css'",
":",
"'.'",
",",
"'epubcheck-jarfile'",
":",
"os",
".",
"path",
".",
"join",
"(",
"cache_loc",
",",
"'epubcheck-3.0'",
",",
"'epubcheck-3.0.jar'",
")",
"}",
"if",
"default",
"or",
"dev",
":",
"# Skip interactive and apply defaults",
"#Pass through the validation/modification steps",
"if",
"dev",
":",
"# The only current difference between dev and default",
"defaults",
"[",
"'use-image-cache'",
"]",
"=",
"'y'",
"defaults",
"[",
"'input-relative-images'",
"]",
"=",
"list_opts",
"(",
"defaults",
"[",
"'input-relative-images'",
"]",
")",
"defaults",
"[",
"'use-input-relative-images'",
"]",
"=",
"boolean",
"(",
"defaults",
"[",
"'use-input-relative-images'",
"]",
")",
"defaults",
"[",
"'image-cache'",
"]",
"=",
"absolute_path",
"(",
"defaults",
"[",
"'image-cache'",
"]",
")",
"defaults",
"[",
"'use-image-cache'",
"]",
"=",
"boolean",
"(",
"defaults",
"[",
"'use-image-cache'",
"]",
")",
"defaults",
"[",
"'use-image-fetching'",
"]",
"=",
"boolean",
"(",
"defaults",
"[",
"'use-image-fetching'",
"]",
")",
"defaults",
"[",
"'default-output'",
"]",
"=",
"nonempty",
"(",
"defaults",
"[",
"'default-output'",
"]",
")",
"defaults",
"[",
"'input-relative-css'",
"]",
"=",
"nonempty",
"(",
"defaults",
"[",
"'input-relative-css'",
"]",
")",
"defaults",
"[",
"'epubcheck-jarfile'",
"]",
"=",
"absolute_path",
"(",
"defaults",
"[",
"'epubcheck-jarfile'",
"]",
")",
"config",
"=",
"config_formatter",
"(",
"CONFIG_TEXT",
",",
"defaults",
")",
"with",
"open",
"(",
"config_loc",
",",
"'wb'",
")",
"as",
"conf_out",
":",
"conf_out",
".",
"write",
"(",
"bytes",
"(",
"config",
",",
"'UTF-8'",
")",
")",
"print",
"(",
"'The config file has been written to {0}'",
".",
"format",
"(",
"config_loc",
")",
")",
"return",
"config_dict",
"=",
"{",
"'now'",
":",
"time",
".",
"asctime",
"(",
")",
",",
"'oae-version'",
":",
"openaccess_epub",
".",
"__version__",
",",
"'cache-location'",
":",
"unix_path_coercion",
"(",
"cache_loc",
")",
"}",
"print",
"(",
"'''\\nWelcome to the interactive configuration for OpenAccess_EPUB'''",
")",
"print",
"(",
"'''\nPlease enter values for the following settings. To accept the default value\nfor the settings, shown in brackets, just push Enter.\n\n-------------------------------------------------------------------------------\\\n'''",
")",
"print",
"(",
"'''\nOpenAccess_EPUB defines a default cache location for the storage of various\ndata (and the global config.py file), this location is:\\n\\n{0}\n'''",
".",
"format",
"(",
"cache_loc",
")",
")",
"input",
"(",
"'Press Enter to start...'",
")",
"#Image Configuration",
"print",
"(",
"'''\n -- Configure Image Behavior --\n\nWhen OpenAccess_EPUB is executed using the oaepub script, it can find the\nimages for the input articles using the following strategies (in order of\npreference):\n\n Input-Relative: a path relative to the input file\n Cached Images: locate the images in a cache\n Fetched Online: attempts to download from the Internet (may fail)\n\nWe'll configure some values for each of these, and you\\'ll also have the option\nto turn them off.'''",
")",
"#Input-relative image details",
"print",
"(",
"'''\nWhere should OpenAccess_EPUB look for images relative to the input file?\nA star \"*\" may be used as a wildcard to match the name of the input file.\nMultiple path values may be specified if separated by commas.'''",
")",
"user_prompt",
"(",
"config_dict",
",",
"'input-relative-images'",
",",
"'Input-relative images?:'",
",",
"default",
"=",
"defaults",
"[",
"'input-relative-images'",
"]",
",",
"validator",
"=",
"list_opts",
")",
"print",
"(",
"'''\nShould OpenAccess_EPUB look for images relative to the input file by default?\\\n'''",
")",
"user_prompt",
"(",
"config_dict",
",",
"'use-input-relative-images'",
",",
"'Use input-relative images?: (Y/n)'",
",",
"default",
"=",
"defaults",
"[",
"'use-input-relative-images'",
"]",
",",
"validator",
"=",
"boolean",
")",
"#Image cache details",
"print",
"(",
"'''\nWhere should OpenAccess_EPUB place the image cache?'''",
")",
"user_prompt",
"(",
"config_dict",
",",
"'image-cache'",
",",
"'Image cache?:'",
",",
"default",
"=",
"defaults",
"[",
"'image-cache'",
"]",
",",
"validator",
"=",
"absolute_path",
")",
"print",
"(",
"'''\nShould OpenAccess_EPUB use the image cache by default? This feature is intended\nfor developers and testers without local access to the image files and will\nconsume extra disk space for storage.'''",
")",
"user_prompt",
"(",
"config_dict",
",",
"'use-image-cache'",
",",
"'Use image cache?: (y/N)'",
",",
"default",
"=",
"defaults",
"[",
"'use-image-cache'",
"]",
",",
"validator",
"=",
"boolean",
")",
"#Image fetching online details",
"print",
"(",
"'''\nShould OpenAccess_EPUB attempt to download the images from the Internet? This\nis not supported for all publishers and not 100% guaranteed to succeed, you may\nneed to download them manually if this does not work.'''",
")",
"user_prompt",
"(",
"config_dict",
",",
"'use-image-fetching'",
",",
"'Attempt image download?: (Y/n)'",
",",
"default",
"=",
"defaults",
"[",
"'use-image-fetching'",
"]",
",",
"validator",
"=",
"boolean",
")",
"#Output configuration",
"print",
"(",
"'''\n -- Configure Output Behavior --\n\nOpenAccess_EPUB produces ePub and log files as output. The following options\nwill determine what is done with these.\n\nWhere should OpenAccess_EPUB place the output ePub and log files? If you supply\na relative path, the output path will be relative to the input; if you supply\nan absolute path, the output will always be placed there. The default behavior\nis to place them in the same directory as the input.'''",
")",
"user_prompt",
"(",
"config_dict",
",",
"'default-output'",
",",
"'Output path?:'",
",",
"default",
"=",
"defaults",
"[",
"'default-output'",
"]",
",",
"validator",
"=",
"nonempty",
")",
"print",
"(",
"'''\n -- Configure CSS Behavior --\n\nePub files use CSS for improved styling, and ePub-readers must support a basic\nsubset of CSS functions. OpenAccess_EPUB provides a default CSS file, but a\nmanual one may be supplied, relative to the input. Please define an\nappropriate input-relative path.'''",
")",
"user_prompt",
"(",
"config_dict",
",",
"'input-relative-css'",
",",
"'Input-relative CSS path?:'",
",",
"default",
"=",
"defaults",
"[",
"'input-relative-css'",
"]",
",",
"validator",
"=",
"nonempty",
")",
"print",
"(",
"'''\n -- Configure EpubCheck --\n\nEpubCheck is a program written and maintained by the IDPF as a tool to validate\nePub. In order to use it, your system must have Java installed and it is\nrecommended to use the latest version. Downloads of this program are found here:\n\nhttps://github.com/IDPF/epubcheck/releases\n\nOnce you have downloaded the zip file for the program, unzip the archive and\nwrite a path to the .jar file here.'''",
")",
"user_prompt",
"(",
"config_dict",
",",
"'epubcheck-jarfile'",
",",
"'Absolute path to epubcheck?:'",
",",
"default",
"=",
"defaults",
"[",
"'epubcheck-jarfile'",
"]",
",",
"validator",
"=",
"absolute_path",
")",
"#Write the config.py file",
"config",
"=",
"config_formatter",
"(",
"CONFIG_TEXT",
",",
"config_dict",
")",
"with",
"open",
"(",
"config_loc",
",",
"'wb'",
")",
"as",
"conf_out",
":",
"conf_out",
".",
"write",
"(",
"bytes",
"(",
"config",
",",
"'UTF-8'",
")",
")",
"print",
"(",
"'''\nDone configuring OpenAccess_EPUB!'''",
")"
] | The inner control loops for user interaction during quickstart
configuration. | [
"The",
"inner",
"control",
"loops",
"for",
"user",
"interaction",
"during",
"quickstart",
"configuration",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/commands/configure.py#L191-L344 |
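The `default`/`dev` short-circuit at the top of the row above means the whole prompt flow can be skipped; both paths validate the stock defaults, write `config.py`, and return. A usage sketch (module path taken from the row's file path):

```python
from openaccess_epub.commands.configure import configure

configure(default=True)  # write config.py with stock defaults, no prompts
configure(dev=True)      # same, except 'use-image-cache' defaults to 'y'
```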
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.getCertificateExpireDate | def getCertificateExpireDate(self, CorpNum):
""" 공인인증서 만료일 확인, 등록여부 확인용도로 활용가능
args
CorpNum : 확인할 회원 사업자번호
return
등록시 만료일자, 미등록시 해당 PopbillException raise.
raise
PopbillException
"""
result = self._httpget('/Taxinvoice?cfg=CERT', CorpNum)
return datetime.strptime(result.certificateExpiration, '%Y%m%d%H%M%S') | python | def getCertificateExpireDate(self, CorpNum):
""" 공인인증서 만료일 확인, 등록여부 확인용도로 활용가능
args
CorpNum : 확인할 회원 사업자번호
return
등록시 만료일자, 미등록시 해당 PopbillException raise.
raise
PopbillException
"""
result = self._httpget('/Taxinvoice?cfg=CERT', CorpNum)
return datetime.strptime(result.certificateExpiration, '%Y%m%d%H%M%S') | [
"def",
"getCertificateExpireDate",
"(",
"self",
",",
"CorpNum",
")",
":",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/Taxinvoice?cfg=CERT'",
",",
"CorpNum",
")",
"return",
"datetime",
".",
"strptime",
"(",
"result",
".",
"certificateExpiration",
",",
"'%Y%m%d%H%M%S'",
")"
] | 공인인증서 만료일 확인, 등록여부 확인용도로 활용가능
args
CorpNum : 확인할 회원 사업자번호
return
등록시 만료일자, 미등록시 해당 PopbillException raise.
raise
PopbillException | [
"공인인증서",
"만료일",
"확인",
"등록여부",
"확인용도로",
"활용가능",
"args",
"CorpNum",
":",
"확인할",
"회원",
"사업자번호",
"return",
"등록시",
"만료일자",
"미등록시",
"해당",
"PopbillException",
"raise",
".",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L72-L82 |
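Usage sketch for the certificate-expiry lookup above (docstring gloss: check the public certificate's expiration date; also usable to test whether one is registered, since an unregistered state raises). Credentials and the corp number are placeholders, and the constructor signature is assumed from the SDK's common service pattern:

```python
from popbill import TaxinvoiceService, PopbillException

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

try:
    expires = taxinvoiceService.getCertificateExpireDate("1234567890")
    print("certificate valid until", expires)  # datetime parsed from %Y%m%d%H%M%S
except PopbillException as pe:
    print(pe.code, pe.message)  # raised when no certificate is registered
```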
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.checkMgtKeyInUse | def checkMgtKeyInUse(self, CorpNum, MgtKeyType, MgtKey):
""" 파트너 관리번호 사용중 여부 확인.
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
사용중 여부 by True/False
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
try:
result = self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, CorpNum)
return result.itemKey != None and result.itemKey != ""
except PopbillException as PE:
if PE.code == -11000005:
return False
raise PE | python | def checkMgtKeyInUse(self, CorpNum, MgtKeyType, MgtKey):
""" 파트너 관리번호 사용중 여부 확인.
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
사용중 여부 by True/False
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
try:
result = self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, CorpNum)
return result.itemKey != None and result.itemKey != ""
except PopbillException as PE:
if PE.code == -11000005:
return False
raise PE | [
"def",
"checkMgtKeyInUse",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"try",
":",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
",",
"CorpNum",
")",
"return",
"result",
".",
"itemKey",
"!=",
"None",
"and",
"result",
".",
"itemKey",
"!=",
"\"\"",
"except",
"PopbillException",
"as",
"PE",
":",
"if",
"PE",
".",
"code",
"==",
"-",
"11000005",
":",
"return",
"False",
"raise",
"PE"
] | 파트너 관리번호 사용중 여부 확인.
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
사용중 여부 by True/False
raise
PopbillException | [
"파트너",
"관리번호",
"사용중",
"여부",
"확인",
".",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"return",
"사용중",
"여부",
"by",
"True",
"/",
"False",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L95-L117 |
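`checkMgtKeyInUse` above maps Popbill's not-found code (-11000005) to `False`, so it can gate allocation of partner document keys (관리번호). Sketch with placeholder values:

```python
from popbill import TaxinvoiceService

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

mgtKey = "20230101-0001"  # sample partner-side document key
if taxinvoiceService.checkMgtKeyInUse("1234567890", "SELL", mgtKey):
    print("key already in use; pick another")
else:
    print("key is free")
```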
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.register | def register(self, CorpNum, taxinvoice, writeSpecification=False, UserID=None):
""" 임시저장
args
CorpNum : 회원 사업자 번호
taxinvoice : 등록할 세금계산서 object. Made with Taxinvoice(...)
writeSpecification : 등록시 거래명세서 동시 작성 여부
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if taxinvoice == None:
raise PopbillException(-99999999, "등록할 세금계산서 정보가 입력되지 않았습니다.")
if writeSpecification:
taxinvoice.writeSpecification = True
postData = self._stringtify(taxinvoice)
return self._httppost('/Taxinvoice', postData, CorpNum, UserID) | python | def register(self, CorpNum, taxinvoice, writeSpecification=False, UserID=None):
""" 임시저장
args
CorpNum : 회원 사업자 번호
taxinvoice : 등록할 세금계산서 object. Made with Taxinvoice(...)
writeSpecification : 등록시 거래명세서 동시 작성 여부
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if taxinvoice == None:
raise PopbillException(-99999999, "등록할 세금계산서 정보가 입력되지 않았습니다.")
if writeSpecification:
taxinvoice.writeSpecification = True
postData = self._stringtify(taxinvoice)
return self._httppost('/Taxinvoice', postData, CorpNum, UserID) | [
"def",
"register",
"(",
"self",
",",
"CorpNum",
",",
"taxinvoice",
",",
"writeSpecification",
"=",
"False",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"taxinvoice",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"등록할 세금계산서 정보가 입력되지 않았습니다.\")",
"",
"if",
"writeSpecification",
":",
"taxinvoice",
".",
"writeSpecification",
"=",
"True",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"taxinvoice",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")"
] | 임시저장
args
CorpNum : 회원 사업자 번호
taxinvoice : 등록할 세금계산서 object. Made with Taxinvoice(...)
writeSpecification : 등록시 거래명세서 동시 작성 여부
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"임시저장",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"taxinvoice",
":",
"등록할",
"세금계산서",
"object",
".",
"Made",
"with",
"Taxinvoice",
"(",
"...",
")",
"writeSpecification",
":",
"등록시",
"거래명세서",
"동시",
"작성",
"여부",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L119-L138 |
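`register` above saves a draft (임시저장) built from a `Taxinvoice` object. The field names below are illustrative only; the authoritative schema is the SDK's `Taxinvoice` class, which is assumed here to accept keyword fields:

```python
from popbill import TaxinvoiceService, Taxinvoice, PopbillException

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

taxinvoice = Taxinvoice(                 # illustrative subset of fields
    writeDate="20230101",
    issueType="정발행",
    taxType="과세",
    invoicerCorpNum="1234567890",
    invoicerMgtKey="20230101-0001",
    invoiceeCorpNum="8888888888",
)
try:
    result = taxinvoiceService.register("1234567890", taxinvoice)
    print(result.code, result.message)   # per the docstring: code and message
except PopbillException as pe:
    print(pe.code, pe.message)
```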
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.registIssue | def registIssue(self, CorpNum, taxinvoice, writeSpecification=False, forceIssue=False, dealInvoiceMgtKey=None,
memo=None, emailSubject=None, UserID=None):
""" 즉시 발행
args
CorpNum : 팝빌회원 사업자번호
taxinvoice : 세금계산서 객체
writeSpecification : 거래명세서 동시작성 여부
forceIssue : 지연발행 강제여부
dealInvoiceMgtKey : 거래명세서 문서관리번호
memo : 메모
emailSubject : 메일제목, 미기재시 기본제목으로 전송
UserID : 팝빌회원 아이디
return
검색결과 정보
raise
PopbillException
"""
if writeSpecification:
taxinvoice.writeSpecification = True
if forceIssue:
taxinvoice.forceIssue = True
if dealInvoiceMgtKey != None and dealInvoiceMgtKey != '':
taxinvoice.dealInvoiceMgtKey = dealInvoiceMgtKey
if memo != None and memo != '':
taxinvoice.memo = memo
if emailSubject != None and emailSubject != '':
taxinvoice.emailSubject = emailSubject
postData = self._stringtify(taxinvoice)
return self._httppost('/Taxinvoice', postData, CorpNum, UserID, "ISSUE") | python | def registIssue(self, CorpNum, taxinvoice, writeSpecification=False, forceIssue=False, dealInvoiceMgtKey=None,
memo=None, emailSubject=None, UserID=None):
""" 즉시 발행
args
CorpNum : 팝빌회원 사업자번호
taxinvoice : 세금계산서 객체
writeSpecification : 거래명세서 동시작성 여부
forceIssue : 지연발행 강제여부
dealInvoiceMgtKey : 거래명세서 문서관리번호
memo : 메모
emailSubject : 메일제목, 미기재시 기본제목으로 전송
UserID : 팝빌회원 아이디
return
검색결과 정보
raise
PopbillException
"""
if writeSpecification:
taxinvoice.writeSpecification = True
if forceIssue:
taxinvoice.forceIssue = True
if dealInvoiceMgtKey != None and dealInvoiceMgtKey != '':
taxinvoice.dealInvoiceMgtKey = dealInvoiceMgtKey
if memo != None and memo != '':
taxinvoice.memo = memo
if emailSubject != None and emailSubject != '':
taxinvoice.emailSubject = emailSubject
postData = self._stringtify(taxinvoice)
return self._httppost('/Taxinvoice', postData, CorpNum, UserID, "ISSUE") | [
"def",
"registIssue",
"(",
"self",
",",
"CorpNum",
",",
"taxinvoice",
",",
"writeSpecification",
"=",
"False",
",",
"forceIssue",
"=",
"False",
",",
"dealInvoiceMgtKey",
"=",
"None",
",",
"memo",
"=",
"None",
",",
"emailSubject",
"=",
"None",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"writeSpecification",
":",
"taxinvoice",
".",
"writeSpecification",
"=",
"True",
"if",
"forceIssue",
":",
"taxinvoice",
".",
"forceIssue",
"=",
"True",
"if",
"dealInvoiceMgtKey",
"!=",
"None",
"and",
"dealInvoiceMgtKey",
"!=",
"''",
":",
"taxinvoice",
".",
"dealInvoiceMgtKey",
"=",
"dealInvoiceMgtKey",
"if",
"memo",
"!=",
"None",
"and",
"memo",
"!=",
"''",
":",
"taxinvoice",
".",
"memo",
"=",
"memo",
"if",
"emailSubject",
"!=",
"None",
"and",
"emailSubject",
"!=",
"''",
":",
"taxinvoice",
".",
"emailSubject",
"=",
"emailSubject",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"taxinvoice",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"ISSUE\"",
")"
] | 즉시 발행
args
CorpNum : 팝빌회원 사업자번호
taxinvoice : 세금계산서 객체
writeSpecification : 거래명세서 동시작성 여부
forceIssue : 지연발행 강제여부
dealInvoiceMgtKey : 거래명세서 문서관리번호
memo : 메모
emailSubject : 메일제목, 미기재시 기본제목으로 전송
UserID : 팝빌회원 아이디
return
검색결과 정보
raise
PopbillException | [
"즉시",
"발행",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"taxinvoice",
":",
"세금계산서",
"객체",
"writeSpecification",
":",
"거래명세서",
"동시작성",
"여부",
"forceIssue",
":",
"지연발행",
"강제여부",
"dealInvoiceMgtKey",
":",
"거래명세서",
"문서관리번호",
"memo",
":",
"메모",
"emailSubject",
":",
"메일제목",
"미기재시",
"기본제목으로",
"전송",
"UsreID",
":",
"팝빌회원",
"아이디",
"return",
"검색결과",
"정보",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L140-L174 |
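`registIssue` above collapses draft save and issue (즉시 발행) into one call, folding the optional memo, mail subject, and late-issue override into the posted object. Sketch with the same hypothetical setup:

```python
from popbill import TaxinvoiceService, Taxinvoice

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys
taxinvoice = Taxinvoice()  # populate fields as in the register() sketch above

result = taxinvoiceService.registIssue(
    "1234567890", taxinvoice,
    forceIssue=False,                   # True pushes a late (지연발행) invoice through
    memo="January invoice",             # optional processing memo
    emailSubject="Tax invoice issued",  # optional custom mail subject
)
print(result.code, result.message)
```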
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.update | def update(self, CorpNum, MgtKeyType, MgtKey, taxinvoice, writeSpecification=False, UserID=None):
""" 수정
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
taxinvoice : 수정할 세금계산서 object. Made with Taxinvoice(...)
writeSpecification : 등록시 거래명세서 동시 작성 여부
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if taxinvoice == None:
raise PopbillException(-99999999, "수정할 세금계산서 정보가 입력되지 않았습니다.")
if writeSpecification:
taxinvoice.writeSpecification = True
postData = self._stringtify(taxinvoice)
return self._httppost('/Taxinvoice/' + MgtKeyType + '/' + MgtKey, postData, CorpNum, UserID, 'PATCH') | python | def update(self, CorpNum, MgtKeyType, MgtKey, taxinvoice, writeSpecification=False, UserID=None):
""" 수정
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
taxinvoice : 수정할 세금계산서 object. Made with Taxinvoice(...)
writeSpecification : 등록시 거래명세서 동시 작성 여부
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if taxinvoice == None:
raise PopbillException(-99999999, "수정할 세금계산서 정보가 입력되지 않았습니다.")
if writeSpecification:
taxinvoice.writeSpecification = True
postData = self._stringtify(taxinvoice)
return self._httppost('/Taxinvoice/' + MgtKeyType + '/' + MgtKey, postData, CorpNum, UserID, 'PATCH') | [
"def",
"update",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
",",
"taxinvoice",
",",
"writeSpecification",
"=",
"False",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"if",
"taxinvoice",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"수정할 세금계산서 정보가 입력되지 않았습니다.\")",
"",
"if",
"writeSpecification",
":",
"taxinvoice",
".",
"writeSpecification",
"=",
"True",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"taxinvoice",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"'/'",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"'PATCH'",
")"
] | 수정
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
taxinvoice : 수정할 세금계산서 object. Made with Taxinvoice(...)
writeSpecification : 등록시 거래명세서 동시 작성 여부
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"수정",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"taxinvoice",
":",
"수정할",
"세금계산서",
"object",
".",
"Made",
"with",
"Taxinvoice",
"(",
"...",
")",
"writeSpecification",
":",
"등록시",
"거래명세서",
"동시",
"작성",
"여부",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L176-L201 |
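`update` above re-posts the stringified `Taxinvoice` with the PATCH verb against the key-typed URL, so it appears to replace the saved document wholesale rather than patch individual fields. Sketch with placeholder values:

```python
from popbill import TaxinvoiceService, Taxinvoice

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

taxinvoice = Taxinvoice()  # re-populate the full document, as in register()
result = taxinvoiceService.update("1234567890", "SELL", "20230101-0001", taxinvoice)
print(result.code, result.message)
```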
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.getInfo | def getInfo(self, CorpNum, MgtKeyType, MgtKey):
""" 상태정보 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + '/' + MgtKey, CorpNum) | python | def getInfo(self, CorpNum, MgtKeyType, MgtKey):
""" 상태정보 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + '/' + MgtKey, CorpNum) | [
"def",
"getInfo",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"'/'",
"+",
"MgtKey",
",",
"CorpNum",
")"
] | 상태정보 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
처리결과. consist of code and message
raise
PopbillException | [
"상태정보",
"확인",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L203-L219 |
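`getInfo` above returns the status record (상태정보) for a document; the `itemKey` field on the result is grounded by `checkMgtKeyInUse` earlier, which reads it from the same endpoint. Sketch:

```python
from popbill import TaxinvoiceService

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

info = taxinvoiceService.getInfo("1234567890", "SELL", "20230101-0001")
print(info.itemKey)  # internal document key
print(vars(info))    # remaining status fields, not enumerated in this row
```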
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.getDetailInfo | def getDetailInfo(self, CorpNum, MgtKeyType, MgtKey):
""" 상세정보 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "?Detail", CorpNum) | python | def getDetailInfo(self, CorpNum, MgtKeyType, MgtKey):
""" 상세정보 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "?Detail", CorpNum) | [
"def",
"getDetailInfo",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
"+",
"\"?Detail\"",
",",
"CorpNum",
")"
] | 상세정보 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
처리결과. consist of code and message
raise
PopbillException | [
"상세정보",
"확인",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L221-L237 |
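`getDetailInfo` above fetches the same document with `?Detail`, i.e. the full content (상세정보) rather than just status. Field names are not enumerated in this row, so the sketch simply dumps the object:

```python
from popbill import TaxinvoiceService

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

detail = taxinvoiceService.getDetailInfo("1234567890", "SELL", "20230101-0001")
print(vars(detail))  # full document echo; schema presumably mirrors Taxinvoice
```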
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.delete | def delete(self, CorpNum, MgtKeyType, MgtKey, UserID=None):
""" 삭제
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, '', CorpNum, UserID, "DELETE") | python | def delete(self, CorpNum, MgtKeyType, MgtKey, UserID=None):
""" 삭제
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, '', CorpNum, UserID, "DELETE") | [
"def",
"delete",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
",",
"''",
",",
"CorpNum",
",",
"UserID",
",",
"\"DELETE\"",
")"
] | 삭제
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"삭제",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L239-L256 |
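`delete` above removes a document (삭제) by posting the DELETE action to the key-typed URL. Sketch:

```python
from popbill import TaxinvoiceService

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

result = taxinvoiceService.delete("1234567890", "SELL", "20230101-0001")
print(result.code, result.message)
```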
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.send | def send(self, CorpNum, MgtKeyType, MgtKey, Memo=None, EmailSubject=None, UserID=None):
""" 승인요청
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
Memo : 처리 메모
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
req = {}
if Memo != None and Memo != '':
req["memo"] = Memo
if EmailSubject != None and EmailSubject != '':
req["emailSubject"] = EmailSubject
postData = self._stringtify(req)
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData, CorpNum, UserID, "SEND") | python | def send(self, CorpNum, MgtKeyType, MgtKey, Memo=None, EmailSubject=None, UserID=None):
""" 승인요청
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
Memo : 처리 메모
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
req = {}
if Memo != None and Memo != '':
req["memo"] = Memo
if EmailSubject != None and EmailSubject != '':
req["emailSubject"] = EmailSubject
postData = self._stringtify(req)
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData, CorpNum, UserID, "SEND") | [
"def",
"send",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
",",
"Memo",
"=",
"None",
",",
"EmailSubject",
"=",
"None",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"req",
"=",
"{",
"}",
"if",
"Memo",
"!=",
"None",
"and",
"Memo",
"!=",
"''",
":",
"req",
"[",
"\"memo\"",
"]",
"=",
"Memo",
"if",
"EmailSubject",
"!=",
"None",
"and",
"EmailSubject",
"!=",
"''",
":",
"req",
"[",
"\"emailSubject\"",
"]",
"=",
"EmailSubject",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"req",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"SEND\"",
")"
] | 승인요청
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
Memo : 처리 메모
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"승인요청",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"Memo",
":",
"처리",
"메모",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L258-L285 |
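`send` above posts the SEND action, an approval request (승인요청), attaching the memo and mail subject only when non-empty. Sketch; pick the key type that matches your issue flow (one of SELL/BUY/TRUSTEE):

```python
from popbill import TaxinvoiceService

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

result = taxinvoiceService.send(
    "1234567890", "SELL", "20230101-0001",
    Memo="please approve",              # optional processing memo
    EmailSubject="Approval requested",  # optional custom mail subject
)
print(result.code, result.message)
```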
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.cancelSend | def cancelSend(self, CorpNum, MgtKeyType, MgtKey, Memo=None, UserID=None):
""" 승인요청 취소
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
Memo : 처리 메모
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if Memo != None and Memo != '':
postData = self._stringtify({"memo": Memo})
else:
postData = ''
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData, CorpNum, UserID, "CANCELSEND") | python | def cancelSend(self, CorpNum, MgtKeyType, MgtKey, Memo=None, UserID=None):
""" 승인요청 취소
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
Memo : 처리 메모
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if Memo != None and Memo != '':
postData = self._stringtify({"memo": Memo})
else:
postData = ''
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData, CorpNum, UserID, "CANCELSEND") | [
"def",
"cancelSend",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
",",
"Memo",
"=",
"None",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"if",
"Memo",
"!=",
"None",
"and",
"Memo",
"!=",
"''",
":",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"{",
"\"memo\"",
":",
"Memo",
"}",
")",
"else",
":",
"postData",
"=",
"''",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"CANCELSEND\"",
")"
] | 승인요청 취소
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
Memo : 처리 메모
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"승인요청",
"취소",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"Memo",
":",
"처리",
"메모",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L287-L310 |
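`cancelSend` above withdraws a pending approval request (승인요청 취소); the memo rides along only when non-empty, otherwise an empty body is posted. Sketch:

```python
from popbill import TaxinvoiceService

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

result = taxinvoiceService.cancelSend("1234567890", "SELL", "20230101-0001",
                                      Memo="withdrawn")
print(result.code, result.message)
```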
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.issue | def issue(self, CorpNum, MgtKeyType, MgtKey, Memo=None, EmailSubject=None, ForceIssue=False, UserID=None):
""" 발행
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
Memo : 처리 메모
EmailSubject : 발행메일 이메일 제목
ForceIssue : 지연발행 세금계산서 강제발행 여부.
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
req = {"forceIssue": ForceIssue}
if Memo != None and Memo != '':
req["memo"] = Memo
if EmailSubject != None and EmailSubject != '':
req["emailSubject"] = EmailSubject
postData = self._stringtify(req)
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData, CorpNum, UserID, "ISSUE") | python | def issue(self, CorpNum, MgtKeyType, MgtKey, Memo=None, EmailSubject=None, ForceIssue=False, UserID=None):
""" 발행
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
Memo : 처리 메모
EmailSubject : 발행메일 이메일 제목
ForceIssue : 지연발행 세금계산서 강제발행 여부.
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
req = {"forceIssue": ForceIssue}
if Memo != None and Memo != '':
req["memo"] = Memo
if EmailSubject != None and EmailSubject != '':
req["emailSubject"] = EmailSubject
postData = self._stringtify(req)
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData, CorpNum, UserID, "ISSUE") | [
"def",
"issue",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
",",
"Memo",
"=",
"None",
",",
"EmailSubject",
"=",
"None",
",",
"ForceIssue",
"=",
"False",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"req",
"=",
"{",
"\"forceIssue\"",
":",
"ForceIssue",
"}",
"if",
"Memo",
"!=",
"None",
"and",
"Memo",
"!=",
"''",
":",
"req",
"[",
"\"memo\"",
"]",
"=",
"Memo",
"if",
"EmailSubject",
"!=",
"None",
"and",
"EmailSubject",
"!=",
"''",
":",
"req",
"[",
"\"emailSubject\"",
"]",
"=",
"EmailSubject",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"req",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"ISSUE\"",
")"
] | 발행
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
Memo : 처리 메모
EmailSubject : 발행메일 이메일 제목
ForceIssue : 지연발행 세금계산서 강제발행 여부.
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"발행",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"Memo",
":",
"처리",
"메모",
"EmailSubject",
":",
"발행메일",
"이메일",
"제목",
"ForceIssue",
":",
"지연발행",
"세금계산서",
"강제발행",
"여부",
".",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L362-L392 |
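`issue` above performs the actual issue step (발행); `ForceIssue=True` overrides the late-issue (지연발행) guard. Sketch:

```python
from popbill import TaxinvoiceService

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

result = taxinvoiceService.issue(
    "1234567890", "SELL", "20230101-0001",
    Memo="issuing now",
    ForceIssue=False,  # set True to push a late invoice through anyway
)
print(result.code, result.message)
```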
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.registRequest | def registRequest(self, CorpNum, taxinvoice, memo=None, UserID=None):
""" 즉시 요청
args
CorpNum : 팝빌회원 사업자번호
taxinvoice : 세금계산서 객체
memo : 메모
UserID : 팝빌회원 아이디
return
검색결과 정보
raise
PopbillException
"""
if memo != None and memo != '':
taxinvoice.memo = memo
postData = self._stringtify(taxinvoice)
return self._httppost('/Taxinvoice', postData, CorpNum, UserID, "REQUEST") | python | def registRequest(self, CorpNum, taxinvoice, memo=None, UserID=None):
""" 즉시 요청
args
CorpNum : 팝빌회원 사업자번호
taxinvoice : 세금계산서 객체
memo : 메모
UserID : 팝빌회원 아이디
return
검색결과 정보
raise
PopbillException
"""
if memo != None and memo != '':
taxinvoice.memo = memo
postData = self._stringtify(taxinvoice)
return self._httppost('/Taxinvoice', postData, CorpNum, UserID, "REQUEST") | [
"def",
"registRequest",
"(",
"self",
",",
"CorpNum",
",",
"taxinvoice",
",",
"memo",
"=",
"None",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"memo",
"!=",
"None",
"and",
"memo",
"!=",
"''",
":",
"taxinvoice",
".",
"memo",
"=",
"memo",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"taxinvoice",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"REQUEST\"",
")"
] | 즉시 요청
args
CorpNum : 팝빌회원 사업자번호
taxinvoice : 세금계산서 객체
memo : 메모
UserID : 팝빌회원 아이디
return
검색결과 정보
raise
PopbillException | [
"즉시",
"요청",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"taxinvoice",
":",
"세금계산서",
"객체",
"memo",
":",
"메모",
"UsreID",
":",
"팝빌회원",
"아이디",
"return",
"검색결과",
"정보",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L419-L437 |
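`registRequest` above saves and requests in one step (즉시 요청) via the REQUEST action. Sketch; the `Taxinvoice` fields follow the register() sketch earlier:

```python
from popbill import TaxinvoiceService, Taxinvoice

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

taxinvoice = Taxinvoice()  # populate fields as in the register() sketch
result = taxinvoiceService.registRequest("1234567890", taxinvoice,
                                         memo="request for approval")
print(result.code, result.message)
```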
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.sendToNTS | def sendToNTS(self, CorpNum, MgtKeyType, MgtKey, UserID=None):
""" 국세청 즉시전송
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postData = ''
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData, CorpNum, UserID, "NTS") | python | def sendToNTS(self, CorpNum, MgtKeyType, MgtKey, UserID=None):
""" 국세청 즉시전송
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postData = ''
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData, CorpNum, UserID, "NTS") | [
"def",
"sendToNTS",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"postData",
"=",
"''",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"NTS\"",
")"
] | 국세청 즉시전송
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"국세청",
"즉시전송",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L514-L533 |
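`sendToNTS` above triggers immediate transmission of a document to the National Tax Service (국세청 즉시전송) by posting an empty body with the NTS action. Sketch:

```python
from popbill import TaxinvoiceService

taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # hypothetical keys

result = taxinvoiceService.sendToNTS("1234567890", "SELL", "20230101-0001")
print(result.code, result.message)
```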
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.getLogs | def getLogs(self, CorpNum, MgtKeyType, MgtKey):
""" 세금계산서 문서이력 목록 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
문서이력 정보 목록 as List
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Logs", CorpNum) | python | def getLogs(self, CorpNum, MgtKeyType, MgtKey):
""" 세금계산서 문서이력 목록 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
문서이력 정보 목록 as List
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Logs", CorpNum) | [
"def",
"getLogs",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
"+",
"\"/Logs\"",
",",
"CorpNum",
")"
] | 세금계산서 문서이력 목록 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
문서이력 정보 목록 as List
raise
PopbillException | [
"세금계산서",
"문서이력",
"목록",
"확인",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"return",
"문서이력",
"정보",
"목록",
"as",
"List",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L611-L627 |
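
getLogs returns the document history (문서이력) for one invoice as a list. The SDK builds the returned objects from the JSON response, so the exact attribute names are version-dependent; the sketch below simply dumps each record:

service = TaxinvoiceService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # constructor assumed

logs = service.getLogs("1234567890", "SELL", "INV-2024-001")
for log in logs:
    print(vars(log))  # field names (log text, type, timestamp) are assumed
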
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.getFiles | def getFiles(self, CorpNum, MgtKeyType, MgtKey):
""" 첨부파일 목록 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
첨부파일 정보 목록 as List
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Files", CorpNum) | python | def getFiles(self, CorpNum, MgtKeyType, MgtKey):
""" 첨부파일 목록 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
첨부파일 정보 목록 as List
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Files", CorpNum) | [
"def",
"getFiles",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
"+",
"\"/Files\"",
",",
"CorpNum",
")"
] | 첨부파일 목록 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
첨부파일 정보 목록 as List
raise
PopbillException | [
"첨부파일",
"목록",
"확인",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"return",
"첩부파일",
"정보",
"목록",
"as",
"List",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L660-L676 |
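
getFiles lists the attachments (첨부파일) on an invoice, again as SDK objects deserialized from JSON. The file identifier required by deleteFile (next entry) comes from this listing:

service = TaxinvoiceService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # constructor assumed

files = service.getFiles("1234567890", "SELL", "INV-2024-001")
for f in files:
    print(vars(f))  # expect a file id and a display name; field names assumed
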
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.deleteFile | def deleteFile(self, CorpNum, MgtKeyType, MgtKey, FileID, UserID=None):
""" 첨부파일 삭제
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if FileID == None or FileID == "":
raise PopbillException(-99999999, "파일아이디가 입력되지 않았습니다.")
postData = ''
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Files/" + FileID, postData, CorpNum,
UserID, 'DELETE') | python | def deleteFile(self, CorpNum, MgtKeyType, MgtKey, FileID, UserID=None):
""" 첨부파일 삭제
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if FileID == None or FileID == "":
raise PopbillException(-99999999, "파일아이디가 입력되지 않았습니다.")
postData = ''
return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Files/" + FileID, postData, CorpNum,
UserID, 'DELETE') | [
"def",
"deleteFile",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
",",
"FileID",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"if",
"FileID",
"==",
"None",
"or",
"FileID",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"파일아이디가 입력되지 않았습니다.\")",
"",
"postData",
"=",
"''",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"/\"",
"+",
"MgtKey",
"+",
"\"/Files/\"",
"+",
"FileID",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"'DELETE'",
")"
] | 첨부파일 삭제
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"첨부파일",
"삭제",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKey",
":",
"파트너",
"관리번호",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L678-L700 |
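
deleteFile removes one attachment by its FileID, validating all three identifiers before issuing a POST with the 'DELETE' action. A sketch chaining it to getFiles; the AttachedFile field name is an assumption:

service = TaxinvoiceService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # constructor assumed

files = service.getFiles("1234567890", "SELL", "INV-2024-001")
if files:
    file_id = vars(files[0]).get("AttachedFile")  # field name assumed
    response = service.deleteFile("1234567890", "SELL", "INV-2024-001", file_id)
    print(response.code, response.message)
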
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.getMassPrintURL | def getMassPrintURL(self, CorpNum, MgtKeyType, MgtKeyList, UserID=None):
""" 다량 인쇄 URL 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKeyList : 파트너 관리번호 목록
UserID : 팝빌 회원아이디
return
팝빌 URL as str
raise
PopbillException
"""
if MgtKeyList == None or len(MgtKeyList) < 1:
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postData = self._stringtify(MgtKeyList)
Result = self._httppost('/Taxinvoice/' + MgtKeyType + "?Print", postData, CorpNum, UserID)
return Result.url | python | def getMassPrintURL(self, CorpNum, MgtKeyType, MgtKeyList, UserID=None):
""" 다량 인쇄 URL 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKeyList : 파트너 관리번호 목록
UserID : 팝빌 회원아이디
return
팝빌 URL as str
raise
PopbillException
"""
if MgtKeyList == None or len(MgtKeyList) < 1:
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postData = self._stringtify(MgtKeyList)
Result = self._httppost('/Taxinvoice/' + MgtKeyType + "?Print", postData, CorpNum, UserID)
return Result.url | [
"def",
"getMassPrintURL",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKeyList",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyList",
"==",
"None",
"or",
"len",
"(",
"MgtKeyList",
")",
"<",
"1",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"MgtKeyList",
")",
"Result",
"=",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"\"?Print\"",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"Result",
".",
"url"
] | 다량 인쇄 URL 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKeyList : 파트너 관리번호 목록
UserID : 팝빌 회원아이디
return
팝빌 URL as str
raise
PopbillException | [
"다량",
"인쇄",
"URL",
"확인",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyType",
":",
"관리번호",
"유형",
"one",
"of",
"[",
"SELL",
"BUY",
"TRUSTEE",
"]",
"MgtKeyList",
":",
"파트너",
"관리번호",
"목록",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"팝빌",
"URL",
"as",
"str",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L825-L844 |
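
getMassPrintURL posts a JSON-encoded list of management keys and returns a popup URL for printing them as one batch; note it returns `Result.url` rather than the raw response object:

service = TaxinvoiceService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # constructor assumed

keys = ["INV-2024-001", "INV-2024-002", "INV-2024-003"]
url = service.getMassPrintURL("1234567890", "SELL", keys)
print(url)  # like the other URL getters, the link embeds a short-lived token
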
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.search | def search(self, CorpNum, MgtKeyType, DType, SDate, EDate, State, Type, TaxType, LateOnly, TaxRegIDYN, TaxRegIDType,
TaxRegID, Page, PerPage, Order, UserID=None, QString=None, InterOPYN=None, IssueType=None):
""" 목록 조회
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
DType : 일자유형, R-등록일시, W-작성일자, I-발행일시 중 택 1
SDate : 시작일자, 표시형식(yyyyMMdd)
EDate : 종료일자, 표시형식(yyyyMMdd)
State : 상태코드, 2,3번째 자리에 와일드카드(*) 사용가능
Type : 문서형태 배열, N-일반세금계산서, M-수정세금계산서
TaxType : 과세형태 배열, T-과세, N-면세, Z-영세
LateOnly : 지연발행, 공백-전체조회, 0-정상발행조회, 1-지연발행 조회
TaxRegIDYN : 종사업장번호 유무, 공백-전체조회, 0-종사업장번호 없음 1-종사업장번호 있음
TaxRegIDType : 종사업장번호 사업자유형, S-공급자, B-공급받는자, T-수탁자
TaxRegID : 종사업장번호, 콤마(,)로 구분하여 구성 ex)'0001,1234'
Page : 페이지번호
PerPage : 페이지당 목록개수
Order : 정렬방향, D-내림차순, A-오름차순
UserID : 팝빌 회원아이디
QString : 거래처 정보, 거래처 상호 또는 사업자등록번호 기재, 미기재시 전체조회
InterOPYN : 연동문서 여부, 공백-전체조회, 0-일반문서 조회, 1-연동문서 조회
IssueType : 발행형태 배열, N-정발행, R-역발행, T-위수탁
return
조회목록 Object
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if DType == None or DType == '':
raise PopbillException(-99999999, "일자유형이 입력되지 않았습니다.")
if SDate == None or SDate == '':
raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.")
if EDate == None or EDate == '':
raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.")
uri = '/Taxinvoice/' + MgtKeyType
uri += '?DType=' + DType
uri += '&SDate=' + SDate
uri += '&EDate=' + EDate
uri += '&State=' + ','.join(State)
uri += '&Type=' + ','.join(Type)
uri += '&TaxType=' + ','.join(TaxType)
uri += '&TaxRegIDType=' + TaxRegIDType
uri += '&TaxRegID=' + TaxRegID
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
uri += '&InterOPYN=' + InterOPYN
if LateOnly != '':
uri += '&LateOnly=' + LateOnly
if TaxRegIDYN != '':
uri += '&TaxRegIDType=' + TaxRegIDType
if QString is not None:
uri += '&QString=' + QString
if IssueType is not None:
uri += '&IssueType=' + ','.join(IssueType)
return self._httpget(uri, CorpNum, UserID) | python | def search(self, CorpNum, MgtKeyType, DType, SDate, EDate, State, Type, TaxType, LateOnly, TaxRegIDYN, TaxRegIDType,
TaxRegID, Page, PerPage, Order, UserID=None, QString=None, InterOPYN=None, IssueType=None):
""" 목록 조회
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
DType : 일자유형, R-등록일시, W-작성일자, I-발행일시 중 택 1
SDate : 시작일자, 표시형식(yyyyMMdd)
EDate : 종료일자, 표시형식(yyyyMMdd)
State : 상태코드, 2,3번째 자리에 와일드카드(*) 사용가능
Type : 문서형태 배열, N-일반세금계산서, M-수정세금계산서
TaxType : 과세형태 배열, T-과세, N-면세, Z-영세
LateOnly : 지연발행, 공백-전체조회, 0-정상발행조회, 1-지연발행 조회
TaxRegIDYN : 종사업장번호 유무, 공백-전체조회, 0-종사업장번호 없음 1-종사업장번호 있음
TaxRegIDType : 종사업장번호 사업자유형, S-공급자, B-공급받는자, T-수탁자
TaxRegID : 종사업장번호, 콤마(,)로 구분하여 구성 ex)'0001,1234'
Page : 페이지번호
PerPage : 페이지당 목록개수
Order : 정렬방향, D-내림차순, A-오름차순
UserID : 팝빌 회원아이디
QString : 거래처 정보, 거래처 상호 또는 사업자등록번호 기재, 미기재시 전체조회
InterOPYN : 연동문서 여부, 공백-전체조회, 0-일반문서 조회, 1-연동문서 조회
IssueType : 발행형태 배열, N-정발행, R-역발행, T-위수탁
return
조회목록 Object
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if DType == None or DType == '':
raise PopbillException(-99999999, "일자유형이 입력되지 않았습니다.")
if SDate == None or SDate == '':
raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.")
if EDate == None or EDate == '':
raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.")
uri = '/Taxinvoice/' + MgtKeyType
uri += '?DType=' + DType
uri += '&SDate=' + SDate
uri += '&EDate=' + EDate
uri += '&State=' + ','.join(State)
uri += '&Type=' + ','.join(Type)
uri += '&TaxType=' + ','.join(TaxType)
uri += '&TaxRegIDType=' + TaxRegIDType
uri += '&TaxRegID=' + TaxRegID
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
uri += '&InterOPYN=' + InterOPYN
if LateOnly != '':
uri += '&LateOnly=' + LateOnly
if TaxRegIDYN != '':
uri += '&TaxRegIDType=' + TaxRegIDType
if QString is not None:
uri += '&QString=' + QString
if IssueType is not None:
uri += '&IssueType=' + ','.join(IssueType)
return self._httpget(uri, CorpNum, UserID) | [
"def",
"search",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"DType",
",",
"SDate",
",",
"EDate",
",",
"State",
",",
"Type",
",",
"TaxType",
",",
"LateOnly",
",",
"TaxRegIDYN",
",",
"TaxRegIDType",
",",
"TaxRegID",
",",
"Page",
",",
"PerPage",
",",
"Order",
",",
"UserID",
"=",
"None",
",",
"QString",
"=",
"None",
",",
"InterOPYN",
"=",
"None",
",",
"IssueType",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"DType",
"==",
"None",
"or",
"DType",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"일자유형이 입력되지 않았습니다.\")",
"",
"if",
"SDate",
"==",
"None",
"or",
"SDate",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"시작일자가 입력되지 않았습니다.\")",
"",
"if",
"EDate",
"==",
"None",
"or",
"EDate",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"종료일자가 입력되지 않았습니다.\")",
"",
"uri",
"=",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"uri",
"+=",
"'?DType='",
"+",
"DType",
"uri",
"+=",
"'&SDate='",
"+",
"SDate",
"uri",
"+=",
"'&EDate='",
"+",
"EDate",
"uri",
"+=",
"'&State='",
"+",
"','",
".",
"join",
"(",
"State",
")",
"uri",
"+=",
"'&Type='",
"+",
"','",
".",
"join",
"(",
"Type",
")",
"uri",
"+=",
"'&TaxType='",
"+",
"','",
".",
"join",
"(",
"TaxType",
")",
"uri",
"+=",
"'&TaxRegIDType='",
"+",
"TaxRegIDType",
"uri",
"+=",
"'&TaxRegID='",
"+",
"TaxRegID",
"uri",
"+=",
"'&Page='",
"+",
"str",
"(",
"Page",
")",
"uri",
"+=",
"'&PerPage='",
"+",
"str",
"(",
"PerPage",
")",
"uri",
"+=",
"'&Order='",
"+",
"Order",
"uri",
"+=",
"'&InterOPYN='",
"+",
"InterOPYN",
"if",
"LateOnly",
"!=",
"''",
":",
"uri",
"+=",
"'&LateOnly='",
"+",
"LateOnly",
"if",
"TaxRegIDYN",
"!=",
"''",
":",
"uri",
"+=",
"'&TaxRegIDType='",
"+",
"TaxRegIDType",
"if",
"QString",
"is",
"not",
"None",
":",
"uri",
"+=",
"'&QString='",
"+",
"QString",
"if",
"IssueType",
"is",
"not",
"None",
":",
"uri",
"+=",
"'&IssueType='",
"+",
"','",
".",
"join",
"(",
"IssueType",
")",
"return",
"self",
".",
"_httpget",
"(",
"uri",
",",
"CorpNum",
",",
"UserID",
")"
] | 목록 조회
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
DType : 일자유형, R-등록일시, W-작성일자, I-발행일시 중 택 1
SDate : 시작일자, 표시형식(yyyyMMdd)
EDate : 종료일자, 표시형식(yyyyMMdd)
State : 상태코드, 2,3번째 자리에 와일드카드(*) 사용가능
Type : 문서형태 배열, N-일반세금계산서, M-수정세금계산서
TaxType : 과세형태 배열, T-과세, N-면세, Z-영세
LateOnly : 지연발행, 공백-전체조회, 0-정상발행조회, 1-지연발행 조회
TaxRegIDYN : 종사업장번호 유무, 공백-전체조회, 0-종사업장번호 없음 1-종사업장번호 있음
TaxRegIDType : 종사업장번호 사업자유형, S-공급자, B-공급받는자, T-수탁자
TaxRegID : 종사업장번호, 콤마(,)로 구분하여 구성 ex)'0001,1234'
Page : 페이지번호
PerPage : 페이지당 목록개수
Order : 정렬방향, D-내림차순, A-오름차순
UserID : 팝빌 회원아이디
QString : 거래처 정보, 거래처 상호 또는 사업자등록번호 기재, 미기재시 전체조회
InterOPYN : 연동문서 여부, 공백-전체조회, 0-일반문서 조회, 1-연동문서 조회
IssueType : 발행형태 배열, N-정발행, R-역발행, T-위수탁
return
조회목록 Object
raise
PopbillException | [
"목록",
"조회",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKeyType",
":",
"세금계산서유형",
"SELL",
"-",
"매출",
"BUY",
"-",
"매입",
"TRUSTEE",
"-",
"위수탁",
"DType",
":",
"일자유형",
"R",
"-",
"등록일시",
"W",
"-",
"작성일자",
"I",
"-",
"발행일시",
"중",
"택",
"1",
"SDate",
":",
"시작일자",
"표시형식",
"(",
"yyyyMMdd",
")",
"EDate",
":",
"종료일자",
"표시형식",
"(",
"yyyyMMdd",
")",
"State",
":",
"상태코드",
"2",
"3번째",
"자리에",
"와일드카드",
"(",
"*",
")",
"사용가능",
"Type",
":",
"문서형태",
"배열",
"N",
"-",
"일반세금계산서",
"M",
"-",
"수정세금계산서",
"TaxType",
":",
"과세형태",
"배열",
"T",
"-",
"과세",
"N",
"-",
"면세",
"Z",
"-",
"영세",
"LateOnly",
":",
"지연발행",
"공백",
"-",
"전체조회",
"0",
"-",
"정상발행조회",
"1",
"-",
"지연발행",
"조회",
"TaxRegIdYN",
":",
"종사업장번호",
"유무",
"공백",
"-",
"전체조회",
"0",
"-",
"종사업장번호",
"없음",
"1",
"-",
"종사업장번호",
"있음",
"TaxRegIDType",
":",
"종사업장번호",
"사업자유형",
"S",
"-",
"공급자",
"B",
"-",
"공급받는자",
"T",
"-",
"수탁자",
"TaxRegID",
":",
"종사업장번호",
"콤마",
"(",
")",
"로",
"구분하여",
"구성",
"ex",
")",
"0001",
"1234",
"Page",
":",
"페이지번호",
"PerPage",
":",
"페이지당",
"목록개수",
"Order",
":",
"정렬방향",
"D",
"-",
"내림차순",
"A",
"-",
"오름차순",
"UserID",
":",
"팝빌",
"회원아이디",
"QString",
":",
"거래처",
"정보",
"거래처",
"상호",
"또는",
"사업자등록번호",
"기재",
"미기재시",
"전체조회",
"InterOPYN",
":",
"연동문서",
"여부",
"공백",
"-",
"전체조회",
"0",
"-",
"일반문서",
"조회",
"1",
"-",
"연동문서",
"조회",
"IssueType",
":",
"발행형태",
"배열",
"N",
"-",
"정발행",
"R",
"-",
"역발행",
"T",
"-",
"위수탁",
"return",
"조회목록",
"Object",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L846-L912 |
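
Two practical caveats fall out of the search body above: InterOPYN defaults to None but is concatenated into the URI unconditionally, so callers must pass a string ('' means "all"); and the `if TaxRegIDYN != ''` branch appends `&TaxRegIDType=` a second time rather than a TaxRegIDYN parameter, which looks like an upstream quirk. A call sketch with every positional supplied; the attribute names on the result object are assumed:

service = TaxinvoiceService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # constructor assumed

result = service.search(
    "1234567890",      # CorpNum
    "SELL",            # MgtKeyType
    "W",               # DType: by write date
    "20240101",        # SDate (yyyyMMdd)
    "20240131",        # EDate
    ["3**"],           # State codes; * wildcards in 2nd/3rd digit
    ["N", "M"],        # Type: normal / modified invoices
    ["T", "N", "Z"],   # TaxType: taxable / exempt / zero-rated
    "",                # LateOnly: all
    "",                # TaxRegIDYN: all (skips the quirky branch)
    "S",               # TaxRegIDType
    "",                # TaxRegID
    1, 50, "D",        # Page, PerPage, Order (descending)
    InterOPYN="",      # must be a str, see note above
)
print(result.total, len(result.list))  # result attribute names assumed
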
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.attachStatement | def attachStatement(self, CorpNum, MgtKeyType, MgtKey, ItemCode, StmtMgtKey, UserID=None):
""" 전자명세서 첨부
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
MgtKey : 세금계산서 문서관리번호
StmtCode : 명세서 종류코드, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
StmtMgtKey : 전자명세서 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
uri = '/Taxinvoice/' + MgtKeyType + '/' + MgtKey + '/AttachStmt'
postData = self._stringtify({"ItemCode": ItemCode, "MgtKey": StmtMgtKey})
return self._httppost(uri, postData, CorpNum, UserID) | python | def attachStatement(self, CorpNum, MgtKeyType, MgtKey, ItemCode, StmtMgtKey, UserID=None):
""" 전자명세서 첨부
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
MgtKey : 세금계산서 문서관리번호
StmtCode : 명세서 종류코드, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
StmtMgtKey : 전자명세서 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
uri = '/Taxinvoice/' + MgtKeyType + '/' + MgtKey + '/AttachStmt'
postData = self._stringtify({"ItemCode": ItemCode, "MgtKey": StmtMgtKey})
return self._httppost(uri, postData, CorpNum, UserID) | [
"def",
"attachStatement",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"MgtKey",
",",
"ItemCode",
",",
"StmtMgtKey",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"not",
"in",
"self",
".",
"__MgtKeyTypes",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 형태가 올바르지 않습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"uri",
"=",
"'/Taxinvoice/'",
"+",
"MgtKeyType",
"+",
"'/'",
"+",
"MgtKey",
"+",
"'/AttachStmt'",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"{",
"\"ItemCode\"",
":",
"ItemCode",
",",
"\"MgtKey\"",
":",
"StmtMgtKey",
"}",
")",
"return",
"self",
".",
"_httppost",
"(",
"uri",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")"
] | 전자명세서 첨부
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
MgtKey : 세금계산서 문서관리번호
StmtCode : 명세서 종류코드, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
StmtMgtKey : 전자명세서 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"전자명세서",
"첨부",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKeyType",
":",
"세금계산서",
"유형",
"SELL",
"-",
"매출",
"BUY",
"-",
"매입",
"TRUSTEE",
"-",
"위수탁",
"MgtKey",
":",
"세금계산서",
"문서관리번호",
"StmtCode",
":",
"명세서",
"종류코드",
"121",
"-",
"명세서",
"122",
"-",
"청구서",
"123",
"-",
"견적서",
"124",
"-",
"발주서",
"125",
"-",
"입금표",
"126",
"-",
"영수증",
"StmtMgtKey",
":",
"전자명세서",
"문서관리번호",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L914-L938 |
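
attachStatement links an electronic statement (전자명세서) to an invoice. The docstring labels the document-type parameter StmtCode, but the signature actually takes ItemCode (121 statement, 122 bill, 123 estimate, 124 order, 125 deposit slip, 126 receipt) plus the statement's own management key:

service = TaxinvoiceService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # constructor assumed

response = service.attachStatement("1234567890", "SELL", "INV-2024-001",
                                   121,              # ItemCode: 명세서 (statement)
                                   "STMT-2024-001")  # the statement's MgtKey
print(response.code, response.message)
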
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.assignMgtKey | def assignMgtKey(self, CorpNum, MgtKeyType, ItemKey, MgtKey, UserID=None):
""" 관리번호할당
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
ItemKey : 아이템키 (Search API로 조회 가능)
MgtKey : 세금계산서에 할당할 파트너 관리 번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType == None or MgtKeyType == '':
raise PopbillException(-99999999, "세금계산서 발행유형이 입력되지 않았습니다.")
if ItemKey == None or ItemKey == '':
raise PopbillException(-99999999, "아이템키가 입력되지 않았습니다.")
if MgtKey == None or MgtKey == '':
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postDate = "MgtKey=" + MgtKey
return self._httppost('/Taxinvoice/' + ItemKey + '/' + MgtKeyType, postDate, CorpNum, UserID, "",
"application/x-www-form-urlencoded; charset=utf-8") | python | def assignMgtKey(self, CorpNum, MgtKeyType, ItemKey, MgtKey, UserID=None):
""" 관리번호할당
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
ItemKey : 아이템키 (Search API로 조회 가능)
MgtKey : 세금계산서에 할당할 파트너 관리 번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType == None or MgtKeyType == '':
raise PopbillException(-99999999, "세금계산서 발행유형이 입력되지 않았습니다.")
if ItemKey == None or ItemKey == '':
raise PopbillException(-99999999, "아이템키가 입력되지 않았습니다.")
if MgtKey == None or MgtKey == '':
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postDate = "MgtKey=" + MgtKey
return self._httppost('/Taxinvoice/' + ItemKey + '/' + MgtKeyType, postDate, CorpNum, UserID, "",
"application/x-www-form-urlencoded; charset=utf-8") | [
"def",
"assignMgtKey",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyType",
",",
"ItemKey",
",",
"MgtKey",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyType",
"==",
"None",
"or",
"MgtKeyType",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"세금계산서 발행유형이 입력되지 않았습니다.\")",
"",
"if",
"ItemKey",
"==",
"None",
"or",
"ItemKey",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"아이템키가 입력되지 않았습니다.\")",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")",
"",
"postDate",
"=",
"\"MgtKey=\"",
"+",
"MgtKey",
"return",
"self",
".",
"_httppost",
"(",
"'/Taxinvoice/'",
"+",
"ItemKey",
"+",
"'/'",
"+",
"MgtKeyType",
",",
"postDate",
",",
"CorpNum",
",",
"UserID",
",",
"\"\"",
",",
"\"application/x-www-form-urlencoded; charset=utf-8\"",
")"
] | 관리번호할당
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
ItemKey : 아이템키 (Search API로 조회 가능)
MgtKey : 세금계산서에 할당할 파트너 관리 번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"관리번호할당",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKeyType",
":",
"세금계산서",
"유형",
"SELL",
"-",
"매출",
"BUY",
"-",
"매입",
"TRUSTEE",
"-",
"위수탁",
"ItemKey",
":",
"아이템키",
"(",
"Search",
"API로",
"조회",
"가능",
")",
"MgtKey",
":",
"세금계산서에",
"할당할",
"파트너",
"관리",
"번호",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L972-L996 |
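
assignMgtKey attaches a partner-side management key to an invoice identified only by its Popbill ItemKey (obtainable from search results), posting a form-encoded body; the local variable is spelled postDate in the upstream source but is just the POST payload. Sketch, with a placeholder ItemKey:

service = TaxinvoiceService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # constructor assumed

item_key = "018012314361000001"  # ItemKey format assumed; take it from search()
response = service.assignMgtKey("1234567890", "SELL", item_key, "INV-2024-001")
print(response.code, response.message)
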
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.getSealURL | def getSealURL(self, CorpNum, UserID):
""" 팝빌 인감 및 첨부문서 등록 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=SEAL', CorpNum, UserID)
return result.url | python | def getSealURL(self, CorpNum, UserID):
""" 팝빌 인감 및 첨부문서 등록 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=SEAL', CorpNum, UserID)
return result.url | [
"def",
"getSealURL",
"(",
"self",
",",
"CorpNum",
",",
"UserID",
")",
":",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/?TG=SEAL'",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"result",
".",
"url"
] | 팝빌 인감 및 첨부문서 등록 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException | [
"팝빌",
"인감",
"및",
"첨부문서",
"등록",
"URL",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"UserID",
":",
"회원",
"팝빌아이디",
"return",
"30초",
"보안",
"토큰을",
"포함한",
"url",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L1043-L1054 |
linkhub-sdk/popbill.py | popbill/taxinvoiceService.py | TaxinvoiceService.getTaxCertURL | def getTaxCertURL(self, CorpNum, UserID):
""" 공인인증서 등록 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=CERT', CorpNum, UserID)
return result.url | python | def getTaxCertURL(self, CorpNum, UserID):
""" 공인인증서 등록 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=CERT', CorpNum, UserID)
return result.url | [
"def",
"getTaxCertURL",
"(",
"self",
",",
"CorpNum",
",",
"UserID",
")",
":",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/?TG=CERT'",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"result",
".",
"url"
] | 공인인증서 등록 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException | [
"공인인증서",
"등록",
"URL",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"UserID",
":",
"회원",
"팝빌아이디",
"return",
"30초",
"보안",
"토큰을",
"포함한",
"url",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L1056-L1067 |
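
This entry and the previous one are twin URL getters: getSealURL opens the seal/attachment registration page (?TG=SEAL) and getTaxCertURL the certificate registration page (?TG=CERT). Both require a UserID and return a link embedding a 30-second security token, so the link should be opened immediately:

service = TaxinvoiceService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # constructor assumed

seal_url = service.getSealURL("1234567890", "testkorea")
cert_url = service.getTaxCertURL("1234567890", "testkorea")
print(seal_url)
print(cert_url)
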
ChrisTimperley/Kaskara | python/kaskara/functions.py | FunctionDB.encloses | def encloses(self,
location: FileLocation
) -> Optional[FunctionDesc]:
"""
Returns the function, if any, that encloses a given location.
"""
for func in self.in_file(location.filename):
if location in func.location:
return func
return None | python | def encloses(self,
location: FileLocation
) -> Optional[FunctionDesc]:
"""
Returns the function, if any, that encloses a given location.
"""
for func in self.in_file(location.filename):
if location in func.location:
return func
return None | [
"def",
"encloses",
"(",
"self",
",",
"location",
":",
"FileLocation",
")",
"->",
"Optional",
"[",
"FunctionDesc",
"]",
":",
"for",
"func",
"in",
"self",
".",
"in_file",
"(",
"location",
".",
"filename",
")",
":",
"if",
"location",
"in",
"func",
".",
"location",
":",
"return",
"func",
"return",
"None"
] | Returns the function, if any, that encloses a given location. | [
"Returns",
"the",
"function",
"if",
"any",
"that",
"encloses",
"a",
"given",
"location",
"."
] | train | https://github.com/ChrisTimperley/Kaskara/blob/3d182d95b2938508e5990eddd30321be15e2f2ef/python/kaskara/functions.py#L97-L106 |
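
encloses is a linear scan over the functions of one file, returning the first FunctionDesc whose location range contains the query point, or None. A small helper under assumed kaskara import paths (the record above only shows the FileLocation and FunctionDesc type hints):

from kaskara.core import FileLocation, Location  # module path and types assumed

def find_enclosing(db, filename, line, col):
    """Return the FunctionDesc enclosing (filename, line, col), or None."""
    return db.encloses(FileLocation(filename, Location(line, col)))
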
ChrisTimperley/Kaskara | python/kaskara/functions.py | FunctionDB.in_file | def in_file(self, filename: str) -> Iterator[FunctionDesc]:
"""
Returns an iterator over all of the functions definitions that are
contained within a given file.
"""
yield from self.__filename_to_functions.get(filename, []) | python | def in_file(self, filename: str) -> Iterator[FunctionDesc]:
"""
Returns an iterator over all of the functions definitions that are
contained within a given file.
"""
yield from self.__filename_to_functions.get(filename, []) | [
"def",
"in_file",
"(",
"self",
",",
"filename",
":",
"str",
")",
"->",
"Iterator",
"[",
"FunctionDesc",
"]",
":",
"yield",
"from",
"self",
".",
"__filename_to_functions",
".",
"get",
"(",
"filename",
",",
"[",
"]",
")"
] | Returns an iterator over all of the functions definitions that are
contained within a given file. | [
"Returns",
"an",
"iterator",
"over",
"all",
"of",
"the",
"functions",
"definitions",
"that",
"are",
"contained",
"within",
"a",
"given",
"file",
"."
] | train | https://github.com/ChrisTimperley/Kaskara/blob/3d182d95b2938508e5990eddd30321be15e2f2ef/python/kaskara/functions.py#L108-L113 |
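
in_file simply delegates to the filename-indexed dictionary, yielding nothing for unknown files rather than raising. A one-liner built on it:

def function_names_in(db, filename):
    """Names of all function definitions recorded for one file."""
    return [fn.name for fn in db.in_file(filename)]  # .name assumed on FunctionDesc
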
MSchnei/pyprf_feature | pyprf_feature/analysis/model_creation_opt.py | model_creation_opt | def model_creation_opt(dicCnfg, aryMdlParams, strPathHrf=None, varRat=None,
lgcPrint=True):
"""
Create or load pRF model time courses.
Parameters
----------
dicCnfg : dict
Dictionary containing config parameters.
aryMdlParams : numpy arrays
x, y and sigma parameters.
strPathHrf : str or None:
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
varRat : float, default None
Ratio of size suppressive surround to size of center pRF
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
"""
# *************************************************************************
# *** Load parameters from config file
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# *************************************************************************
if cfg.lgcCrteMdl:
# *********************************************************************
# *** Load spatial condition information
arySptExpInf = np.load(cfg.strSptExpInf)
# Here we assume scientific convention and orientation of images where
# the origin should fall in the lower left corner, the x-axis occupies
# the width and the y-axis occupies the height dimension of the screen.
# We also assume that the first dimension that the user provides
# indexes x and the second indexes the y-axis. Since python is column
# major (i.e. first indexes columns, only then rows), we need to rotate
# arySptExpInf by 90 degrees rightward. This will insure that with the
# 0th axis we index the scientific x-axis and higher values move us to
# the right on that x-axis. It will also ensure that the 1st
# python axis indexes the scientific y-axis and higher values will
# move us up.
arySptExpInf = np.rot90(arySptExpInf, k=3)
# *********************************************************************
# *********************************************************************
# *** Load temporal condition information
# load temporal information about presented stimuli
aryTmpExpInf = np.load(cfg.strTmpExpInf)
# add fourth column to make it appropriate for pyprf_feature
if aryTmpExpInf.shape[-1] == 3:
vecNewCol = np.greater(aryTmpExpInf[:, 0], 0).astype(np.float16)
aryTmpExpInf = np.concatenate(
(aryTmpExpInf, np.expand_dims(vecNewCol, axis=1)), axis=1)
# *********************************************************************
# *********************************************************************
# If desired by user, also create model parameters for supp surround
if varRat is not None:
aryMdlParamsSur = np.copy(aryMdlParams)
aryMdlParamsSur[:, 2] = aryMdlParamsSur[:, 2] * varRat
# *********************************************************************
# *********************************************************************
# *** Create 2D Gauss model responses to spatial conditions.
aryMdlRsp = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParams, cfg.varPar, lgcPrint=lgcPrint)
# If desired by user, also create model responses for supp surround
if varRat is not None:
aryMdlRspSur = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParamsSur, cfg.varPar,
lgcPrint=lgcPrint)
del(arySptExpInf)
# *********************************************************************
# *********************************************************************
# *** Create prf time course models
# Check whether path to npy file with hrf parameters was provided
if strPathHrf is not None:
if lgcPrint:
print('---------Load custom hrf parameters')
aryCstPrm = np.load(strPathHrf)
dctPrm = {}
dctPrm['peak_delay'] = aryCstPrm[0]
dctPrm['under_delay'] = aryCstPrm[1]
dctPrm['peak_disp'] = aryCstPrm[2]
dctPrm['under_disp'] = aryCstPrm[3]
dctPrm['p_u_ratio'] = aryCstPrm[4]
# If not, set dctPrm to None, which will result in default hrf params
else:
if lgcPrint:
print('---------Use default hrf parameters')
dctPrm = None
aryPrfTc = crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, cfg.varNumVol,
cfg.varTr, cfg.varTmpOvsmpl,
cfg.switchHrfSet, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm, lgcPrint=lgcPrint)
# If desired by user, create prf time course models for supp surround
if varRat is not None:
if lgcPrint:
print('---------Add suppressive surround')
aryPrfTcSur = crt_prf_ftr_tc(aryMdlRspSur, aryTmpExpInf,
cfg.varNumVol, cfg.varTr,
cfg.varTmpOvsmpl, cfg.switchHrfSet,
(int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm,
lgcPrint=lgcPrint)
# Concatenate aryPrfTc and aryPrfTcSur
aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcSur), axis=1)
# *********************************************************************
return aryPrfTc | python | def model_creation_opt(dicCnfg, aryMdlParams, strPathHrf=None, varRat=None,
lgcPrint=True):
"""
Create or load pRF model time courses.
Parameters
----------
dicCnfg : dict
Dictionary containing config parameters.
aryMdlParams : numpy arrays
x, y and sigma parameters.
strPathHrf : str or None:
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
varRat : float, default None
Ratio of size suppressive surround to size of center pRF
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
"""
# *************************************************************************
# *** Load parameters from config file
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# *************************************************************************
if cfg.lgcCrteMdl:
# *********************************************************************
# *** Load spatial condition information
arySptExpInf = np.load(cfg.strSptExpInf)
# Here we assume scientific convention and orientation of images where
# the origin should fall in the lower left corner, the x-axis occupies
# the width and the y-axis occupies the height dimension of the screen.
# We also assume that the first dimension that the user provides
# indexes x and the second indexes the y-axis. Since python is column
# major (i.e. first indexes columns, only then rows), we need to rotate
# arySptExpInf by 90 degrees rightward. This will insure that with the
# 0th axis we index the scientific x-axis and higher values move us to
# the right on that x-axis. It will also ensure that the 1st
# python axis indexes the scientific y-axis and higher values will
# move us up.
arySptExpInf = np.rot90(arySptExpInf, k=3)
# *********************************************************************
# *********************************************************************
# *** Load temporal condition information
# load temporal information about presented stimuli
aryTmpExpInf = np.load(cfg.strTmpExpInf)
# add fourth column to make it appropriate for pyprf_feature
if aryTmpExpInf.shape[-1] == 3:
vecNewCol = np.greater(aryTmpExpInf[:, 0], 0).astype(np.float16)
aryTmpExpInf = np.concatenate(
(aryTmpExpInf, np.expand_dims(vecNewCol, axis=1)), axis=1)
# *********************************************************************
# *********************************************************************
# If desired by user, also create model parameters for supp surround
if varRat is not None:
aryMdlParamsSur = np.copy(aryMdlParams)
aryMdlParamsSur[:, 2] = aryMdlParamsSur[:, 2] * varRat
# *********************************************************************
# *********************************************************************
# *** Create 2D Gauss model responses to spatial conditions.
aryMdlRsp = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParams, cfg.varPar, lgcPrint=lgcPrint)
# If desired by user, also create model responses for supp surround
if varRat is not None:
aryMdlRspSur = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParamsSur, cfg.varPar,
lgcPrint=lgcPrint)
del(arySptExpInf)
# *********************************************************************
# *********************************************************************
# *** Create prf time course models
# Check whether path to npy file with hrf parameters was provided
if strPathHrf is not None:
if lgcPrint:
print('---------Load custom hrf parameters')
aryCstPrm = np.load(strPathHrf)
dctPrm = {}
dctPrm['peak_delay'] = aryCstPrm[0]
dctPrm['under_delay'] = aryCstPrm[1]
dctPrm['peak_disp'] = aryCstPrm[2]
dctPrm['under_disp'] = aryCstPrm[3]
dctPrm['p_u_ratio'] = aryCstPrm[4]
# If not, set dctPrm to None, which will result in default hrf params
else:
if lgcPrint:
print('---------Use default hrf parameters')
dctPrm = None
aryPrfTc = crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, cfg.varNumVol,
cfg.varTr, cfg.varTmpOvsmpl,
cfg.switchHrfSet, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm, lgcPrint=lgcPrint)
# If desired by user, create prf time course models for supp surround
if varRat is not None:
if lgcPrint:
print('---------Add suppressive surround')
aryPrfTcSur = crt_prf_ftr_tc(aryMdlRspSur, aryTmpExpInf,
cfg.varNumVol, cfg.varTr,
cfg.varTmpOvsmpl, cfg.switchHrfSet,
(int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm,
lgcPrint=lgcPrint)
# Concatenate aryPrfTc and aryPrfTcSur
aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcSur), axis=1)
# *********************************************************************
return aryPrfTc | [
"def",
"model_creation_opt",
"(",
"dicCnfg",
",",
"aryMdlParams",
",",
"strPathHrf",
"=",
"None",
",",
"varRat",
"=",
"None",
",",
"lgcPrint",
"=",
"True",
")",
":",
"# *************************************************************************",
"# *** Load parameters from config file",
"# Load config parameters from dictionary into namespace:",
"cfg",
"=",
"cls_set_config",
"(",
"dicCnfg",
")",
"# *************************************************************************",
"if",
"cfg",
".",
"lgcCrteMdl",
":",
"# *********************************************************************",
"# *** Load spatial condition information",
"arySptExpInf",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strSptExpInf",
")",
"# Here we assume scientific convention and orientation of images where",
"# the origin should fall in the lower left corner, the x-axis occupies",
"# the width and the y-axis occupies the height dimension of the screen.",
"# We also assume that the first dimension that the user provides",
"# indexes x and the second indexes the y-axis. Since python is column",
"# major (i.e. first indexes columns, only then rows), we need to rotate",
"# arySptExpInf by 90 degrees rightward. This will insure that with the",
"# 0th axis we index the scientific x-axis and higher values move us to",
"# the right on that x-axis. It will also ensure that the 1st",
"# python axis indexes the scientific y-axis and higher values will",
"# move us up.",
"arySptExpInf",
"=",
"np",
".",
"rot90",
"(",
"arySptExpInf",
",",
"k",
"=",
"3",
")",
"# *********************************************************************",
"# *********************************************************************",
"# *** Load temporal condition information",
"# load temporal information about presented stimuli",
"aryTmpExpInf",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strTmpExpInf",
")",
"# add fourth column to make it appropriate for pyprf_feature",
"if",
"aryTmpExpInf",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"3",
":",
"vecNewCol",
"=",
"np",
".",
"greater",
"(",
"aryTmpExpInf",
"[",
":",
",",
"0",
"]",
",",
"0",
")",
".",
"astype",
"(",
"np",
".",
"float16",
")",
"aryTmpExpInf",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryTmpExpInf",
",",
"np",
".",
"expand_dims",
"(",
"vecNewCol",
",",
"axis",
"=",
"1",
")",
")",
",",
"axis",
"=",
"1",
")",
"# *********************************************************************",
"# *********************************************************************",
"# If desired by user, also create model parameters for supp surround",
"if",
"varRat",
"is",
"not",
"None",
":",
"aryMdlParamsSur",
"=",
"np",
".",
"copy",
"(",
"aryMdlParams",
")",
"aryMdlParamsSur",
"[",
":",
",",
"2",
"]",
"=",
"aryMdlParamsSur",
"[",
":",
",",
"2",
"]",
"*",
"varRat",
"# *********************************************************************",
"# *********************************************************************",
"# *** Create 2D Gauss model responses to spatial conditions.",
"aryMdlRsp",
"=",
"crt_mdl_rsp",
"(",
"arySptExpInf",
",",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"aryMdlParams",
",",
"cfg",
".",
"varPar",
",",
"lgcPrint",
"=",
"lgcPrint",
")",
"# If desired by user, also create model responses for supp surround",
"if",
"varRat",
"is",
"not",
"None",
":",
"aryMdlRspSur",
"=",
"crt_mdl_rsp",
"(",
"arySptExpInf",
",",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"aryMdlParamsSur",
",",
"cfg",
".",
"varPar",
",",
"lgcPrint",
"=",
"lgcPrint",
")",
"del",
"(",
"arySptExpInf",
")",
"# *********************************************************************",
"# *********************************************************************",
"# *** Create prf time course models",
"# Check whether path to npy file with hrf parameters was provided",
"if",
"strPathHrf",
"is",
"not",
"None",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'---------Load custom hrf parameters'",
")",
"aryCstPrm",
"=",
"np",
".",
"load",
"(",
"strPathHrf",
")",
"dctPrm",
"=",
"{",
"}",
"dctPrm",
"[",
"'peak_delay'",
"]",
"=",
"aryCstPrm",
"[",
"0",
"]",
"dctPrm",
"[",
"'under_delay'",
"]",
"=",
"aryCstPrm",
"[",
"1",
"]",
"dctPrm",
"[",
"'peak_disp'",
"]",
"=",
"aryCstPrm",
"[",
"2",
"]",
"dctPrm",
"[",
"'under_disp'",
"]",
"=",
"aryCstPrm",
"[",
"3",
"]",
"dctPrm",
"[",
"'p_u_ratio'",
"]",
"=",
"aryCstPrm",
"[",
"4",
"]",
"# If not, set dctPrm to None, which will result in default hrf params",
"else",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'---------Use default hrf parameters'",
")",
"dctPrm",
"=",
"None",
"aryPrfTc",
"=",
"crt_prf_ftr_tc",
"(",
"aryMdlRsp",
",",
"aryTmpExpInf",
",",
"cfg",
".",
"varNumVol",
",",
"cfg",
".",
"varTr",
",",
"cfg",
".",
"varTmpOvsmpl",
",",
"cfg",
".",
"switchHrfSet",
",",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"cfg",
".",
"varPar",
",",
"dctPrm",
"=",
"dctPrm",
",",
"lgcPrint",
"=",
"lgcPrint",
")",
"# If desired by user, create prf time course models for supp surround",
"if",
"varRat",
"is",
"not",
"None",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'---------Add suppressive surround'",
")",
"aryPrfTcSur",
"=",
"crt_prf_ftr_tc",
"(",
"aryMdlRspSur",
",",
"aryTmpExpInf",
",",
"cfg",
".",
"varNumVol",
",",
"cfg",
".",
"varTr",
",",
"cfg",
".",
"varTmpOvsmpl",
",",
"cfg",
".",
"switchHrfSet",
",",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"cfg",
".",
"varPar",
",",
"dctPrm",
"=",
"dctPrm",
",",
"lgcPrint",
"=",
"lgcPrint",
")",
"# Concatenate aryPrfTc and aryPrfTcSur",
"aryPrfTc",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryPrfTc",
",",
"aryPrfTcSur",
")",
",",
"axis",
"=",
"1",
")",
"# *********************************************************************",
"return",
"aryPrfTc"
] | Create or load pRF model time courses.
Parameters
----------
dicCnfg : dict
Dictionary containing config parameters.
aryMdlParams : numpy arrays
x, y and sigma parameters.
strPathHrf : str or None:
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
varRat : float, default None
Ratio of size suppressive surround to size of center pRF
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`. | [
"Create",
"or",
"load",
"pRF",
"model",
"time",
"courses",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/model_creation_opt.py#L27-L163 |
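
model_creation_opt wires together three stages: load and rotate the spatial stimulus array, pad the temporal array to four columns, then build 2D-Gauss responses (crt_mdl_rsp) and convolve them into time courses (crt_prf_ftr_tc), optionally concatenating a suppressive-surround variant whose sigma is scaled by varRat. A call sketch; the config keys are exactly the ones the body reads, while the file names and sizes are placeholders:

import numpy as np

dicCnfg = {
    "lgcCrteMdl": True,
    "strSptExpInf": "arySptExpInf.npy",  # spatial stimulus array on disk
    "strTmpExpInf": "aryTmpExpInf.npy",  # temporal stimulus array on disk
    "varVslSpcSzeX": 200, "varVslSpcSzeY": 200,  # visual-space grid size
    "varPar": 4,                                  # parallel processes
    "varNumVol": 300, "varTr": 2.0, "varTmpOvsmpl": 10.0,
    "switchHrfSet": 1,                            # canonical HRF only
}
aryMdlParams = np.array([[0.0, 0.0, 1.0],
                         [1.5, -2.0, 0.5]])  # one row per model: x, y, sigma
aryPrfTc = model_creation_opt(dicCnfg, aryMdlParams, varRat=None)
print(aryPrfTc.shape)
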
askedrelic/journal | journal/main.py | parse_config | def parse_config(args):
"""
Try to load config, to load other journal locations
Otherwise, return default location
Returns journal location
"""
# Try user config or return default location early
config_path = path.expanduser(args.config_file)
if not path.exists(config_path):
# Complain if they provided non-existant config
if args.config_file != DEFAULT_JOURNAL_RC:
print("journal: error: config file '" + args.config_file + "' not found")
sys.exit()
else:
# If no config file, use default journal location
return DEFAULT_JOURNAL
# If we get here, assume valid config file
config = ConfigParser.SafeConfigParser({
'journal':{'default':'__journal'},
'__journal':{'location':DEFAULT_JOURNAL}
})
config.read(config_path)
journal_location = config.get(config.get('journal', 'default'), 'location');
if args.journal:
journal_location = config.get(args.journal, 'location');
return journal_location | python | def parse_config(args):
"""
Try to load config, to load other journal locations
Otherwise, return default location
Returns journal location
"""
# Try user config or return default location early
config_path = path.expanduser(args.config_file)
if not path.exists(config_path):
# Complain if they provided non-existant config
if args.config_file != DEFAULT_JOURNAL_RC:
print("journal: error: config file '" + args.config_file + "' not found")
sys.exit()
else:
# If no config file, use default journal location
return DEFAULT_JOURNAL
# If we get here, assume valid config file
config = ConfigParser.SafeConfigParser({
'journal':{'default':'__journal'},
'__journal':{'location':DEFAULT_JOURNAL}
})
config.read(config_path)
journal_location = config.get(config.get('journal', 'default'), 'location');
if args.journal:
journal_location = config.get(args.journal, 'location');
return journal_location | [
"def",
"parse_config",
"(",
"args",
")",
":",
"# Try user config or return default location early",
"config_path",
"=",
"path",
".",
"expanduser",
"(",
"args",
".",
"config_file",
")",
"if",
"not",
"path",
".",
"exists",
"(",
"config_path",
")",
":",
"# Complain if they provided non-existant config",
"if",
"args",
".",
"config_file",
"!=",
"DEFAULT_JOURNAL_RC",
":",
"print",
"(",
"\"journal: error: config file '\"",
"+",
"args",
".",
"config_file",
"+",
"\"' not found\"",
")",
"sys",
".",
"exit",
"(",
")",
"else",
":",
"# If no config file, use default journal location",
"return",
"DEFAULT_JOURNAL",
"# If we get here, assume valid config file",
"config",
"=",
"ConfigParser",
".",
"SafeConfigParser",
"(",
"{",
"'journal'",
":",
"{",
"'default'",
":",
"'__journal'",
"}",
",",
"'__journal'",
":",
"{",
"'location'",
":",
"DEFAULT_JOURNAL",
"}",
"}",
")",
"config",
".",
"read",
"(",
"config_path",
")",
"journal_location",
"=",
"config",
".",
"get",
"(",
"config",
".",
"get",
"(",
"'journal'",
",",
"'default'",
")",
",",
"'location'",
")",
"if",
"args",
".",
"journal",
":",
"journal_location",
"=",
"config",
".",
"get",
"(",
"args",
".",
"journal",
",",
"'location'",
")",
"return",
"journal_location"
] | Try to load config, to load other journal locations
Otherwise, return default location
Returns journal location | [
"Try",
"to",
"load",
"config",
"to",
"load",
"other",
"journal",
"locations",
"Otherwise",
"return",
"default",
"location"
] | train | https://github.com/askedrelic/journal/blob/848b8ec67ed124ec112926211ebeccbc8d11f2b0/journal/main.py#L72-L100 |
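
parse_config resolves the journal location in three steps: a missing default rc file falls back to DEFAULT_JOURNAL, a missing user-specified config is a hard error, and otherwise the [journal] section's default entry names the section whose location wins (overridden by args.journal when given). A sketch of the expected rc layout and a call, with placeholder paths:

from argparse import Namespace

# parse_config reads an INI file shaped like this (section names other than
# [journal] are user-chosen):
#
#   [journal]
#   default = work
#
#   [work]
#   location = ~/journals/work
#
args = Namespace(config_file="~/.journalrc", journal=None)  # rc path assumed
print(parse_config(args))  # -> ~/journals/work
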
askedrelic/journal | journal/main.py | record_entries | def record_entries(journal_location, entries):
"""
args
entry - list of entries to record
"""
check_journal_dest(journal_location)
current_date = datetime.datetime.today()
date_header = current_date.strftime("%a %H:%M:%S %Y-%m-%d") + "\n"
with open(build_journal_path(journal_location, current_date), "a") as date_file:
entry_output = date_header
# old style
# for entry in entries:
# entry_output += "-" + entry + "\n"
# new style
entry_output += '-' + ' '.join(entries) + "\n"
entry_output += "\n"
date_file.write(entry_output) | python | def record_entries(journal_location, entries):
"""
args
entry - list of entries to record
"""
check_journal_dest(journal_location)
current_date = datetime.datetime.today()
date_header = current_date.strftime("%a %H:%M:%S %Y-%m-%d") + "\n"
with open(build_journal_path(journal_location, current_date), "a") as date_file:
entry_output = date_header
# old style
# for entry in entries:
# entry_output += "-" + entry + "\n"
# new style
entry_output += '-' + ' '.join(entries) + "\n"
entry_output += "\n"
date_file.write(entry_output) | [
"def",
"record_entries",
"(",
"journal_location",
",",
"entries",
")",
":",
"check_journal_dest",
"(",
"journal_location",
")",
"current_date",
"=",
"datetime",
".",
"datetime",
".",
"today",
"(",
")",
"date_header",
"=",
"current_date",
".",
"strftime",
"(",
"\"%a %H:%M:%S %Y-%m-%d\"",
")",
"+",
"\"\\n\"",
"with",
"open",
"(",
"build_journal_path",
"(",
"journal_location",
",",
"current_date",
")",
",",
"\"a\"",
")",
"as",
"date_file",
":",
"entry_output",
"=",
"date_header",
"# old style",
"# for entry in entries:",
"# entry_output += \"-\" + entry + \"\\n\"",
"# new style",
"entry_output",
"+=",
"'-'",
"+",
"' '",
".",
"join",
"(",
"entries",
")",
"+",
"\"\\n\"",
"entry_output",
"+=",
"\"\\n\"",
"date_file",
".",
"write",
"(",
"entry_output",
")"
] | args
entry - list of entries to record | [
"args",
"entry",
"-",
"list",
"of",
"entries",
"to",
"record"
] | train | https://github.com/askedrelic/journal/blob/848b8ec67ed124ec112926211ebeccbc8d11f2b0/journal/main.py#L111-L128 |
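
record_entries appends to a per-date file: a timestamp header line, then, per the "new style" noted in its comments, all entries joined with spaces into a single dash-prefixed line, then a blank line:

record_entries("~/journal", ["fixed the deploy script,", "reviewed PR 42"])
# appends something like:
#   Mon 14:02:11 2024-01-15
#   -fixed the deploy script, reviewed PR 42
#
# (one joined line, not one line per entry)
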
askedrelic/journal | journal/main.py | get_entry | def get_entry(journal_location, date):
"""
args
date - date object
returns entry text or None if entry doesn't exist
"""
if not isinstance(date, datetime.date):
return None
try:
with open(build_journal_path(journal_location, date), "r") as entry_file:
return entry_file.read()
except IOError:
return None | python | def get_entry(journal_location, date):
"""
args
date - date object
returns entry text or None if entry doesn't exist
"""
if not isinstance(date, datetime.date):
return None
try:
with open(build_journal_path(journal_location, date), "r") as entry_file:
return entry_file.read()
except IOError:
return None | [
"def",
"get_entry",
"(",
"journal_location",
",",
"date",
")",
":",
"if",
"not",
"isinstance",
"(",
"date",
",",
"datetime",
".",
"date",
")",
":",
"return",
"None",
"try",
":",
"with",
"open",
"(",
"build_journal_path",
"(",
"journal_location",
",",
"date",
")",
",",
"\"r\"",
")",
"as",
"entry_file",
":",
"return",
"entry_file",
".",
"read",
"(",
")",
"except",
"IOError",
":",
"return",
"None"
] | args
date - date object
returns entry text or None if entry doesn't exist | [
"args",
"date",
"-",
"date",
"object",
"returns",
"entry",
"text",
"or",
"None",
"if",
"entry",
"doesn",
"t",
"exist"
] | train | https://github.com/askedrelic/journal/blob/848b8ec67ed124ec112926211ebeccbc8d11f2b0/journal/main.py#L136-L148 |
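
get_entry is the read side: it type-checks the date, then returns the file's text, with None for both missing files and non-date inputs:

import datetime

text = get_entry("~/journal", datetime.date.today())
print(text if text is not None else "no entry for today")
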
pmacosta/pcsv | pcsv/replace.py | replace | def replace(
fname1,
fname2,
dfilter1,
dfilter2,
has_header1=True,
has_header2=True,
frow1=0,
frow2=0,
ofname=None,
ocols=None,
):
r"""
Replace data in one file with data from another file.
:param fname1: Name of the input comma-separated values file, the file
that contains the columns to be replaced
:type fname1: FileNameExists_
:param fname2: Name of the replacement comma-separated values file, the
file that contains the replacement data
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the input file
:type dfilter1: :ref:`CsvDataFilter`
:param dfilter2: Row and/or column filter for the replacement file
:type dfilter2: :ref:`CsvDataFilter`
:param has_header1: Flag that indicates whether the input comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the replacement
comma-separated values file has column headers in its
first line (True) or not (False)
:type has_header2: boolean
:param frow1: Input comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer of float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Replacement comma-separated values file first data row
(starting from 1). If 0 the row where data starts is
auto-detected as the first row that has a number (integer of
float) in at least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the input file data but with some columns
replaced with data from the replacement file. If None the
input file is replaced "in place"
:type ofname: FileName_
:param ocols: Names of the replaced columns in the output comma-separated
values file. If None the column names in the input file are
used if **has_header1** is True, otherwise no header is used
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.replace.replace
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* RuntimeError (Number of input and output columns are different)
* RuntimeError (Number of input and replacement columns are
different)
* ValueError (Column *[column_identifier]* not found)
* ValueError (Number of rows mismatch between input and replacement
data)
.. [[[end]]]
"""
# pylint: disable=R0913,R0914
irmm_ex = pexdoc.exh.addex(
RuntimeError, "Number of input and replacement columns are different"
)
iomm_ex = pexdoc.exh.addex(
RuntimeError, "Number of input and output columns are different"
)
# Read and validate input data
iobj = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate replacement data
robj = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
icfilter = iobj.header() if iobj.cfilter is None else iobj.cfilter
rcfilter = robj.header() if robj.cfilter is None else robj.cfilter
ocols = icfilter if ocols is None else ocols
# Miscellaneous data validation
irmm_ex(len(icfilter) != len(rcfilter))
iomm_ex(len(icfilter) != len(ocols))
# Replace data
iobj.replace(rdata=robj.data(filtered=True), filtered=True)
iheader_upper = [
item.upper() if isinstance(item, str) else item for item in iobj.header()
]
icfilter_index = [
iheader_upper.index(item.upper() if isinstance(item, str) else item)
for item in icfilter
]
# Create new header
orow = []
if has_header1:
for col_num, idata in enumerate(iobj.header()):
orow.append(
ocols[icfilter_index.index(col_num)]
if col_num in icfilter_index
else idata
)
# Write (new) file
iobj.write(fname=ofname, header=orow if orow else False, append=False) | python | def replace(
fname1,
fname2,
dfilter1,
dfilter2,
has_header1=True,
has_header2=True,
frow1=0,
frow2=0,
ofname=None,
ocols=None,
):
r"""
Replace data in one file with data from another file.
:param fname1: Name of the input comma-separated values file, the file
that contains the columns to be replaced
:type fname1: FileNameExists_
:param fname2: Name of the replacement comma-separated values file, the
file that contains the replacement data
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the input file
:type dfilter1: :ref:`CsvDataFilter`
:param dfilter2: Row and/or column filter for the replacement file
:type dfilter2: :ref:`CsvDataFilter`
:param has_header1: Flag that indicates whether the input comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the replacement
comma-separated values file has column headers in its
first line (True) or not (False)
:type has_header2: boolean
:param frow1: Input comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer or float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Replacement comma-separated values file first data row
(starting from 1). If 0 the row where data starts is
auto-detected as the first row that has a number (integer or
float) in at least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the input file data but with some columns
replaced with data from the replacement file. If None the
input file is replaced "in place"
:type ofname: FileName_
:param ocols: Names of the replaced columns in the output comma-separated
values file. If None the column names in the input file are
used if **has_header1** is True, otherwise no header is used
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.replace.replace
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* RuntimeError (Number of input and output columns are different)
* RuntimeError (Number of input and replacement columns are
different)
* ValueError (Column *[column_identifier]* not found)
* ValueError (Number of rows mismatch between input and replacement
data)
.. [[[end]]]
"""
# pylint: disable=R0913,R0914
irmm_ex = pexdoc.exh.addex(
RuntimeError, "Number of input and replacement columns are different"
)
iomm_ex = pexdoc.exh.addex(
RuntimeError, "Number of input and output columns are different"
)
# Read and validate input data
iobj = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate replacement data
robj = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
icfilter = iobj.header() if iobj.cfilter is None else iobj.cfilter
rcfilter = robj.header() if robj.cfilter is None else robj.cfilter
ocols = icfilter if ocols is None else ocols
# Miscellaneous data validation
irmm_ex(len(icfilter) != len(rcfilter))
iomm_ex(len(icfilter) != len(ocols))
# Replace data
iobj.replace(rdata=robj.data(filtered=True), filtered=True)
iheader_upper = [
item.upper() if isinstance(item, str) else item for item in iobj.header()
]
icfilter_index = [
iheader_upper.index(item.upper() if isinstance(item, str) else item)
for item in icfilter
]
# Create new header
orow = []
if has_header1:
for col_num, idata in enumerate(iobj.header()):
orow.append(
ocols[icfilter_index.index(col_num)]
if col_num in icfilter_index
else idata
)
# Write (new) file
iobj.write(fname=ofname, header=orow if orow else False, append=False) | [
"def",
"replace",
"(",
"fname1",
",",
"fname2",
",",
"dfilter1",
",",
"dfilter2",
",",
"has_header1",
"=",
"True",
",",
"has_header2",
"=",
"True",
",",
"frow1",
"=",
"0",
",",
"frow2",
"=",
"0",
",",
"ofname",
"=",
"None",
",",
"ocols",
"=",
"None",
",",
")",
":",
"# pylint: disable=R0913,R0914",
"irmm_ex",
"=",
"pexdoc",
".",
"exh",
".",
"addex",
"(",
"RuntimeError",
",",
"\"Number of input and replacement columns are different\"",
")",
"iomm_ex",
"=",
"pexdoc",
".",
"exh",
".",
"addex",
"(",
"RuntimeError",
",",
"\"Number of input and output columns are different\"",
")",
"# Read and validate input data",
"iobj",
"=",
"CsvFile",
"(",
"fname",
"=",
"fname1",
",",
"dfilter",
"=",
"dfilter1",
",",
"has_header",
"=",
"has_header1",
",",
"frow",
"=",
"frow1",
")",
"# Read and validate replacement data",
"robj",
"=",
"CsvFile",
"(",
"fname",
"=",
"fname2",
",",
"dfilter",
"=",
"dfilter2",
",",
"has_header",
"=",
"has_header2",
",",
"frow",
"=",
"frow2",
")",
"# Assign output data structure",
"ofname",
"=",
"fname1",
"if",
"ofname",
"is",
"None",
"else",
"ofname",
"icfilter",
"=",
"iobj",
".",
"header",
"(",
")",
"if",
"iobj",
".",
"cfilter",
"is",
"None",
"else",
"iobj",
".",
"cfilter",
"rcfilter",
"=",
"robj",
".",
"header",
"(",
")",
"if",
"robj",
".",
"cfilter",
"is",
"None",
"else",
"robj",
".",
"cfilter",
"ocols",
"=",
"icfilter",
"if",
"ocols",
"is",
"None",
"else",
"ocols",
"# Miscellaneous data validation",
"irmm_ex",
"(",
"len",
"(",
"icfilter",
")",
"!=",
"len",
"(",
"rcfilter",
")",
")",
"iomm_ex",
"(",
"len",
"(",
"icfilter",
")",
"!=",
"len",
"(",
"ocols",
")",
")",
"# Replace data",
"iobj",
".",
"replace",
"(",
"rdata",
"=",
"robj",
".",
"data",
"(",
"filtered",
"=",
"True",
")",
",",
"filtered",
"=",
"True",
")",
"iheader_upper",
"=",
"[",
"item",
".",
"upper",
"(",
")",
"if",
"isinstance",
"(",
"item",
",",
"str",
")",
"else",
"item",
"for",
"item",
"in",
"iobj",
".",
"header",
"(",
")",
"]",
"icfilter_index",
"=",
"[",
"iheader_upper",
".",
"index",
"(",
"item",
".",
"upper",
"(",
")",
"if",
"isinstance",
"(",
"item",
",",
"str",
")",
"else",
"item",
")",
"for",
"item",
"in",
"icfilter",
"]",
"# Create new header",
"orow",
"=",
"[",
"]",
"if",
"has_header1",
":",
"for",
"col_num",
",",
"idata",
"in",
"enumerate",
"(",
"iobj",
".",
"header",
"(",
")",
")",
":",
"orow",
".",
"append",
"(",
"ocols",
"[",
"icfilter_index",
".",
"index",
"(",
"col_num",
")",
"]",
"if",
"col_num",
"in",
"icfilter_index",
"else",
"idata",
")",
"# Write (new) file",
"iobj",
".",
"write",
"(",
"fname",
"=",
"ofname",
",",
"header",
"=",
"orow",
"if",
"orow",
"else",
"False",
",",
"append",
"=",
"False",
")"
] | r"""
Replace data in one file with data from another file.
:param fname1: Name of the input comma-separated values file, the file
that contains the columns to be replaced
:type fname1: FileNameExists_
:param fname2: Name of the replacement comma-separated values file, the
file that contains the replacement data
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the input file
:type dfilter1: :ref:`CsvDataFilter`
:param dfilter2: Row and/or column filter for the replacement file
:type dfilter2: :ref:`CsvDataFilter`
:param has_header1: Flag that indicates whether the input comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the replacement
comma-separated values file has column headers in its
first line (True) or not (False)
:type has_header2: boolean
:param frow1: Input comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer or float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Replacement comma-separated values file first data row
(starting from 1). If 0 the row where data starts is
auto-detected as the first row that has a number (integer or
float) in at least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the input file data but with some columns
replaced with data from the replacement file. If None the
input file is replaced "in place"
:type ofname: FileName_
:param ocols: Names of the replaced columns in the output comma-separated
values file. If None the column names in the input file are
used if **has_header1** is True, otherwise no header is used
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.replace.replace
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* RuntimeError (Number of input and output columns are different)
* RuntimeError (Number of input and replacement columns are
different)
* ValueError (Column *[column_identifier]* not found)
* ValueError (Number of rows mismatch between input and replacement
data)
.. [[[end]]] | [
"r",
"Replace",
"data",
"in",
"one",
"file",
"with",
"data",
"from",
"another",
"file",
"."
] | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/replace.py#L43-L186 |
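A minimal usage sketch for the function above. The file names are hypothetical, the filter values are placeholders whose exact format is defined by the CsvDataFilter type referenced in the docstring, and the import path is assumed from the repository layout (pcsv/replace.py):

# Hypothetical call: swap the "temp" column of data.csv for the "temp"
# column of corrections.csv, writing the result to out.csv.
from pcsv.replace import replace  # import path assumed from the repo layout

replace(
    fname1="data.csv",         # hypothetical input file
    fname2="corrections.csv",  # hypothetical replacement file
    dfilter1="temp",           # column filter for the input file (placeholder)
    dfilter2="temp",           # column filter for the replacement file (placeholder)
    ofname="out.csv",          # write a new file instead of replacing in place
)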
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_hrf.py | spmt | def spmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1,
p_u_ratio=6):
"""Normalized SPM HRF function from sum of two gamma PDFs
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF function as used in SPM. It
has the following defaults:
- delay of response (relative to onset) : 6s
- delay of undershoot (relative to onset) : 16s
- dispersion of response : 1s
- dispersion of undershoot : 1s
- ratio of response to undershoot : 6s
- onset : 0s
- length of kernel : 32s
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation
"""
return spm_hrf_compat(t, peak_delay=peak_delay, under_delay=under_delay,
peak_disp=peak_disp, under_disp=under_disp,
p_u_ratio=p_u_ratio, normalize=True) | python | def spmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1,
p_u_ratio=6):
"""Normalized SPM HRF function from sum of two gamma PDFs
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF function as used in SPM. It
has the following defaults:
- delay of response (relative to onset) : 6s
- delay of undershoot (relative to onset) : 16s
- dispersion of response : 1s
- dispersion of undershoot : 1s
- ratio of response to undershoot : 6s
- onset : 0s
- length of kernel : 32s
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation
"""
return spm_hrf_compat(t, peak_delay=peak_delay, under_delay=under_delay,
peak_disp=peak_disp, under_disp=under_disp,
p_u_ratio=p_u_ratio, normalize=True) | [
"def",
"spmt",
"(",
"t",
",",
"peak_delay",
"=",
"6",
",",
"under_delay",
"=",
"16",
",",
"peak_disp",
"=",
"1",
",",
"under_disp",
"=",
"1",
",",
"p_u_ratio",
"=",
"6",
")",
":",
"return",
"spm_hrf_compat",
"(",
"t",
",",
"peak_delay",
"=",
"peak_delay",
",",
"under_delay",
"=",
"under_delay",
",",
"peak_disp",
"=",
"peak_disp",
",",
"under_disp",
"=",
"under_disp",
",",
"p_u_ratio",
"=",
"p_u_ratio",
",",
"normalize",
"=",
"True",
")"
] | Normalized SPM HRF function from sum of two gamma PDFs
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF function as used in SPM. It
has the following defaults:
- delay of response (relative to onset) : 6s
- delay of undershoot (relative to onset) : 16s
- dispersion of response : 1s
- dispersion of undershoot : 1s
- ratio of response to undershoot : 6s
- onset : 0s
- length of kernel : 32s
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation | [
"Normalized",
"SPM",
"HRF",
"function",
"from",
"sum",
"of",
"two",
"gamma",
"PDFs"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_hrf.py#L96-L129 |
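A short sketch sampling the canonical HRF defined above; the import path is assumed from the repository layout (pyprf_feature/analysis/utils_hrf.py):

import numpy as np
from pyprf_feature.analysis.utils_hrf import spmt  # import path assumed

t = np.arange(0., 32., 0.1)      # sample times in seconds over the 32 s kernel
hrf = spmt(t)                    # canonical HRF with the defaults listed above
print(float(t[np.argmax(hrf)]))  # time of the response peak, in seconds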
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_hrf.py | dspmt | def dspmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1,
p_u_ratio=6):
""" SPM canonical HRF derivative, HRF derivative values for time values `t`
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF derivative function as used in SPM.
[2] It is the numerical difference between the HRF sampled at time `t` and the
values sampled at time `t - 1`
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation
"""
t = np.asarray(t)
aryRsp1 = spmt(t, peak_delay=peak_delay, under_delay=under_delay,
peak_disp=peak_disp, under_disp=under_disp,
p_u_ratio=p_u_ratio)
aryRsp2 = spmt(t-1, peak_delay=peak_delay, under_delay=under_delay,
peak_disp=peak_disp, under_disp=under_disp,
p_u_ratio=p_u_ratio)
return aryRsp1 - aryRsp2 | python | def dspmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1,
p_u_ratio=6):
""" SPM canonical HRF derivative, HRF derivative values for time values `t`
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF derivative function as used in SPM.
[2] It is the numerical difference between the HRF sampled at time `t` and the
values sampled at time `t - 1`
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation
"""
t = np.asarray(t)
aryRsp1 = spmt(t, peak_delay=peak_delay, under_delay=under_delay,
peak_disp=peak_disp, under_disp=under_disp,
p_u_ratio=p_u_ratio)
aryRsp2 = spmt(t-1, peak_delay=peak_delay, under_delay=under_delay,
peak_disp=peak_disp, under_disp=under_disp,
p_u_ratio=p_u_ratio)
return aryRsp1 - aryRsp2 | [
"def",
"dspmt",
"(",
"t",
",",
"peak_delay",
"=",
"6",
",",
"under_delay",
"=",
"16",
",",
"peak_disp",
"=",
"1",
",",
"under_disp",
"=",
"1",
",",
"p_u_ratio",
"=",
"6",
")",
":",
"t",
"=",
"np",
".",
"asarray",
"(",
"t",
")",
"aryRsp1",
"=",
"spmt",
"(",
"t",
",",
"peak_delay",
"=",
"peak_delay",
",",
"under_delay",
"=",
"under_delay",
",",
"peak_disp",
"=",
"peak_disp",
",",
"under_disp",
"=",
"under_disp",
",",
"p_u_ratio",
"=",
"p_u_ratio",
")",
"aryRsp2",
"=",
"spmt",
"(",
"t",
"-",
"1",
",",
"peak_delay",
"=",
"peak_delay",
",",
"under_delay",
"=",
"under_delay",
",",
"peak_disp",
"=",
"peak_disp",
",",
"under_disp",
"=",
"under_disp",
",",
"p_u_ratio",
"=",
"p_u_ratio",
")",
"return",
"aryRsp1",
"-",
"aryRsp2"
] | SPM canonical HRF derivative, HRF derivative values for time values `t`
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF derivative function as used in SPM.
[2] It is the numerical difference between the HRF sampled at time `t` and the
values sampled at time `t - 1`
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation | [
"SPM",
"canonical",
"HRF",
"derivative",
"HRF",
"derivative",
"values",
"for",
"time",
"values",
"t"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_hrf.py#L132-L165 |
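Since the body above literally returns spmt(t) - spmt(t - 1), the identity can be checked directly (import path assumed as before):

import numpy as np
from pyprf_feature.analysis.utils_hrf import dspmt, spmt  # import path assumed

t = np.arange(0., 32., 0.5)
assert np.allclose(dspmt(t), spmt(t) - spmt(t - 1))  # finite-difference identity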
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_hrf.py | ddspmt | def ddspmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1,
p_u_ratio=6):
""" SPM canonical HRF dispersion derivative, values for time values `t`
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF dispersion derivative function as used in SPM.
[2] It is the numerical difference between the HRF sampled at time `t`, and
values at `t` for another HRF shape with a small change in the peak
dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`).
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation
"""
_spm_dd_func = partial(spmt, peak_delay=peak_delay,
under_delay=under_delay,
under_disp=under_disp, p_u_ratio=p_u_ratio,
peak_disp=1.01)
return (spmt(t) - _spm_dd_func(t)) / 0.01 | python | def ddspmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1,
p_u_ratio=6):
""" SPM canonical HRF dispersion derivative, values for time values `t`
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF dispersion derivative function as used in SPM.
[2] It is the numerical difference between the HRF sampled at time `t`, and
values at `t` for another HRF shape with a small change in the peak
dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`).
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation
"""
_spm_dd_func = partial(spmt, peak_delay=peak_delay,
under_delay=under_delay,
under_disp=under_disp, p_u_ratio=p_u_ratio,
peak_disp=1.01)
return (spmt(t) - _spm_dd_func(t)) / 0.01 | [
"def",
"ddspmt",
"(",
"t",
",",
"peak_delay",
"=",
"6",
",",
"under_delay",
"=",
"16",
",",
"peak_disp",
"=",
"1",
",",
"under_disp",
"=",
"1",
",",
"p_u_ratio",
"=",
"6",
")",
":",
"_spm_dd_func",
"=",
"partial",
"(",
"spmt",
",",
"peak_delay",
"=",
"peak_delay",
",",
"under_delay",
"=",
"under_delay",
",",
"under_disp",
"=",
"under_disp",
",",
"p_u_ratio",
"=",
"p_u_ratio",
",",
"peak_disp",
"=",
"1.01",
")",
"return",
"(",
"spmt",
"(",
"t",
")",
"-",
"_spm_dd_func",
"(",
"t",
")",
")",
"/",
"0.01"
] | SPM canonical HRF dispersion derivative, values for time values `t`
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF dispersion derivative function as used in SPM.
[2] It is the numerical difference between the HRF sampled at time `t`, and
values at `t` for another HRF shape with a small change in the peak
dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`).
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation | [
"SPM",
"canonical",
"HRF",
"dispersion",
"derivative",
"values",
"for",
"time",
"values",
"t"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_hrf.py#L168-L200 |
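Note that in the body above the unperturbed term spmt(t) is evaluated with default parameters, so custom keyword arguments only shape the perturbed HRF. A minimal sketch using the defaults (import path assumed as before):

import numpy as np
from pyprf_feature.analysis.utils_hrf import ddspmt  # import path assumed

t = np.arange(0., 32., 0.5)
dd = ddspmt(t)   # (HRF - HRF with peak_disp=1.01) / 0.01
print(dd.shape)  # one sample per time point in t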
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_hrf.py | create_boxcar | def create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol,
aryExclCnd=None, varTmpOvsmpl=1000.):
"""
Creation of condition time courses in temporally upsampled space.
Parameters
----------
aryCnd : np.array
1D array with condition identifiers (every condition has its own int)
aryOns : np.array, same len as aryCnd
1D array with condition onset times in seconds.
aryDrt : np.array, same len as aryCnd
1D array with condition durations of different conditions in seconds.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
aryExclCnd : array
1D array containing condition identifiers for conditions to be excluded
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
Returns
-------
aryBxCrOut : np.array, float16
Condition time courses in temporally upsampled space.
References:
-----
[1] https://github.com/fabianp/hrf_estimation
"""
if aryExclCnd is not None:
for cond in aryExclCnd:
aryOns = aryOns[aryCnd != cond]
aryDrt = aryDrt[aryCnd != cond]
aryCnd = aryCnd[aryCnd != cond]
resolution = varTr / float(varTmpOvsmpl)
aryCnd = np.asarray(aryCnd)
aryOns = np.asarray(aryOns, dtype=np.float)
unique_conditions = np.sort(np.unique(aryCnd))
boxcar = []
for c in unique_conditions:
tmp = np.zeros(int(varNumVol * varTr/resolution))
onset_c = aryOns[aryCnd == c]
duration_c = aryDrt[aryCnd == c]
onset_idx = np.round(onset_c / resolution).astype(np.int)
duration_idx = np.round(duration_c / resolution).astype(np.int)
aux = np.arange(int(varNumVol * varTr/resolution))
for start, dur in zip(onset_idx, duration_idx):
lgc = np.logical_and(aux >= start, aux < start + dur)
tmp = tmp + lgc
assert np.all(np.less(tmp, 2))
boxcar.append(tmp)
aryBxCrOut = np.array(boxcar).T
if aryBxCrOut.shape[1] == 1:
aryBxCrOut = np.squeeze(aryBxCrOut)
return aryBxCrOut.astype('float16') | python | def create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol,
aryExclCnd=None, varTmpOvsmpl=1000.):
"""
Creation of condition time courses in temporally upsampled space.
Parameters
----------
aryCnd : np.array
1D array with condition identifiers (every condition has its own int)
aryOns : np.array, same len as aryCnd
1D array with condition onset times in seconds.
aryDrt : np.array, same len as aryCnd
1D array with condition durations of different conditions in seconds.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
aryExclCnd : array
1D array containing condition identifiers for conditions to be excluded
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
Returns
-------
aryBxCrOut : np.array, float16
Condition time courses in temporally upsampled space.
References:
-----
[1] https://github.com/fabianp/hrf_estimation
"""
if aryExclCnd is not None:
for cond in aryExclCnd:
aryOns = aryOns[aryCnd != cond]
aryDrt = aryDrt[aryCnd != cond]
aryCnd = aryCnd[aryCnd != cond]
resolution = varTr / float(varTmpOvsmpl)
aryCnd = np.asarray(aryCnd)
aryOns = np.asarray(aryOns, dtype=np.float)
unique_conditions = np.sort(np.unique(aryCnd))
boxcar = []
for c in unique_conditions:
tmp = np.zeros(int(varNumVol * varTr/resolution))
onset_c = aryOns[aryCnd == c]
duration_c = aryDrt[aryCnd == c]
onset_idx = np.round(onset_c / resolution).astype(np.int)
duration_idx = np.round(duration_c / resolution).astype(np.int)
aux = np.arange(int(varNumVol * varTr/resolution))
for start, dur in zip(onset_idx, duration_idx):
lgc = np.logical_and(aux >= start, aux < start + dur)
tmp = tmp + lgc
assert np.all(np.less(tmp, 2))
boxcar.append(tmp)
aryBxCrOut = np.array(boxcar).T
if aryBxCrOut.shape[1] == 1:
aryBxCrOut = np.squeeze(aryBxCrOut)
return aryBxCrOut.astype('float16') | [
"def",
"create_boxcar",
"(",
"aryCnd",
",",
"aryOns",
",",
"aryDrt",
",",
"varTr",
",",
"varNumVol",
",",
"aryExclCnd",
"=",
"None",
",",
"varTmpOvsmpl",
"=",
"1000.",
")",
":",
"if",
"aryExclCnd",
"is",
"not",
"None",
":",
"for",
"cond",
"in",
"aryExclCnd",
":",
"aryOns",
"=",
"aryOns",
"[",
"aryCnd",
"!=",
"cond",
"]",
"aryDrt",
"=",
"aryDrt",
"[",
"aryCnd",
"!=",
"cond",
"]",
"aryCnd",
"=",
"aryCnd",
"[",
"aryCnd",
"!=",
"cond",
"]",
"resolution",
"=",
"varTr",
"/",
"float",
"(",
"varTmpOvsmpl",
")",
"aryCnd",
"=",
"np",
".",
"asarray",
"(",
"aryCnd",
")",
"aryOns",
"=",
"np",
".",
"asarray",
"(",
"aryOns",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"unique_conditions",
"=",
"np",
".",
"sort",
"(",
"np",
".",
"unique",
"(",
"aryCnd",
")",
")",
"boxcar",
"=",
"[",
"]",
"for",
"c",
"in",
"unique_conditions",
":",
"tmp",
"=",
"np",
".",
"zeros",
"(",
"int",
"(",
"varNumVol",
"*",
"varTr",
"/",
"resolution",
")",
")",
"onset_c",
"=",
"aryOns",
"[",
"aryCnd",
"==",
"c",
"]",
"duration_c",
"=",
"aryDrt",
"[",
"aryCnd",
"==",
"c",
"]",
"onset_idx",
"=",
"np",
".",
"round",
"(",
"onset_c",
"/",
"resolution",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"duration_idx",
"=",
"np",
".",
"round",
"(",
"duration_c",
"/",
"resolution",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"aux",
"=",
"np",
".",
"arange",
"(",
"int",
"(",
"varNumVol",
"*",
"varTr",
"/",
"resolution",
")",
")",
"for",
"start",
",",
"dur",
"in",
"zip",
"(",
"onset_idx",
",",
"duration_idx",
")",
":",
"lgc",
"=",
"np",
".",
"logical_and",
"(",
"aux",
">=",
"start",
",",
"aux",
"<",
"start",
"+",
"dur",
")",
"tmp",
"=",
"tmp",
"+",
"lgc",
"assert",
"np",
".",
"all",
"(",
"np",
".",
"less",
"(",
"tmp",
",",
"2",
")",
")",
"boxcar",
".",
"append",
"(",
"tmp",
")",
"aryBxCrOut",
"=",
"np",
".",
"array",
"(",
"boxcar",
")",
".",
"T",
"if",
"aryBxCrOut",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"aryBxCrOut",
"=",
"np",
".",
"squeeze",
"(",
"aryBxCrOut",
")",
"return",
"aryBxCrOut",
".",
"astype",
"(",
"'float16'",
")"
] | Creation of condition time courses in temporally upsampled space.
Parameters
----------
aryCnd : np.array
1D array with condition identifiers (every condition has its own int)
aryOns : np.array, same len as aryCnd
1D array with condition onset times in seconds.
aryDrt : np.array, same len as aryCnd
1D array with condition durations of different conditions in seconds.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
aryExclCnd : array
1D array containing condition identifiers for conditions to be excluded
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
Returns
-------
aryBxCrOut : np.array, float16
Condition time courses in temporally upsampled space.
References:
-----
[1] https://github.com/fabianp/hrf_estimation | [
"Creation",
"of",
"condition",
"time",
"courses",
"in",
"temporally",
"upsampled",
"space",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_hrf.py#L203-L263 |
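A sketch of the boxcar construction for a hypothetical two-condition design; it assumes a NumPy version contemporary with the code above (the body still uses the since-removed np.float and np.int aliases):

import numpy as np
from pyprf_feature.analysis.utils_hrf import create_boxcar  # import path assumed

aryCnd = np.array([1, 2])       # two hypothetical conditions
aryOns = np.array([0.0, 10.0])  # onsets in seconds
aryDrt = np.array([2.0, 2.0])   # durations in seconds
boxcar = create_boxcar(aryCnd, aryOns, aryDrt, varTr=2.0, varNumVol=10,
                       varTmpOvsmpl=10.)
print(boxcar.shape)  # (100, 2): upsampled time points x conditions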
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_hrf.py | cnvl_tc | def cnvl_tc(idxPrc, aryPrfTcChunk, lstHrf, varTr, varNumVol, varTmpOvsmpl,
queOut, varHrfLen=32., dctPrm=None):
"""Convolution of time courses with HRF model.
Parameters
----------
idxPrc : int, positive
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryPrfTcChunk : np.array
2D array with model time course to be convolved with HRF.
lstHrf : list
List containing the different HRF functions.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
queOut : multiprocessing.queues.Queue
Queue to put the results on.
varHrfLen : float, positive, default=32
Length of the HRF time course in seconds.
dctPrm : dictionary, default None
Dictionary with customized hrf parameters. If this is None, default
hrf parameters will be used.
Returns
-------
lstOut : list
int, positive : Process ID of the process calling this function.
2D np.array, float16 : Model time course convolved with HRF.
References:
-----
[1] https://github.com/fabianp/hrf_estimation
"""
# Adjust the input, if necessary, such that input is 2D, with last dim time
tplInpShp = aryPrfTcChunk.shape
aryPrfTcChunk = aryPrfTcChunk.reshape((-1, aryPrfTcChunk.shape[-1]))
# Prepare list to collect hrf basis functions
lstBse = []
# Prepare array that contains time intervals
aryTme = np.linspace(0, varHrfLen, (varHrfLen // varTr) * varTmpOvsmpl)
for fnHrf in lstHrf:
# If hrf parameter dictionary is None, run with default parameters
if dctPrm is None:
vecTmpBse = fnHrf(aryTme)
# Otherwise, run with custom parameters
else:
vecTmpBse = fnHrf(aryTme, **dctPrm)
# Normalise HRF so that the sum of values is 1 (see FSL)
# otherwise, after convolution values for predictors are very high
vecTmpBse = np.divide(vecTmpBse, np.sum(vecTmpBse))
lstBse.append(vecTmpBse)
# Get frame times, i.e. start point of every volume in seconds
vecFrms = np.arange(0, varTr * varNumVol, varTr)
# Get supersampled frame times, i.e. start point of every volume in
# upsampled res, since convolution takes place in temp. upsampled space
vecFrmTms = np.arange(0, varTr * varNumVol, varTr / varTmpOvsmpl)
# Prepare an empty array for output
aryConv = np.zeros((aryPrfTcChunk.shape[0], len(lstHrf), varNumVol),
dtype=np.float16)
# Each time course is convolved with the HRF separately, because the
# numpy convolution function can only be used on one-dimensional data.
# Thus, we have to loop through time courses:
for idxTc in range(0, aryConv.shape[0]):
# Extract the current time course (already in upsampled space):
vecTcUps = aryPrfTcChunk[idxTc, :]
# *** convolve
for indBase, base in enumerate(lstBse):
# Make sure base and vecTcUps are float64 to avoid overflow
base = base.astype(np.float64)
vecTcUps = vecTcUps.astype(np.float64)
# Perform the convolution (previously: np.convolve)
col = fftconvolve(base, vecTcUps, mode='full')[:vecTcUps.size]
# Get function for downsampling
f = interp1d(vecFrmTms, col)
# Downsample to original resolution to match res of data
# take the value from the centre of each volume's period (see FSL)
aryConv[idxTc, indBase, :] = f(vecFrms + varTr/2.
).astype(np.float16)
# Determine output shape
tplOutShp = tplInpShp[:-1] + (len(lstHrf), ) + (varNumVol, )
if queOut is None:
# if user is not using multiprocessing, return the array directly
return aryConv.reshape(tplOutShp)
else:
# Create list containing the convolved timecourses, and the process ID:
lstOut = [idxPrc,
aryConv.reshape(tplOutShp)]
# Put output to queue:
queOut.put(lstOut) | python | def cnvl_tc(idxPrc, aryPrfTcChunk, lstHrf, varTr, varNumVol, varTmpOvsmpl,
queOut, varHrfLen=32., dctPrm=None):
"""Convolution of time courses with HRF model.
Parameters
----------
idxPrc : int, positive
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryPrfTcChunk : np.array
2D array with model time course to be convolved with HRF.
lstHrf : list
List containing the different HRF functions.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
queOut : multiprocessing.queues.Queue
Queue to put the results on.
varHrfLen : float, positive, default=32
Length of the HRF time course in seconds.
dctPrm : dictionary, default None
Dictionary with customized hrf parameters. If this is None, default
hrf parameters will be used.
Returns
-------
lstOut : list
int, positive : Process ID of the process calling this function.
2D np.array, float16 : Model time course convolved with HRF.
References:
-----
[1] https://github.com/fabianp/hrf_estimation
"""
# Adjust the input, if necessary, such that input is 2D, with last dim time
tplInpShp = aryPrfTcChunk.shape
aryPrfTcChunk = aryPrfTcChunk.reshape((-1, aryPrfTcChunk.shape[-1]))
# Prepare list to collect hrf basis functions
lstBse = []
# Prepare array that contains time intervals
aryTme = np.linspace(0, varHrfLen, (varHrfLen // varTr) * varTmpOvsmpl)
for fnHrf in lstHrf:
# If hrf parameter dictionary is None, run with default parameters
if dctPrm is None:
vecTmpBse = fnHrf(aryTme)
# Otherwise, run with custom parameters
else:
vecTmpBse = fnHrf(aryTme, **dctPrm)
# Normalise HRF so that the sum of values is 1 (see FSL)
# otherwise, after convolution values for predictors are very high
vecTmpBse = np.divide(vecTmpBse, np.sum(vecTmpBse))
lstBse.append(vecTmpBse)
# Get frame times, i.e. start point of every volume in seconds
vecFrms = np.arange(0, varTr * varNumVol, varTr)
# Get supersampled frame times, i.e. start point of every volume in
# upsampled res, since convolution takes place in temp. upsampled space
vecFrmTms = np.arange(0, varTr * varNumVol, varTr / varTmpOvsmpl)
# Prepare an empty array for output
aryConv = np.zeros((aryPrfTcChunk.shape[0], len(lstHrf), varNumVol),
dtype=np.float16)
# Each time course is convolved with the HRF separately, because the
# numpy convolution function can only be used on one-dimensional data.
# Thus, we have to loop through time courses:
for idxTc in range(0, aryConv.shape[0]):
# Extract the current time course (already in upsampled space):
vecTcUps = aryPrfTcChunk[idxTc, :]
# *** convolve
for indBase, base in enumerate(lstBse):
# Make sure base and vecTcUps are float64 to avoid overflow
base = base.astype(np.float64)
vecTcUps = vecTcUps.astype(np.float64)
# Perform the convolution (previously: np.convolve)
col = fftconvolve(base, vecTcUps, mode='full')[:vecTcUps.size]
# Get function for downsampling
f = interp1d(vecFrmTms, col)
# Downsample to original resolution to match res of data
# take the value from the centre of each volume's period (see FSL)
aryConv[idxTc, indBase, :] = f(vecFrms + varTr/2.
).astype(np.float16)
# Determine output shape
tplOutShp = tplInpShp[:-1] + (len(lstHrf), ) + (varNumVol, )
if queOut is None:
# if user is not using multiprocessing, return the array directly
return aryConv.reshape(tplOutShp)
else:
# Create list containing the convolved timecourses, and the process ID:
lstOut = [idxPrc,
aryConv.reshape(tplOutShp)]
# Put output to queue:
queOut.put(lstOut) | [
"def",
"cnvl_tc",
"(",
"idxPrc",
",",
"aryPrfTcChunk",
",",
"lstHrf",
",",
"varTr",
",",
"varNumVol",
",",
"varTmpOvsmpl",
",",
"queOut",
",",
"varHrfLen",
"=",
"32.",
",",
"dctPrm",
"=",
"None",
")",
":",
"# Adjust the input, if necessary, such that input is 2D, with last dim time",
"tplInpShp",
"=",
"aryPrfTcChunk",
".",
"shape",
"aryPrfTcChunk",
"=",
"aryPrfTcChunk",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"aryPrfTcChunk",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"# Prepare list to collect hrf basis functions",
"lstBse",
"=",
"[",
"]",
"# Prepare array that contains time intervals",
"aryTme",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"varHrfLen",
",",
"(",
"varHrfLen",
"//",
"varTr",
")",
"*",
"varTmpOvsmpl",
")",
"for",
"fnHrf",
"in",
"lstHrf",
":",
"# If hrf parameter dictionary is None, run with default parameters",
"if",
"dctPrm",
"is",
"None",
":",
"vecTmpBse",
"=",
"fnHrf",
"(",
"aryTme",
")",
"# Otherwise, run with custom parameters",
"else",
":",
"vecTmpBse",
"=",
"fnHrf",
"(",
"aryTme",
",",
"*",
"*",
"dctPrm",
")",
"# Normalise HRF so that the sum of values is 1 (see FSL)",
"# otherwise, after convolution values for predictors are very high",
"vecTmpBse",
"=",
"np",
".",
"divide",
"(",
"vecTmpBse",
",",
"np",
".",
"sum",
"(",
"vecTmpBse",
")",
")",
"lstBse",
".",
"append",
"(",
"vecTmpBse",
")",
"# Get frame times, i.e. start point of every volume in seconds",
"vecFrms",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"varTr",
"*",
"varNumVol",
",",
"varTr",
")",
"# Get supersampled frames times, i.e. start point of every volume in",
"# upsampled res, since convolution takes place in temp. upsampled space",
"vecFrmTms",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"varTr",
"*",
"varNumVol",
",",
"varTr",
"/",
"varTmpOvsmpl",
")",
"# Prepare an empty array for ouput",
"aryConv",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryPrfTcChunk",
".",
"shape",
"[",
"0",
"]",
",",
"len",
"(",
"lstHrf",
")",
",",
"varNumVol",
")",
",",
"dtype",
"=",
"np",
".",
"float16",
")",
"# Each time course is convolved with the HRF separately, because the",
"# numpy convolution function can only be used on one-dimensional data.",
"# Thus, we have to loop through time courses:",
"for",
"idxTc",
"in",
"range",
"(",
"0",
",",
"aryConv",
".",
"shape",
"[",
"0",
"]",
")",
":",
"# Extract the current time course (already in upsampled space):",
"vecTcUps",
"=",
"aryPrfTcChunk",
"[",
"idxTc",
",",
":",
"]",
"# *** convolve",
"for",
"indBase",
",",
"base",
"in",
"enumerate",
"(",
"lstBse",
")",
":",
"# Make sure base and vecTcUps are float64 to avoid overflow",
"base",
"=",
"base",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"vecTcUps",
"=",
"vecTcUps",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"# Perform the convolution (previously: np.convolve)",
"col",
"=",
"fftconvolve",
"(",
"base",
",",
"vecTcUps",
",",
"mode",
"=",
"'full'",
")",
"[",
":",
"vecTcUps",
".",
"size",
"]",
"# Get function for downsampling",
"f",
"=",
"interp1d",
"(",
"vecFrmTms",
",",
"col",
")",
"# Downsample to original resoltuion to match res of data",
"# take the value from the centre of each volume's period (see FSL)",
"aryConv",
"[",
"idxTc",
",",
"indBase",
",",
":",
"]",
"=",
"f",
"(",
"vecFrms",
"+",
"varTr",
"/",
"2.",
")",
".",
"astype",
"(",
"np",
".",
"float16",
")",
"# Determine output shape",
"tplOutShp",
"=",
"tplInpShp",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"len",
"(",
"lstHrf",
")",
",",
")",
"+",
"(",
"varNumVol",
",",
")",
"if",
"queOut",
"is",
"None",
":",
"# if user is not using multiprocessing, return the array directly",
"return",
"aryConv",
".",
"reshape",
"(",
"tplOutShp",
")",
"else",
":",
"# Create list containing the convolved timecourses, and the process ID:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"aryConv",
".",
"reshape",
"(",
"tplOutShp",
")",
"]",
"# Put output to queue:",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Convolution of time courses with HRF model.
Parameters
----------
idxPrc : int, positive
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryPrfTcChunk : np.array
2D array with model time course to be convolved with HRF.
lstHrf : list
List containing the different HRF functions.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
queOut : multiprocessing.queues.Queue
Queue to put the results on.
varHrfLen : float, positive, default=32
Length of the HRF time course in seconds.
dctPrm : dictionary, default None
Dictionary with customized hrf parameters. If this is None, default
hrf parameters will be used.
Returns
-------
lstOut : list
int, positive : Process ID of the process calling this function.
2D np.array, float16 : Model time course convolved with HRF.
References:
-----
[1] https://github.com/fabianp/hrf_estimation | [
"Convolution",
"of",
"time",
"courses",
"with",
"HRF",
"model",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_hrf.py#L266-L371 |
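A sketch convolving three hypothetical model time courses (already in upsampled space) with the canonical HRF. Passing queOut=None makes the function return the array directly instead of using a queue; a NumPy version contemporary with the code is assumed:

import numpy as np
from pyprf_feature.analysis.utils_hrf import cnvl_tc, spmt  # import path assumed

# Hypothetical design: TR = 2 s, 10 volumes, upsampling factor 10
# -> 100 upsampled samples per model time course.
aryTc = np.zeros((3, 100))
aryTc[:, 20:30] = 1.0  # a 2 s "on" block in each of the three time courses
aryConv = cnvl_tc(0, aryTc, [spmt], varTr=2.0, varNumVol=10,
                  varTmpOvsmpl=10., queOut=None)
print(aryConv.shape)  # (3, 1, 10): time courses x HRF bases x volumes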
curious-containers/cc-core | cc_core/commons/input_references.py | create_inputs_to_reference | def create_inputs_to_reference(job_data, input_files, input_directories):
"""
Creates a dictionary with the summarized information in job_data, input_files and input_directories
:param job_data: The job data specifying input parameters other than files and directories.
:param input_files: A dictionary describing the input files.
:param input_directories: A dictionary describing the input directories.
:return: A summarized dictionary containing information about all given inputs.
"""
return {**deepcopy(job_data), **deepcopy(input_files), **deepcopy(input_directories)} | python | def create_inputs_to_reference(job_data, input_files, input_directories):
"""
Creates a dictionary with the summarized information in job_data, input_files and input_directories
:param job_data: The job data specifying input parameters other than files and directories.
:param input_files: A dictionary describing the input files.
:param input_directories: A dictionary describing the input directories.
:return: A summarized dictionary containing information about all given inputs.
"""
return {**deepcopy(job_data), **deepcopy(input_files), **deepcopy(input_directories)} | [
"def",
"create_inputs_to_reference",
"(",
"job_data",
",",
"input_files",
",",
"input_directories",
")",
":",
"return",
"{",
"*",
"*",
"deepcopy",
"(",
"job_data",
")",
",",
"*",
"*",
"deepcopy",
"(",
"input_files",
")",
",",
"*",
"*",
"deepcopy",
"(",
"input_directories",
")",
"}"
] | Creates a dictionary with the summarized information in job_data, input_files and input_directories
:param job_data: The job data specifying input parameters other than files and directories.
:param input_files: A dictionary describing the input files.
:param input_directories: A dictionary describing the input directories.
:return: A summarized dictionary containing information about all given inputs. | [
"Creates",
"a",
"dictionary",
"with",
"the",
"summarized",
"information",
"in",
"job_data",
"input_files",
"and",
"input_directories"
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L22-L32 |
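A minimal sketch of the merge; the dictionary contents are hypothetical but mirror the file/directory structures used by the resolver functions later in this module, and the import path is assumed from the repository layout (cc_core/commons/input_references.py):

from cc_core.commons.input_references import create_inputs_to_reference

job_data = {'threshold': 0.5}                                    # hypothetical
input_files = {'query': {'isArray': False,
                         'files': [{'basename': 'q.fasta'}]}}    # hypothetical
input_directories = {'ref': {'isArray': False,
                             'directories': [{'path': '/in'}]}}  # hypothetical
inputs = create_inputs_to_reference(job_data, input_files, input_directories)
# Deep copies of all three dictionaries, merged into one flat mapping:
print(sorted(inputs))  # ['query', 'ref', 'threshold']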
curious-containers/cc-core | cc_core/commons/input_references.py | _partition_all_internal | def _partition_all_internal(s, sep):
"""
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
:param s: The string to split.
:param sep: A separator string.
:return: A list of parts split by sep
"""
parts = list(s.partition(sep))
# if sep found
if parts[1] == sep:
new_parts = partition_all(parts[2], sep)
parts.pop()
parts.extend(new_parts)
return [p for p in parts if p]
else:
if parts[0]:
return [parts[0]]
else:
return [] | python | def _partition_all_internal(s, sep):
"""
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
:param s: The string to split.
:param sep: A separator string.
:return: A list of parts split by sep
"""
parts = list(s.partition(sep))
# if sep found
if parts[1] == sep:
new_parts = partition_all(parts[2], sep)
parts.pop()
parts.extend(new_parts)
return [p for p in parts if p]
else:
if parts[0]:
return [parts[0]]
else:
return [] | [
"def",
"_partition_all_internal",
"(",
"s",
",",
"sep",
")",
":",
"parts",
"=",
"list",
"(",
"s",
".",
"partition",
"(",
"sep",
")",
")",
"# if sep found",
"if",
"parts",
"[",
"1",
"]",
"==",
"sep",
":",
"new_parts",
"=",
"partition_all",
"(",
"parts",
"[",
"2",
"]",
",",
"sep",
")",
"parts",
".",
"pop",
"(",
")",
"parts",
".",
"extend",
"(",
"new_parts",
")",
"return",
"[",
"p",
"for",
"p",
"in",
"parts",
"if",
"p",
"]",
"else",
":",
"if",
"parts",
"[",
"0",
"]",
":",
"return",
"[",
"parts",
"[",
"0",
"]",
"]",
"else",
":",
"return",
"[",
"]"
] | Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
:param s: The string to split.
:param sep: A separator string.
:return: A list of parts split by sep | [
"Uses",
"str",
".",
"partition",
"()",
"to",
"split",
"every",
"occurrence",
"of",
"sep",
"in",
"s",
".",
"The",
"returned",
"list",
"does",
"not",
"contain",
"empty",
"strings",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L35-L55 |
curious-containers/cc-core | cc_core/commons/input_references.py | partition_all | def partition_all(s, sep):
"""
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
If sep is a list, all separators are evaluated.
:param s: The string to split.
:param sep: A separator string or a list of separator strings.
:return: A list of parts split by sep
"""
if isinstance(sep, list):
parts = _partition_all_internal(s, sep[0])
sep = sep[1:]
for s in sep:
tmp = []
for p in parts:
tmp.extend(_partition_all_internal(p, s))
parts = tmp
return parts
else:
return _partition_all_internal(s, sep) | python | def partition_all(s, sep):
"""
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
If sep is a list, all separators are evaluated.
:param s: The string to split.
:param sep: A separator string or a list of separator strings.
:return: A list of parts split by sep
"""
if isinstance(sep, list):
parts = _partition_all_internal(s, sep[0])
sep = sep[1:]
for s in sep:
tmp = []
for p in parts:
tmp.extend(_partition_all_internal(p, s))
parts = tmp
return parts
else:
return _partition_all_internal(s, sep) | [
"def",
"partition_all",
"(",
"s",
",",
"sep",
")",
":",
"if",
"isinstance",
"(",
"sep",
",",
"list",
")",
":",
"parts",
"=",
"_partition_all_internal",
"(",
"s",
",",
"sep",
"[",
"0",
"]",
")",
"sep",
"=",
"sep",
"[",
"1",
":",
"]",
"for",
"s",
"in",
"sep",
":",
"tmp",
"=",
"[",
"]",
"for",
"p",
"in",
"parts",
":",
"tmp",
".",
"extend",
"(",
"_partition_all_internal",
"(",
"p",
",",
"s",
")",
")",
"parts",
"=",
"tmp",
"return",
"parts",
"else",
":",
"return",
"_partition_all_internal",
"(",
"s",
",",
"sep",
")"
] | Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings.
If sep is a list, all separators are evaluated.
:param s: The string to split.
:param sep: A separator string or a list of separator strings.
:return: A list of parts split by sep | [
"Uses",
"str",
".",
"partition",
"()",
"to",
"split",
"every",
"occurrence",
"of",
"sep",
"in",
"s",
".",
"The",
"returned",
"list",
"does",
"not",
"contain",
"empty",
"strings",
".",
"If",
"sep",
"is",
"a",
"list",
"all",
"separators",
"are",
"evaluated",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L58-L79 |
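A usage sketch; the separators are kept as elements of the result and empty strings are dropped (import path assumed as before):

from cc_core.commons.input_references import partition_all

print(partition_all('a$(b)c', ['$(', ')']))
# -> ['a', '$(', 'b', ')', 'c']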
curious-containers/cc-core | cc_core/commons/input_references.py | split_input_references | def split_input_references(to_split):
"""
Returns the given string in normal strings and unresolved input references.
An input reference is identified as something of the following form $(...).
Example:
split_input_reference("a$(b)cde()$(fg)") == ["a", "$(b)", "cde()", "$(fg)"]
:param to_split: The string to split
:raise InvalidInputReference: If an input reference is not closed and a new reference starts or the string ends.
:return: A list of normal strings and unresolved input references.
"""
parts = partition_all(to_split, [INPUT_REFERENCE_START, INPUT_REFERENCE_END])
result = []
part = []
in_reference = False
for p in parts:
if in_reference:
if p == INPUT_REFERENCE_START:
raise InvalidInputReference('A new input reference has been started, although the old input reference'
' has not yet been completed.\n{}'.format(to_split))
elif p == ")":
part.append(")")
result.append(''.join(part))
part = []
in_reference = False
else:
part.append(p)
else:
if p == INPUT_REFERENCE_START:
if part:
result.append(''.join(part))
part = [INPUT_REFERENCE_START]
in_reference = True
else:
part.append(p)
if in_reference:
raise InvalidInputReference('Input reference not closed.\n{}'.format(to_split))
elif part:
result.append(''.join(part))
return result | python | def split_input_references(to_split):
"""
Returns the given string in normal strings and unresolved input references.
An input reference is identified as something of the following form $(...).
Example:
split_input_reference("a$(b)cde()$(fg)") == ["a", "$(b)", "cde()", "$(fg)"]
:param to_split: The string to split
:raise InvalidInputReference: If an input reference is not closed and a new reference starts or the string ends.
:return: A list of normal strings and unresolved input references.
"""
parts = partition_all(to_split, [INPUT_REFERENCE_START, INPUT_REFERENCE_END])
result = []
part = []
in_reference = False
for p in parts:
if in_reference:
if p == INPUT_REFERENCE_START:
raise InvalidInputReference('A new input reference has been started, although the old input reference'
' has not yet been completed.\n{}'.format(to_split))
elif p == ")":
part.append(")")
result.append(''.join(part))
part = []
in_reference = False
else:
part.append(p)
else:
if p == INPUT_REFERENCE_START:
if part:
result.append(''.join(part))
part = [INPUT_REFERENCE_START]
in_reference = True
else:
part.append(p)
if in_reference:
raise InvalidInputReference('Input reference not closed.\n{}'.format(to_split))
elif part:
result.append(''.join(part))
return result | [
"def",
"split_input_references",
"(",
"to_split",
")",
":",
"parts",
"=",
"partition_all",
"(",
"to_split",
",",
"[",
"INPUT_REFERENCE_START",
",",
"INPUT_REFERENCE_END",
"]",
")",
"result",
"=",
"[",
"]",
"part",
"=",
"[",
"]",
"in_reference",
"=",
"False",
"for",
"p",
"in",
"parts",
":",
"if",
"in_reference",
":",
"if",
"p",
"==",
"INPUT_REFERENCE_START",
":",
"raise",
"InvalidInputReference",
"(",
"'A new input reference has been started, although the old input reference'",
"'has not yet been completed.\\n{}'",
".",
"format",
"(",
"to_split",
")",
")",
"elif",
"p",
"==",
"\")\"",
":",
"part",
".",
"append",
"(",
"\")\"",
")",
"result",
".",
"append",
"(",
"''",
".",
"join",
"(",
"part",
")",
")",
"part",
"=",
"[",
"]",
"in_reference",
"=",
"False",
"else",
":",
"part",
".",
"append",
"(",
"p",
")",
"else",
":",
"if",
"p",
"==",
"INPUT_REFERENCE_START",
":",
"if",
"part",
":",
"result",
".",
"append",
"(",
"''",
".",
"join",
"(",
"part",
")",
")",
"part",
"=",
"[",
"INPUT_REFERENCE_START",
"]",
"in_reference",
"=",
"True",
"else",
":",
"part",
".",
"append",
"(",
"p",
")",
"if",
"in_reference",
":",
"raise",
"InvalidInputReference",
"(",
"'Input reference not closed.\\n{}'",
".",
"format",
"(",
"to_split",
")",
")",
"elif",
"part",
":",
"result",
".",
"append",
"(",
"''",
".",
"join",
"(",
"part",
")",
")",
"return",
"result"
] | Returns the given string in normal strings and unresolved input references.
An input reference is identified as something of the following form $(...).
Example:
split_input_reference("a$(b)cde()$(fg)") == ["a", "$(b)", "cde()", "$(fg)"]
:param to_split: The string to split
:raise InvalidInputReference: If an input reference is not closed and a new reference starts or the string ends.
:return: A list of normal strings and unresolved input references. | [
"Returns",
"the",
"given",
"string",
"in",
"normal",
"strings",
"and",
"unresolved",
"input",
"references",
".",
"An",
"input",
"reference",
"is",
"identified",
"as",
"something",
"of",
"the",
"following",
"form",
"$",
"(",
"...",
")",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L82-L125 |
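The docstring example as a runnable sketch, assuming the module constants INPUT_REFERENCE_START and INPUT_REFERENCE_END are '$(' and ')' as the docstring implies:

from cc_core.commons.input_references import split_input_references

print(split_input_references('a$(b)cde()$(fg)'))
# -> ['a', '$(b)', 'cde()', '$(fg)']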
curious-containers/cc-core | cc_core/commons/input_references.py | split_all | def split_all(reference, sep):
"""
Splits a given string at a given separator or list of separators.
:param reference: The reference to split.
:param sep: Separator string or list of separator strings.
:return: A list of split strings
"""
parts = partition_all(reference, sep)
return [p for p in parts if p not in sep] | python | def split_all(reference, sep):
"""
Splits a given string at a given separator or list of separators.
:param reference: The reference to split.
:param sep: Separator string or list of separator strings.
:return: A list of split strings
"""
parts = partition_all(reference, sep)
return [p for p in parts if p not in sep] | [
"def",
"split_all",
"(",
"reference",
",",
"sep",
")",
":",
"parts",
"=",
"partition_all",
"(",
"reference",
",",
"sep",
")",
"return",
"[",
"p",
"for",
"p",
"in",
"parts",
"if",
"p",
"not",
"in",
"sep",
"]"
] | Splits a given string at a given separator or list of separators.
:param reference: The reference to split.
:param sep: Separator string or list of separator strings.
:return: A list of split strings | [
"Splits",
"a",
"given",
"string",
"at",
"a",
"given",
"separator",
"or",
"list",
"of",
"separators",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L138-L147 |
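In contrast to partition_all, the separators themselves are filtered out of the result; a sketch with a hypothetical dotted reference (import path assumed as before):

from cc_core.commons.input_references import split_all

print(split_all('inputs.query.basename', '.'))
# -> ['inputs', 'query', 'basename']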
curious-containers/cc-core | cc_core/commons/input_references.py | _resolve_file | def _resolve_file(attributes, input_file, input_identifier, input_reference):
"""
Returns the attributes in demand of the input file.
:param attributes: A list of attributes to get from the input_file.
:param input_file: The file from which to get the attributes.
:param input_identifier: The input identifier of the given file.
:param input_reference: The reference string
:return: The attribute in demand
"""
if input_file['isArray']:
raise InvalidInputReference('Input References to Arrays of input files are currently not supported.\n'
'"{}" is an array of files and can not be resolved for input references:'
'\n{}'.format(input_identifier, input_reference))
single_file = input_file['files'][0]
try:
return _get_dict_element(single_file, attributes)
except KeyError:
raise InvalidInputReference('Could not get attributes "{}" from input file "{}", needed in input reference:'
'\n{}'.format(attributes, input_identifier, input_reference)) | python | def _resolve_file(attributes, input_file, input_identifier, input_reference):
"""
Returns the attributes in demand of the input file.
:param attributes: A list of attributes to get from the input_file.
:param input_file: The file from which to get the attributes.
:param input_identifier: The input identifier of the given file.
:param input_reference: The reference string
:return: The attribute in demand
"""
if input_file['isArray']:
raise InvalidInputReference('Input References to Arrays of input files are currently not supported.\n'
'"{}" is an array of files and can not be resolved for input references:'
'\n{}'.format(input_identifier, input_reference))
single_file = input_file['files'][0]
try:
return _get_dict_element(single_file, attributes)
except KeyError:
raise InvalidInputReference('Could not get attributes "{}" from input file "{}", needed in input reference:'
'\n{}'.format(attributes, input_identifier, input_reference)) | [
"def",
"_resolve_file",
"(",
"attributes",
",",
"input_file",
",",
"input_identifier",
",",
"input_reference",
")",
":",
"if",
"input_file",
"[",
"'isArray'",
"]",
":",
"raise",
"InvalidInputReference",
"(",
"'Input References to Arrays of input files are currently not supported.\\n'",
"'\"{}\" is an array of files and can not be resolved for input references:'",
"'\\n{}'",
".",
"format",
"(",
"input_identifier",
",",
"input_reference",
")",
")",
"single_file",
"=",
"input_file",
"[",
"'files'",
"]",
"[",
"0",
"]",
"try",
":",
"return",
"_get_dict_element",
"(",
"single_file",
",",
"attributes",
")",
"except",
"KeyError",
":",
"raise",
"InvalidInputReference",
"(",
"'Could not get attributes \"{}\" from input file \"{}\", needed in input reference:'",
"'\\n{}'",
".",
"format",
"(",
"attributes",
",",
"input_identifier",
",",
"input_reference",
")",
")"
] | Returns the attributes in demand of the input file.
:param attributes: A list of attributes to get from the input_file.
:param input_file: The file from which to get the attributes.
:param input_identifier: The input identifier of the given file.
:param input_reference: The reference string
:return: The attribute in demand | [
"Returns",
"the",
"attributes",
"in",
"demand",
"of",
"the",
"input",
"file",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L150-L170 |
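A sketch of the single-file case. The input dictionary shape follows the code above; the private helper _get_dict_element (not shown in this excerpt) is assumed to perform a plain nested-key lookup:

from cc_core.commons.input_references import _resolve_file

input_file = {'isArray': False,
              'files': [{'basename': 'q.fasta'}]}  # hypothetical file entry
print(_resolve_file(['basename'], input_file, 'query',
                    '$(inputs.query.basename)'))
# -> 'q.fasta', assuming _get_dict_element walks the attribute list in order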
curious-containers/cc-core | cc_core/commons/input_references.py | _resolve_directory | def _resolve_directory(attributes, input_directory, input_identifier, input_reference):
"""
Returns the attributes in demand of the input directory.
:param attributes: A list of attributes to get from the input directory.
:param input_directory: The directory from which to get the attributes.
:param input_identifier: The input identifier of the given directory.
:param input_reference: The reference string
:return: The attribute in demand
"""
if input_directory['isArray']:
raise InvalidInputReference('Input References to Arrays of input directories are currently not supported.\n'
                                    'input directory "{}" is an array of directories and can not be resolved for input '
'references:\n{}'.format(input_identifier, input_reference))
single_directory = input_directory['directories'][0]
try:
return _get_dict_element(single_directory, attributes)
except KeyError:
        raise InvalidInputReference('Could not get attributes "{}" from input directory "{}", needed in input '
'reference:\n{}'.format(attributes, input_identifier, input_reference)) | python | def _resolve_directory(attributes, input_directory, input_identifier, input_reference):
"""
Returns the attributes in demand of the input directory.
:param attributes: A list of attributes to get from the input directory.
:param input_directory: The directory from which to get the attributes.
:param input_identifier: The input identifier of the given directory.
:param input_reference: The reference string
:return: The attribute in demand
"""
if input_directory['isArray']:
raise InvalidInputReference('Input References to Arrays of input directories are currently not supported.\n'
                                    'input directory "{}" is an array of directories and can not be resolved for input '
'references:\n{}'.format(input_identifier, input_reference))
single_directory = input_directory['directories'][0]
try:
return _get_dict_element(single_directory, attributes)
except KeyError:
        raise InvalidInputReference('Could not get attributes "{}" from input directory "{}", needed in input '
'reference:\n{}'.format(attributes, input_identifier, input_reference)) | [
"def",
"_resolve_directory",
"(",
"attributes",
",",
"input_directory",
",",
"input_identifier",
",",
"input_reference",
")",
":",
"if",
"input_directory",
"[",
"'isArray'",
"]",
":",
"raise",
"InvalidInputReference",
"(",
"'Input References to Arrays of input directories are currently not supported.\\n'",
"'input directory \"{}\" is an array of directories and can not be resolved for input'",
"'references:\\n{}'",
".",
"format",
"(",
"input_identifier",
",",
"input_reference",
")",
")",
"single_directory",
"=",
"input_directory",
"[",
"'directories'",
"]",
"[",
"0",
"]",
"try",
":",
"return",
"_get_dict_element",
"(",
"single_directory",
",",
"attributes",
")",
"except",
"KeyError",
":",
"raise",
"InvalidInputReference",
"(",
"'Could not get attributes \"{}\" from input directory \"{}\", needed in input'",
"'reference:\\n{}'",
".",
"format",
"(",
"attributes",
",",
"input_identifier",
",",
"input_reference",
")",
")"
] | Returns the attributes in demand of the input directory.
:param attributes: A list of attributes to get from the input directory.
:param input_directory: The directory from which to get the attributes.
:param input_identifier: The input identifier of the given directory.
:param input_reference: The reference string
:return: The attribute in demand | [
"Returns",
"the",
"attributes",
"in",
"demand",
"of",
"the",
"input",
"directory",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L173-L193 |
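The directory variant behaves symmetrically; a sketch under the same assumptions as the previous one (the stand-in `_get_dict_element` in scope, and a `path` attribute that is purely illustrative):

input_directory = {
    'isArray': False,
    'directories': [{'basename': 'out_dir', 'path': '/tmp/out_dir'}],
}

# Prints '/tmp/out_dir' when _resolve_directory from the row above is in scope.
print(_resolve_directory(['path'], input_directory, 'out',
                         '$(inputs.out.path)'))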
curious-containers/cc-core | cc_core/commons/input_references.py | resolve_input_reference | def resolve_input_reference(reference, inputs_to_reference):
"""
Replaces a given input_reference by a string extracted from inputs_to_reference.
:param reference: The input reference to resolve.
:param inputs_to_reference: A dictionary containing information about the given inputs.
:raise InvalidInputReference: If the given input reference could not be resolved.
:return: A string which is the resolved input reference.
"""
if not reference.startswith('{}inputs.'.format(INPUT_REFERENCE_START)):
raise InvalidInputReference('An input reference must have the following form'
'"$(inputs.<input_name>[.<attribute>]".\n'
'The invalid reference is: "{}"'.format(reference))
# remove "$(inputs." and ")"
reference = reference[2:-1]
parts = split_all(reference, ATTRIBUTE_SEPARATOR_SYMBOLS)
if len(parts) < 2:
raise InvalidInputReference('InputReference should at least contain "$(inputs.identifier)". The following input'
                                    ' reference does not comply with it:\n{}'.format(reference))
elif parts[0] != "inputs":
raise InvalidInputReference('InputReference should at least contain "$(inputs.identifier)". The following input'
' reference does not comply with it:\n$({})'.format(reference))
else:
input_identifier = parts[1]
input_to_reference = inputs_to_reference.get(input_identifier)
if input_to_reference is None:
raise InvalidInputReference('Input identifier "{}" not found in inputs, but needed in input reference:\n{}'
.format(input_identifier, reference))
elif isinstance(input_to_reference, dict):
if 'files' in input_to_reference:
return _resolve_file(parts[2:], input_to_reference, input_identifier, reference)
elif 'directories' in input_to_reference:
return _resolve_directory(parts[2:], input_to_reference, input_identifier, reference)
else:
raise InvalidInputReference('Unknown input type for input identifier "{}"'.format(input_identifier))
else:
if len(parts) > 2:
raise InvalidInputReference('Attribute "{}" of input reference "{}" could not be resolved'
.format(parts[2], reference))
else:
return parts[1] | python | def resolve_input_reference(reference, inputs_to_reference):
"""
Replaces a given input_reference by a string extracted from inputs_to_reference.
:param reference: The input reference to resolve.
:param inputs_to_reference: A dictionary containing information about the given inputs.
:raise InvalidInputReference: If the given input reference could not be resolved.
:return: A string which is the resolved input reference.
"""
if not reference.startswith('{}inputs.'.format(INPUT_REFERENCE_START)):
raise InvalidInputReference('An input reference must have the following form'
'"$(inputs.<input_name>[.<attribute>]".\n'
'The invalid reference is: "{}"'.format(reference))
# remove "$(inputs." and ")"
reference = reference[2:-1]
parts = split_all(reference, ATTRIBUTE_SEPARATOR_SYMBOLS)
if len(parts) < 2:
raise InvalidInputReference('InputReference should at least contain "$(inputs.identifier)". The following input'
                                    ' reference does not comply with it:\n{}'.format(reference))
elif parts[0] != "inputs":
raise InvalidInputReference('InputReference should at least contain "$(inputs.identifier)". The following input'
' reference does not comply with it:\n$({})'.format(reference))
else:
input_identifier = parts[1]
input_to_reference = inputs_to_reference.get(input_identifier)
if input_to_reference is None:
raise InvalidInputReference('Input identifier "{}" not found in inputs, but needed in input reference:\n{}'
.format(input_identifier, reference))
elif isinstance(input_to_reference, dict):
if 'files' in input_to_reference:
return _resolve_file(parts[2:], input_to_reference, input_identifier, reference)
elif 'directories' in input_to_reference:
return _resolve_directory(parts[2:], input_to_reference, input_identifier, reference)
else:
raise InvalidInputReference('Unknown input type for input identifier "{}"'.format(input_identifier))
else:
if len(parts) > 2:
raise InvalidInputReference('Attribute "{}" of input reference "{}" could not be resolved'
.format(parts[2], reference))
else:
return parts[1] | [
"def",
"resolve_input_reference",
"(",
"reference",
",",
"inputs_to_reference",
")",
":",
"if",
"not",
"reference",
".",
"startswith",
"(",
"'{}inputs.'",
".",
"format",
"(",
"INPUT_REFERENCE_START",
")",
")",
":",
"raise",
"InvalidInputReference",
"(",
"'An input reference must have the following form'",
"'\"$(inputs.<input_name>[.<attribute>]\".\\n'",
"'The invalid reference is: \"{}\"'",
".",
"format",
"(",
"reference",
")",
")",
"# remove \"$(inputs.\" and \")\"",
"reference",
"=",
"reference",
"[",
"2",
":",
"-",
"1",
"]",
"parts",
"=",
"split_all",
"(",
"reference",
",",
"ATTRIBUTE_SEPARATOR_SYMBOLS",
")",
"if",
"len",
"(",
"parts",
")",
"<",
"2",
":",
"raise",
"InvalidInputReference",
"(",
"'InputReference should at least contain \"$(inputs.identifier)\". The following input'",
"'reference does not comply with it:\\n{}'",
".",
"format",
"(",
"reference",
")",
")",
"elif",
"parts",
"[",
"0",
"]",
"!=",
"\"inputs\"",
":",
"raise",
"InvalidInputReference",
"(",
"'InputReference should at least contain \"$(inputs.identifier)\". The following input'",
"' reference does not comply with it:\\n$({})'",
".",
"format",
"(",
"reference",
")",
")",
"else",
":",
"input_identifier",
"=",
"parts",
"[",
"1",
"]",
"input_to_reference",
"=",
"inputs_to_reference",
".",
"get",
"(",
"input_identifier",
")",
"if",
"input_to_reference",
"is",
"None",
":",
"raise",
"InvalidInputReference",
"(",
"'Input identifier \"{}\" not found in inputs, but needed in input reference:\\n{}'",
".",
"format",
"(",
"input_identifier",
",",
"reference",
")",
")",
"elif",
"isinstance",
"(",
"input_to_reference",
",",
"dict",
")",
":",
"if",
"'files'",
"in",
"input_to_reference",
":",
"return",
"_resolve_file",
"(",
"parts",
"[",
"2",
":",
"]",
",",
"input_to_reference",
",",
"input_identifier",
",",
"reference",
")",
"elif",
"'directories'",
"in",
"input_to_reference",
":",
"return",
"_resolve_directory",
"(",
"parts",
"[",
"2",
":",
"]",
",",
"input_to_reference",
",",
"input_identifier",
",",
"reference",
")",
"else",
":",
"raise",
"InvalidInputReference",
"(",
"'Unknown input type for input identifier \"{}\"'",
".",
"format",
"(",
"input_identifier",
")",
")",
"else",
":",
"if",
"len",
"(",
"parts",
")",
">",
"2",
":",
"raise",
"InvalidInputReference",
"(",
"'Attribute \"{}\" of input reference \"{}\" could not be resolved'",
".",
"format",
"(",
"parts",
"[",
"2",
"]",
",",
"reference",
")",
")",
"else",
":",
"return",
"parts",
"[",
"1",
"]"
] | Replaces a given input_reference by a string extracted from inputs_to_reference.
:param reference: The input reference to resolve.
:param inputs_to_reference: A dictionary containing information about the given inputs.
:raise InvalidInputReference: If the given input reference could not be resolved.
:return: A string which is the resolved input reference. | [
"Replaces",
"a",
"given",
"input_reference",
"by",
"a",
"string",
"extracted",
"from",
"inputs_to_reference",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L196-L239 |
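A hypothetical end-to-end call, assuming the `cc_core` package is importable (the module path is taken from this row's URL) and an inputs dictionary shaped the way `_resolve_file` expects:

from cc_core.commons.input_references import resolve_input_reference

inputs = {
    'my_file': {
        'isArray': False,
        'files': [{'basename': 'data.csv', 'nameroot': 'data'}],
    }
}

# Resolves to 'data.csv'; an unknown identifier or attribute raises
# InvalidInputReference instead.
print(resolve_input_reference('$(inputs.my_file.basename)', inputs))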
curious-containers/cc-core | cc_core/commons/input_references.py | resolve_input_references | def resolve_input_references(to_resolve, inputs_to_reference):
"""
Resolves input references given in the string to_resolve by using the inputs_to_reference.
See http://www.commonwl.org/user_guide/06-params/index.html for more information.
Example:
"$(inputs.my_file.nameroot).md" -> "filename.md"
:param to_resolve: The path to match
:param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename).
:return: A string in which the input references are replaced with actual values.
"""
splitted = split_input_references(to_resolve)
result = []
for part in splitted:
if is_input_reference(part):
result.append(str(resolve_input_reference(part, inputs_to_reference)))
else:
result.append(part)
return ''.join(result) | python | def resolve_input_references(to_resolve, inputs_to_reference):
"""
Resolves input references given in the string to_resolve by using the inputs_to_reference.
See http://www.commonwl.org/user_guide/06-params/index.html for more information.
Example:
"$(inputs.my_file.nameroot).md" -> "filename.md"
:param to_resolve: The path to match
:param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename).
:return: A string in which the input references are replaced with actual values.
"""
splitted = split_input_references(to_resolve)
result = []
for part in splitted:
if is_input_reference(part):
result.append(str(resolve_input_reference(part, inputs_to_reference)))
else:
result.append(part)
return ''.join(result) | [
"def",
"resolve_input_references",
"(",
"to_resolve",
",",
"inputs_to_reference",
")",
":",
"splitted",
"=",
"split_input_references",
"(",
"to_resolve",
")",
"result",
"=",
"[",
"]",
"for",
"part",
"in",
"splitted",
":",
"if",
"is_input_reference",
"(",
"part",
")",
":",
"result",
".",
"append",
"(",
"str",
"(",
"resolve_input_reference",
"(",
"part",
",",
"inputs_to_reference",
")",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"part",
")",
"return",
"''",
".",
"join",
"(",
"result",
")"
] | Resolves input references given in the string to_resolve by using the inputs_to_reference.
See http://www.commonwl.org/user_guide/06-params/index.html for more information.
Example:
"$(inputs.my_file.nameroot).md" -> "filename.md"
:param to_resolve: The path to match
:param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename).
:return: A string in which the input references are replaced with actual values. | [
"Resolves",
"input",
"references",
"given",
"in",
"the",
"string",
"to_resolve",
"by",
"using",
"the",
"inputs_to_reference",
"."
] | train | https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L242-L267 |
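Continuing the docstring's own example with the hypothetical `inputs` dictionary from the previous sketch; literal text around the reference is kept and only the `$(...)` parts are substituted:

from cc_core.commons.input_references import resolve_input_references

out = resolve_input_references('$(inputs.my_file.nameroot).md', inputs)
print(out)  # -> 'data.md'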
olivier-m/rafter | rafter/contrib/schematics/exceptions.py | ValidationErrors.data | def data(self):
"""
        Returns a dictionary containing all the passed data and an item
``error_list`` which holds the result of :attr:`error_list`.
"""
res = {'error_list': self.error_list}
res.update(super(ValidationErrors, self).data)
return res | python | def data(self):
"""
        Returns a dictionary containing all the passed data and an item
``error_list`` which holds the result of :attr:`error_list`.
"""
res = {'error_list': self.error_list}
res.update(super(ValidationErrors, self).data)
return res | [
"def",
"data",
"(",
"self",
")",
":",
"res",
"=",
"{",
"'error_list'",
":",
"self",
".",
"error_list",
"}",
"res",
".",
"update",
"(",
"super",
"(",
"ValidationErrors",
",",
"self",
")",
".",
"data",
")",
"return",
"res"
] | Returns a dictionary containing all the passed data and an item
``error_list`` which holds the result of :attr:`error_list`. | [
"Returns",
"a",
"dictionnary",
"containing",
"all",
"the",
"passed",
"data",
"and",
"an",
"item",
"error_list",
"which",
"holds",
"the",
"result",
"of",
":",
"attr",
":",
"error_list",
"."
] | train | https://github.com/olivier-m/rafter/blob/aafcf8fd019f24abcf519307c4484cc6b4697c04/rafter/contrib/schematics/exceptions.py#L24-L31 |
MSchnei/pyprf_feature | pyprf_feature/simulation/pRF_sim_compTngCrvs.py | circDiff | def circDiff(length, ary1, ary2):
"""calculate the circular difference between two paired arrays.
This function will return the difference between pairs of numbers; however
the difference that is output will be minimal in the sense that if we
assume an array with length = 4: [0, 1, 2, 3], the difference between
0 and 3 will not be 3, but 1 (i.e. circular difference)"""
x = np.arange(length)
mod = length % 2
if mod == 0:
temp = np.ones(length)
        temp[length//2:] = -1
else:
x = x - np.floor(length/2)
temp = np.copy(x)
temp[np.less(x, 0)] = 1
temp[np.greater(x, 0)] = -1
x = np.cumsum(temp)
diagDiffmat = np.empty((length, length))
for idx in np.arange(length):
x = np.roll(x, 1)
diagDiffmat[idx, :] = x
# return diagDiffmat[ary1][ary2]
flat = diagDiffmat.flatten()
ind = ary1*diagDiffmat.shape[0] + ary2
ind = ind.astype('int')
return flat[ind] | python | def circDiff(length, ary1, ary2):
"""calculate the circular difference between two paired arrays.
This function will return the difference between pairs of numbers; however
the difference that is output will be minimal in the sense that if we
assume an array with length = 4: [0, 1, 2, 3], the difference between
0 and 3 will not be 3, but 1 (i.e. circular difference)"""
x = np.arange(length)
mod = length % 2
if mod == 0:
temp = np.ones(length)
        temp[length//2:] = -1
else:
x = x - np.floor(length/2)
temp = np.copy(x)
temp[np.less(x, 0)] = 1
temp[np.greater(x, 0)] = -1
x = np.cumsum(temp)
diagDiffmat = np.empty((length, length))
for idx in np.arange(length):
x = np.roll(x, 1)
diagDiffmat[idx, :] = x
# return diagDiffmat[ary1][ary2]
flat = diagDiffmat.flatten()
ind = ary1*diagDiffmat.shape[0] + ary2
ind = ind.astype('int')
return flat[ind] | [
"def",
"circDiff",
"(",
"length",
",",
"ary1",
",",
"ary2",
")",
":",
"x",
"=",
"np",
".",
"arange",
"(",
"length",
")",
"mod",
"=",
"length",
"%",
"2",
"if",
"mod",
"==",
"0",
":",
"temp",
"=",
"np",
".",
"ones",
"(",
"length",
")",
"temp",
"[",
"length",
"/",
"2",
":",
"]",
"=",
"-",
"1",
"else",
":",
"x",
"=",
"x",
"-",
"np",
".",
"floor",
"(",
"length",
"/",
"2",
")",
"temp",
"=",
"np",
".",
"copy",
"(",
"x",
")",
"temp",
"[",
"np",
".",
"less",
"(",
"x",
",",
"0",
")",
"]",
"=",
"1",
"temp",
"[",
"np",
".",
"greater",
"(",
"x",
",",
"0",
")",
"]",
"=",
"-",
"1",
"x",
"=",
"np",
".",
"cumsum",
"(",
"temp",
")",
"diagDiffmat",
"=",
"np",
".",
"empty",
"(",
"(",
"length",
",",
"length",
")",
")",
"for",
"idx",
"in",
"np",
".",
"arange",
"(",
"length",
")",
":",
"x",
"=",
"np",
".",
"roll",
"(",
"x",
",",
"1",
")",
"diagDiffmat",
"[",
"idx",
",",
":",
"]",
"=",
"x",
"# return diagDiffmat[ary1][ary2]",
"flat",
"=",
"diagDiffmat",
".",
"flatten",
"(",
")",
"ind",
"=",
"ary1",
"*",
"diagDiffmat",
".",
"shape",
"[",
"0",
"]",
"+",
"ary2",
"ind",
"=",
"ind",
".",
"astype",
"(",
"'int'",
")",
"return",
"flat",
"[",
"ind",
"]"
] | calculate the circular difference between two paired arrays.
This function will return the difference between pairs of numbers; however
the difference that is output will be minimal in the sense that if we
assume an array with length = 4: [0, 1, 2, 3], the difference between
0 and 3 will not be 3, but 1 (i.e. circular difference) | [
"calculate",
"the",
"circular",
"difference",
"between",
"two",
"paired",
"arrays",
".",
"This",
"function",
"will",
"return",
"the",
"difference",
"between",
"pairs",
"of",
"numbers",
";",
"however",
"the",
"difference",
"that",
"is",
"output",
"will",
"be",
"minimal",
"in",
"the",
"sense",
"that",
"if",
"we",
"assume",
"an",
"array",
"with",
"length",
"=",
"4",
":",
"[",
"0",
"1",
"2",
"3",
"]",
"the",
"difference",
"between",
"0",
"and",
"3",
"will",
"not",
"be",
"3",
"but",
"1",
"(",
"i",
".",
"e",
".",
"circular",
"difference",
")"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_sim_compTngCrvs.py#L54-L80 |
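A small numeric check of the circular difference, assuming `circDiff` (with the integer-division fix above) and numpy are in scope; on a ring of length 4, positions 0 and 3 are neighbours, so their distance is 1 rather than 3:

import numpy as np

a = np.array([0, 0, 0, 1])
b = np.array([1, 2, 3, 3])
# Pairwise circular distances on a ring of 4 positions.
print(circDiff(4, a, b))  # -> [1. 2. 1. 2.]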
linkhub-sdk/popbill.py | popbill/base.py | PopbillBase.getPartnerURL | def getPartnerURL(self, CorpNum, TOGO):
""" 팝빌 회원 잔여포인트 확인
args
CorpNum : 팝빌회원 사업자번호
TOGO : "CHRG"
return
URL
raise
PopbillException
"""
try:
return linkhub.getPartnerURL(self._getToken(CorpNum), TOGO)
except LinkhubException as LE:
raise PopbillException(LE.code, LE.message) | python | def getPartnerURL(self, CorpNum, TOGO):
""" 팝빌 회원 잔여포인트 확인
args
CorpNum : 팝빌회원 사업자번호
TOGO : "CHRG"
return
URL
raise
PopbillException
"""
try:
return linkhub.getPartnerURL(self._getToken(CorpNum), TOGO)
except LinkhubException as LE:
raise PopbillException(LE.code, LE.message) | [
"def",
"getPartnerURL",
"(",
"self",
",",
"CorpNum",
",",
"TOGO",
")",
":",
"try",
":",
"return",
"linkhub",
".",
"getPartnerURL",
"(",
"self",
".",
"_getToken",
"(",
"CorpNum",
")",
",",
"TOGO",
")",
"except",
"LinkhubException",
"as",
"LE",
":",
"raise",
"PopbillException",
"(",
"LE",
".",
"code",
",",
"LE",
".",
"message",
")"
] | Get partner URL (point charge page)
            args
                CorpNum : Popbill member business registration number
                TOGO : "CHRG"
            return
                URL
            raise
PopbillException | [
"팝빌",
"회원",
"잔여포인트",
"확인",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"TOGO",
":",
"CHRG",
"return",
"URL",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L80-L93 |
linkhub-sdk/popbill.py | popbill/base.py | PopbillBase.getBalance | def getBalance(self, CorpNum):
""" 팝빌 회원 잔여포인트 확인
args
CorpNum : 확인하고자 하는 회원 사업자번호
return
잔여포인트 by float
raise
PopbillException
"""
try:
return linkhub.getBalance(self._getToken(CorpNum))
except LinkhubException as LE:
raise PopbillException(LE.code, LE.message) | python | def getBalance(self, CorpNum):
""" 팝빌 회원 잔여포인트 확인
args
CorpNum : 확인하고자 하는 회원 사업자번호
return
잔여포인트 by float
raise
PopbillException
"""
try:
return linkhub.getBalance(self._getToken(CorpNum))
except LinkhubException as LE:
raise PopbillException(LE.code, LE.message) | [
"def",
"getBalance",
"(",
"self",
",",
"CorpNum",
")",
":",
"try",
":",
"return",
"linkhub",
".",
"getBalance",
"(",
"self",
".",
"_getToken",
"(",
"CorpNum",
")",
")",
"except",
"LinkhubException",
"as",
"LE",
":",
"raise",
"PopbillException",
"(",
"LE",
".",
"code",
",",
"LE",
".",
"message",
")"
] | Check the remaining point balance of a Popbill member
            args
                CorpNum : business registration number of the member to check
            return
                remaining points as float
            raise
PopbillException | [
"팝빌",
"회원",
"잔여포인트",
"확인",
"args",
"CorpNum",
":",
"확인하고자",
"하는",
"회원",
"사업자번호",
"return",
"잔여포인트",
"by",
"float",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L95-L107 |
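PopbillBase methods are normally reached through a concrete service class; a hypothetical sketch assuming `popbill.TaxinvoiceService`, with placeholder credentials and business registration number:

from popbill import TaxinvoiceService, PopbillException

service = TaxinvoiceService('LinkID-placeholder', 'SecretKey-placeholder')
try:
    balance = service.getBalance('1234567890')
    print('Remaining points: {0}'.format(balance))
except PopbillException as pe:
    print('Error {0}: {1}'.format(pe.code, pe.message))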
linkhub-sdk/popbill.py | popbill/base.py | PopbillBase.getAccessURL | def getAccessURL(self, CorpNum, UserID):
""" 팝빌 로그인 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=LOGIN', CorpNum, UserID)
return result.url | python | def getAccessURL(self, CorpNum, UserID):
""" 팝빌 로그인 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=LOGIN', CorpNum, UserID)
return result.url | [
"def",
"getAccessURL",
"(",
"self",
",",
"CorpNum",
",",
"UserID",
")",
":",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/?TG=LOGIN'",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"result",
".",
"url"
] | Popbill login URL
            args
                CorpNum : member business registration number
                UserID : member Popbill ID
            return
                URL containing a 30-second security token
            raise
PopbillException | [
"팝빌",
"로그인",
"URL",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"UserID",
":",
"회원",
"팝빌아이디",
"return",
"30초",
"보안",
"토큰을",
"포함한",
"url",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L140-L151 |
linkhub-sdk/popbill.py | popbill/base.py | PopbillBase.getChargeURL | def getChargeURL(self, CorpNum, UserID):
""" 팝빌 연동회원 포인트 충전 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=CHRG', CorpNum, UserID)
return result.url | python | def getChargeURL(self, CorpNum, UserID):
""" 팝빌 연동회원 포인트 충전 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=CHRG', CorpNum, UserID)
return result.url | [
"def",
"getChargeURL",
"(",
"self",
",",
"CorpNum",
",",
"UserID",
")",
":",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/?TG=CHRG'",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"result",
".",
"url"
] | Popbill member point charge URL
            args
                CorpNum : member business registration number
                UserID : member Popbill ID
            return
                URL containing a 30-second security token
            raise
PopbillException | [
"팝빌",
"연동회원",
"포인트",
"충전",
"URL",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"UserID",
":",
"회원",
"팝빌아이디",
"return",
"30초",
"보안",
"토큰을",
"포함한",
"url",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L153-L164 |
linkhub-sdk/popbill.py | popbill/base.py | PopbillBase.checkIsMember | def checkIsMember(self, CorpNum):
""" 회원가입여부 확인
args
CorpNum : 회원 사업자번호
return
회원가입여부 True/False
raise
PopbillException
"""
if CorpNum == None or CorpNum == '':
            raise PopbillException(-99999999, "The business registration number was not entered.")
return self._httpget('/Join?CorpNum=' + CorpNum + '&LID=' + self.__linkID, None, None) | python | def checkIsMember(self, CorpNum):
""" 회원가입여부 확인
args
CorpNum : 회원 사업자번호
return
회원가입여부 True/False
raise
PopbillException
"""
if CorpNum == None or CorpNum == '':
            raise PopbillException(-99999999, "The business registration number was not entered.")
return self._httpget('/Join?CorpNum=' + CorpNum + '&LID=' + self.__linkID, None, None) | [
"def",
"checkIsMember",
"(",
"self",
",",
"CorpNum",
")",
":",
"if",
"CorpNum",
"==",
"None",
"or",
"CorpNum",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"사업자번호가 입력되지 않았습니다.\")",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Join?CorpNum='",
"+",
"CorpNum",
"+",
"'&LID='",
"+",
"self",
".",
"__linkID",
",",
"None",
",",
"None",
")"
] | Check whether a company is registered as a member
            args
                CorpNum : member business registration number
            return
                membership status as True/False
            raise
PopbillException | [
"회원가입여부",
"확인",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"return",
"회원가입여부",
"True",
"/",
"False",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L166-L178 |
linkhub-sdk/popbill.py | popbill/base.py | PopbillBase.joinMember | def joinMember(self, JoinInfo):
""" 팝빌 회원가입
args
JoinInfo : 회원가입정보. Reference JoinForm class
return
처리결과. consist of code and message
raise
PopbillException
"""
JoinInfo.LinkID = self.__linkID
postData = self._stringtify(JoinInfo)
return self._httppost('/Join', postData) | python | def joinMember(self, JoinInfo):
""" 팝빌 회원가입
args
JoinInfo : 회원가입정보. Reference JoinForm class
return
처리결과. consist of code and message
raise
PopbillException
"""
JoinInfo.LinkID = self.__linkID
postData = self._stringtify(JoinInfo)
return self._httppost('/Join', postData) | [
"def",
"joinMember",
"(",
"self",
",",
"JoinInfo",
")",
":",
"JoinInfo",
".",
"LinkID",
"=",
"self",
".",
"__linkID",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"JoinInfo",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Join'",
",",
"postData",
")"
] | Popbill member sign-up
            args
                JoinInfo : sign-up information. Reference JoinForm class
            return
                processing result, consisting of code and message
            raise
PopbillException | [
"팝빌",
"회원가입",
"args",
"JoinInfo",
":",
"회원가입정보",
".",
"Reference",
"JoinForm",
"class",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L180-L191 |
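A hypothetical sign-up sketch; the `JoinForm` attribute names below are illustrative guesses rather than a verified field list, and `service` is the instance from the earlier sketch:

from popbill import JoinForm

info = JoinForm()
info.CorpNum = '1234567890'       # placeholder business registration number
info.CorpName = 'Example Corp'
info.ID = 'example_user_id'
info.PWD = 'password-placeholder'

response = service.joinMember(info)
print(response.code, response.message)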
linkhub-sdk/popbill.py | popbill/base.py | PopbillBase.updateContact | def updateContact(self, CorpNum, ContactInfo, UserID=None):
""" 담당자 정보 수정
args
CorpNum : 회원 사업자번호
ContactInfo : 담당자 정보, Reference ContactInfo class
UserID : 회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
postData = self._stringtify(ContactInfo)
return self._httppost('/IDs', postData, CorpNum, UserID) | python | def updateContact(self, CorpNum, ContactInfo, UserID=None):
""" 담당자 정보 수정
args
CorpNum : 회원 사업자번호
ContactInfo : 담당자 정보, Reference ContactInfo class
UserID : 회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
postData = self._stringtify(ContactInfo)
return self._httppost('/IDs', postData, CorpNum, UserID) | [
"def",
"updateContact",
"(",
"self",
",",
"CorpNum",
",",
"ContactInfo",
",",
"UserID",
"=",
"None",
")",
":",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"ContactInfo",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/IDs'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")"
] | Update contact information
            args
                CorpNum : member business registration number
                ContactInfo : contact information, Reference ContactInfo class
                UserID : member ID
            return
                processing result, consisting of code and message
            raise
PopbillException | [
"담당자",
"정보",
"수정",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"ContactInfo",
":",
"담당자",
"정보",
"Reference",
"ContactInfo",
"class",
"UserID",
":",
"회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L219-L231 |
linkhub-sdk/popbill.py | popbill/base.py | PopbillBase.updateCorpInfo | def updateCorpInfo(self, CorpNum, CorpInfo, UserID=None):
""" 담당자 정보 수정
args
CorpNum : 회원 사업자번호
CorpInfo : 회사 정보, Reference CorpInfo class
UserID : 회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
postData = self._stringtify(CorpInfo)
return self._httppost('/CorpInfo', postData, CorpNum, UserID) | python | def updateCorpInfo(self, CorpNum, CorpInfo, UserID=None):
""" 담당자 정보 수정
args
CorpNum : 회원 사업자번호
CorpInfo : 회사 정보, Reference CorpInfo class
UserID : 회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
postData = self._stringtify(CorpInfo)
return self._httppost('/CorpInfo', postData, CorpNum, UserID) | [
"def",
"updateCorpInfo",
"(",
"self",
",",
"CorpNum",
",",
"CorpInfo",
",",
"UserID",
"=",
"None",
")",
":",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"CorpInfo",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/CorpInfo'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")"
] | Update company information
            args
                CorpNum : member business registration number
                CorpInfo : company information, Reference CorpInfo class
                UserID : member ID
            return
                processing result, consisting of code and message
            raise
PopbillException | [
"담당자",
"정보",
"수정",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"CorpInfo",
":",
"회사",
"정보",
"Reference",
"CorpInfo",
"class",
"UserID",
":",
"회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L245-L257 |
linkhub-sdk/popbill.py | popbill/base.py | PopbillBase.registContact | def registContact(self, CorpNum, ContactInfo, UserID=None):
""" 담당자 추가
args
CorpNum : 회원 사업자번호
ContactInfo : 담당자 정보, Reference ContactInfo class
UserID : 회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
postData = self._stringtify(ContactInfo)
return self._httppost('/IDs/New', postData, CorpNum, UserID) | python | def registContact(self, CorpNum, ContactInfo, UserID=None):
""" 담당자 추가
args
CorpNum : 회원 사업자번호
ContactInfo : 담당자 정보, Reference ContactInfo class
UserID : 회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
postData = self._stringtify(ContactInfo)
return self._httppost('/IDs/New', postData, CorpNum, UserID) | [
"def",
"registContact",
"(",
"self",
",",
"CorpNum",
",",
"ContactInfo",
",",
"UserID",
"=",
"None",
")",
":",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"ContactInfo",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/IDs/New'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")"
] | Add a contact
            args
                CorpNum : member business registration number
                ContactInfo : contact information, Reference ContactInfo class
                UserID : member ID
            return
                processing result, consisting of code and message
            raise
PopbillException | [
"담당자",
"추가",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"ContactInfo",
":",
"담당자",
"정보",
"Reference",
"ContactInfo",
"class",
"UserID",
":",
"회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L259-L271 |
mpasternak/django-flexible-reports | flexible_reports/models/validators.py | TemplateValidator | def TemplateValidator(value):
"""Try to compile a string into a Django template"""
try:
Template(value)
except Exception as e:
raise ValidationError(
_("Cannot compile template (%(exception)s)"),
params={"exception": e}
) | python | def TemplateValidator(value):
"""Try to compile a string into a Django template"""
try:
Template(value)
except Exception as e:
raise ValidationError(
_("Cannot compile template (%(exception)s)"),
params={"exception": e}
) | [
"def",
"TemplateValidator",
"(",
"value",
")",
":",
"try",
":",
"Template",
"(",
"value",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValidationError",
"(",
"_",
"(",
"\"Cannot compile template (%(exception)s)\"",
")",
",",
"params",
"=",
"{",
"\"exception\"",
":",
"e",
"}",
")"
] | Try to compile a string into a Django template | [
"Try",
"to",
"compile",
"a",
"string",
"into",
"a",
"Django",
"template"
] | train | https://github.com/mpasternak/django-flexible-reports/blob/7a9a657bc3778c150357f1efe7208077b1a1490b/flexible_reports/models/validators.py#L7-L16 |
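A minimal sketch of wiring the validator into a Django model field, assuming `TemplateValidator` is importable from `flexible_reports.models.validators` (the path in this row's URL); the model itself is illustrative:

from django.db import models

from flexible_reports.models.validators import TemplateValidator

class ReportColumn(models.Model):
    # Invalid Django template syntax is rejected at validation time
    # (e.g. full_clean() or form validation) instead of at render time.
    template = models.TextField(validators=[TemplateValidator])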
pmacosta/pcsv | pcsv/merge.py | merge | def merge(
fname1,
fname2,
dfilter1=None,
dfilter2=None,
has_header1=True,
has_header2=True,
frow1=0,
frow2=0,
ofname=None,
ocols=None,
):
r"""
Merge two comma-separated values files.
Data columns from the second file are appended after data columns from the
first file. Empty values in columns are used if the files have different
number of rows
:param fname1: Name of the first comma-separated values file, the file
whose columns appear first in the output file
:type fname1: FileNameExists_
:param fname2: Name of the second comma-separated values file, the file
whose columns appear last in the output file
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the first file. If None no
data filtering is done on the file
:type dfilter1: :ref:`CsvDataFilter` or None
:param dfilter2: Row and/or column filter for the second file. If None no
data filtering is done on the file
:type dfilter2: :ref:`CsvDataFilter` or None
:param has_header1: Flag that indicates whether the first comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the second comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header2: boolean
:param frow1: First comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
                  the first row that has a number (integer or float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Second comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
                  the first row that has a number (integer or float) in at
least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the data from the first and second files.
If None the first file is replaced "in place"
:type ofname: FileName_ or None
:param ocols: Column names of the output comma-separated values file.
If None the column names in the first and second files are
used if **has_header1** and/or **has_header2** are True. The
column labels :code:`'Column [column_number]'` are used when
one of the two files does not have a header, where
:code:`[column_number]` is an integer representing the column
number (column 0 is the leftmost column). No header is used
if **has_header1** and **has_header2** are False
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.merge.merge
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (Combined columns in data files and output columns are
different)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
# pylint: disable=R0913,R0914
iomm_ex = pexdoc.exh.addex(
RuntimeError, "Combined columns in data files and output columns are different"
)
# Read and validate file 1
obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate file 2
obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
cfilter1 = obj1.header() if obj1.cfilter is None else obj1.cfilter
    cfilter2 = obj2.header() if obj2.cfilter is None else obj2.cfilter
# Create new header
cols1 = len(cfilter1)
cols2 = len(cfilter2)
if (ocols is None) and has_header1 and has_header2:
ocols = [cfilter1 + cfilter2]
elif (ocols is None) and has_header1 and (not has_header2):
ocols = [
cfilter1
+ [
"Column {0}".format(item)
for item in range(cols1 + 1, cols1 + cols2 + 1)
]
]
elif (ocols is None) and (not has_header1) and has_header2:
ocols = [["Column {0}".format(item) for item in range(1, cols1 + 1)] + cfilter2]
elif ocols is None:
ocols = []
else:
iomm_ex(cols1 + cols2 != len(ocols))
ocols = [ocols]
# Even out rows
delta = obj1.rows(filtered=True) - obj2.rows(filtered=True)
data1 = obj1.data(filtered=True)
data2 = obj2.data(filtered=True)
if delta > 0:
row = [cols2 * [None]]
data2 += delta * row
elif delta < 0:
row = [cols1 * [None]]
data1 += abs(delta) * row
data = ocols
for item1, item2 in zip(data1, data2):
data.append(item1 + item2)
write(fname=ofname, data=data, append=False) | python | def merge(
fname1,
fname2,
dfilter1=None,
dfilter2=None,
has_header1=True,
has_header2=True,
frow1=0,
frow2=0,
ofname=None,
ocols=None,
):
r"""
Merge two comma-separated values files.
Data columns from the second file are appended after data columns from the
first file. Empty values in columns are used if the files have different
number of rows
:param fname1: Name of the first comma-separated values file, the file
whose columns appear first in the output file
:type fname1: FileNameExists_
:param fname2: Name of the second comma-separated values file, the file
whose columns appear last in the output file
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the first file. If None no
data filtering is done on the file
:type dfilter1: :ref:`CsvDataFilter` or None
:param dfilter2: Row and/or column filter for the second file. If None no
data filtering is done on the file
:type dfilter2: :ref:`CsvDataFilter` or None
:param has_header1: Flag that indicates whether the first comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the second comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header2: boolean
:param frow1: First comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
                  the first row that has a number (integer or float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Second comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
                  the first row that has a number (integer or float) in at
least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the data from the first and second files.
If None the first file is replaced "in place"
:type ofname: FileName_ or None
:param ocols: Column names of the output comma-separated values file.
If None the column names in the first and second files are
used if **has_header1** and/or **has_header2** are True. The
column labels :code:`'Column [column_number]'` are used when
one of the two files does not have a header, where
:code:`[column_number]` is an integer representing the column
number (column 0 is the leftmost column). No header is used
if **has_header1** and **has_header2** are False
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.merge.merge
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (Combined columns in data files and output columns are
different)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
# pylint: disable=R0913,R0914
iomm_ex = pexdoc.exh.addex(
RuntimeError, "Combined columns in data files and output columns are different"
)
# Read and validate file 1
obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate file 2
obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
cfilter1 = obj1.header() if obj1.cfilter is None else obj1.cfilter
    cfilter2 = obj2.header() if obj2.cfilter is None else obj2.cfilter
# Create new header
cols1 = len(cfilter1)
cols2 = len(cfilter2)
if (ocols is None) and has_header1 and has_header2:
ocols = [cfilter1 + cfilter2]
elif (ocols is None) and has_header1 and (not has_header2):
ocols = [
cfilter1
+ [
"Column {0}".format(item)
for item in range(cols1 + 1, cols1 + cols2 + 1)
]
]
elif (ocols is None) and (not has_header1) and has_header2:
ocols = [["Column {0}".format(item) for item in range(1, cols1 + 1)] + cfilter2]
elif ocols is None:
ocols = []
else:
iomm_ex(cols1 + cols2 != len(ocols))
ocols = [ocols]
# Even out rows
delta = obj1.rows(filtered=True) - obj2.rows(filtered=True)
data1 = obj1.data(filtered=True)
data2 = obj2.data(filtered=True)
if delta > 0:
row = [cols2 * [None]]
data2 += delta * row
elif delta < 0:
row = [cols1 * [None]]
data1 += abs(delta) * row
data = ocols
for item1, item2 in zip(data1, data2):
data.append(item1 + item2)
write(fname=ofname, data=data, append=False) | [
"def",
"merge",
"(",
"fname1",
",",
"fname2",
",",
"dfilter1",
"=",
"None",
",",
"dfilter2",
"=",
"None",
",",
"has_header1",
"=",
"True",
",",
"has_header2",
"=",
"True",
",",
"frow1",
"=",
"0",
",",
"frow2",
"=",
"0",
",",
"ofname",
"=",
"None",
",",
"ocols",
"=",
"None",
",",
")",
":",
"# pylint: disable=R0913,R0914",
"iomm_ex",
"=",
"pexdoc",
".",
"exh",
".",
"addex",
"(",
"RuntimeError",
",",
"\"Combined columns in data files and output columns are different\"",
")",
"# Read and validate file 1",
"obj1",
"=",
"CsvFile",
"(",
"fname",
"=",
"fname1",
",",
"dfilter",
"=",
"dfilter1",
",",
"has_header",
"=",
"has_header1",
",",
"frow",
"=",
"frow1",
")",
"# Read and validate file 2",
"obj2",
"=",
"CsvFile",
"(",
"fname",
"=",
"fname2",
",",
"dfilter",
"=",
"dfilter2",
",",
"has_header",
"=",
"has_header2",
",",
"frow",
"=",
"frow2",
")",
"# Assign output data structure",
"ofname",
"=",
"fname1",
"if",
"ofname",
"is",
"None",
"else",
"ofname",
"cfilter1",
"=",
"obj1",
".",
"header",
"(",
")",
"if",
"obj1",
".",
"cfilter",
"is",
"None",
"else",
"obj1",
".",
"cfilter",
"cfilter2",
"=",
"obj2",
".",
"header",
"(",
")",
"if",
"obj1",
".",
"cfilter",
"is",
"None",
"else",
"obj2",
".",
"cfilter",
"# Create new header",
"cols1",
"=",
"len",
"(",
"cfilter1",
")",
"cols2",
"=",
"len",
"(",
"cfilter2",
")",
"if",
"(",
"ocols",
"is",
"None",
")",
"and",
"has_header1",
"and",
"has_header2",
":",
"ocols",
"=",
"[",
"cfilter1",
"+",
"cfilter2",
"]",
"elif",
"(",
"ocols",
"is",
"None",
")",
"and",
"has_header1",
"and",
"(",
"not",
"has_header2",
")",
":",
"ocols",
"=",
"[",
"cfilter1",
"+",
"[",
"\"Column {0}\"",
".",
"format",
"(",
"item",
")",
"for",
"item",
"in",
"range",
"(",
"cols1",
"+",
"1",
",",
"cols1",
"+",
"cols2",
"+",
"1",
")",
"]",
"]",
"elif",
"(",
"ocols",
"is",
"None",
")",
"and",
"(",
"not",
"has_header1",
")",
"and",
"has_header2",
":",
"ocols",
"=",
"[",
"[",
"\"Column {0}\"",
".",
"format",
"(",
"item",
")",
"for",
"item",
"in",
"range",
"(",
"1",
",",
"cols1",
"+",
"1",
")",
"]",
"+",
"cfilter2",
"]",
"elif",
"ocols",
"is",
"None",
":",
"ocols",
"=",
"[",
"]",
"else",
":",
"iomm_ex",
"(",
"cols1",
"+",
"cols2",
"!=",
"len",
"(",
"ocols",
")",
")",
"ocols",
"=",
"[",
"ocols",
"]",
"# Even out rows",
"delta",
"=",
"obj1",
".",
"rows",
"(",
"filtered",
"=",
"True",
")",
"-",
"obj2",
".",
"rows",
"(",
"filtered",
"=",
"True",
")",
"data1",
"=",
"obj1",
".",
"data",
"(",
"filtered",
"=",
"True",
")",
"data2",
"=",
"obj2",
".",
"data",
"(",
"filtered",
"=",
"True",
")",
"if",
"delta",
">",
"0",
":",
"row",
"=",
"[",
"cols2",
"*",
"[",
"None",
"]",
"]",
"data2",
"+=",
"delta",
"*",
"row",
"elif",
"delta",
"<",
"0",
":",
"row",
"=",
"[",
"cols1",
"*",
"[",
"None",
"]",
"]",
"data1",
"+=",
"abs",
"(",
"delta",
")",
"*",
"row",
"data",
"=",
"ocols",
"for",
"item1",
",",
"item2",
"in",
"zip",
"(",
"data1",
",",
"data2",
")",
":",
"data",
".",
"append",
"(",
"item1",
"+",
"item2",
")",
"write",
"(",
"fname",
"=",
"ofname",
",",
"data",
"=",
"data",
",",
"append",
"=",
"False",
")"
] | r"""
Merge two comma-separated values files.
Data columns from the second file are appended after data columns from the
first file. Empty values in columns are used if the files have different
number of rows
:param fname1: Name of the first comma-separated values file, the file
whose columns appear first in the output file
:type fname1: FileNameExists_
:param fname2: Name of the second comma-separated values file, the file
whose columns appear last in the output file
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the first file. If None no
data filtering is done on the file
:type dfilter1: :ref:`CsvDataFilter` or None
:param dfilter2: Row and/or column filter for the second file. If None no
data filtering is done on the file
:type dfilter2: :ref:`CsvDataFilter` or None
:param has_header1: Flag that indicates whether the first comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the second comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header2: boolean
:param frow1: First comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
              the first row that has a number (integer or float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Second comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
              the first row that has a number (integer or float) in at
least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the data from the first and second files.
If None the first file is replaced "in place"
:type ofname: FileName_ or None
:param ocols: Column names of the output comma-separated values file.
If None the column names in the first and second files are
used if **has_header1** and/or **has_header2** are True. The
column labels :code:`'Column [column_number]'` are used when
one of the two files does not have a header, where
:code:`[column_number]` is an integer representing the column
number (column 0 is the leftmost column). No header is used
if **has_header1** and **has_header2** are False
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.merge.merge
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (Combined columns in data files and output columns are
different)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]] | [
"r",
"Merge",
"two",
"comma",
"-",
"separated",
"values",
"files",
"."
] | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/merge.py#L43-L197 |
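A hypothetical call sketch: the file names and the column name in `dfilter2` are made up, and `merge` is assumed to be importable from `pcsv.merge` as in this row's URL:

from pcsv.merge import merge

merge(
    fname1='file1.csv',        # columns of this file come first
    fname2='file2.csv',
    dfilter2=['Temp'],         # keep only the 'Temp' column of the second file
    ofname='merged.csv',       # write the result here instead of in place
)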
MSchnei/pyprf_feature | pyprf_feature/analysis/pyprf_opt_brute.py | pyprf_opt_brute | def pyprf_opt_brute(strCsvCnfg, objNspc, lgcTest=False, strPathHrf=None,
varRat=None):
"""
    Function for optimizing given pRF parameters using brute-force grid search.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
objNspc : object
Name space from command line arguments.
lgcTest : Boolean
        Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
strPathHrf : str or None:
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
varRat : float, default None
        Ratio of the size of the suppressive surround to the size of the center pRF
"""
# *************************************************************************
# *** Check time
print('---pRF analysis')
varTme01 = time.time()
# *************************************************************************
# *************************************************************************
# *** Preparations
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# Conditional imports:
if cfg.strVersion == 'gpu':
from pyprf_feature.analysis.find_prf_gpu import find_prf_gpu
if ((cfg.strVersion == 'cython') or (cfg.strVersion == 'numpy')):
from pyprf_feature.analysis.find_prf_cpu import find_prf_cpu
# Convert preprocessing parameters (for temporal smoothing)
# from SI units (i.e. [s]) into units of data array (volumes):
cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr)
# *************************************************************************
# *************************************************************************
# *** Preprocessing
# The functional data will be masked and demeaned:
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100.)
# set the precision of the header to np.float32 so that the prf results
# will be saved in this precision later
hdrMsk.set_data_dtype(np.float32)
print('---Number of voxels included in analysis: ' +
str(np.sum(aryLgcVar)))
# *************************************************************************
# *** Checks
# Make sure that if gpu fitting is used, the number of cross-validations is
# set to 1, not higher
if cfg.strVersion == 'gpu':
strErrMsg = 'Stopping program. ' + \
'Cross-validation on GPU is currently not supported. ' + \
'Set varNumXval equal to 1 in csv file in order to continue. '
assert cfg.varNumXval == 1, strErrMsg
# For the GPU version, we need to set down the parallelisation to 1 now,
# because no separate CPU threads are to be created. We may still use CPU
# parallelisation for preprocessing, which is why the parallelisation
# factor is only reduced now, not earlier.
if cfg.strVersion == 'gpu':
cfg.varPar = 1
# check whether we need to crossvalidate
if np.greater(cfg.varNumXval, 1):
cfg.lgcXval = True
elif np.equal(cfg.varNumXval, 1):
cfg.lgcXval = False
strErrMsg = 'Stopping program. ' + \
'Set numXval (number of crossvalidation folds) to 1 or higher'
assert np.greater_equal(cfg.varNumXval, 1), strErrMsg
# derive number of feature for fitting
if varRat is not None:
# since there will be a beta parameter estimate both for the center and
# the surround, we multiply by 2
varNumFtr = int(2*cfg.switchHrfSet)
else:
varNumFtr = cfg.switchHrfSet
# *************************************************************************
# *************************************************************************
# Load previous pRF fitting results
print('---String to prior results provided by user:')
print(objNspc.strPthPrior)
# Load the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [objNspc.strPthPrior + '_x_pos.nii.gz',
objNspc.strPthPrior + '_y_pos.nii.gz',
objNspc.strPthPrior + '_SD.nii.gz',
objNspc.strPthPrior + '_eccentricity.nii.gz']
lstPrmInt, objHdr, aryAff = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])
# Convert list to array
assert len(lstPrmInt) == 1
aryIntGssPrm = lstPrmInt[0]
del(lstPrmInt)
# Some voxels were excluded because they did not have sufficient mean
    # and/or variance - exclude their initial parameters, too
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
# *************************************************************************
# *************************************************************************
# *** Sort voxels by polar angle/previous parameters
# Calculate the polar angles that were found in independent localiser
aryPlrAng = np.arctan2(aryIntGssPrm[:, 1], aryIntGssPrm[:, 0])
# Calculate the unique polar angles that are expected from grid search
aryUnqPlrAng = np.linspace(0.0, 2*np.pi, objNspc.varNumOpt2,
endpoint=False)
# Expected polar angle values are range from 0 to 2*pi, while
# the calculated angle values will range from -pi to pi
# Thus, bring empirical values from range -pi, pi to range 0, 2pi
aryPlrAng = (aryPlrAng + 2 * np.pi) % (2 * np.pi)
# For every empirically found polar angle get the index of the nearest
# theoretically expected polar angle, this is to offset small imprecisions
aryUnqPlrAngInd, aryDstPlrAng = find_near_pol_ang(aryPlrAng, aryUnqPlrAng)
# Make sure that the maximum distance from a found polar angle to a grid
# point is smaller than the distance between two neighbor grid points
assert np.max(aryDstPlrAng) < np.divide(2*np.pi, objNspc.varNumOpt2)
# Update unique polar angles such that it contains only the ones which
# were found in data
aryUnqPlrAng = aryUnqPlrAng[np.unique(aryUnqPlrAngInd)]
# Update indices
aryUnqPlrAngInd, aryDstPlrAng = find_near_pol_ang(aryPlrAng, aryUnqPlrAng)
# Get logical arrays that index voxels with particular polar angle
lstLgcUnqPlrAng = []
for indPlrAng in range(len(aryUnqPlrAng)):
lstLgcUnqPlrAng.append([aryUnqPlrAngInd == indPlrAng][0])
print('---Number of radial position options provided by user: ' +
str(objNspc.varNumOpt1))
print('---Number of angular position options provided by user: ' +
str(objNspc.varNumOpt2))
print('---Number of unique polar angles found in prior estimates: ' +
str(len(aryUnqPlrAng)))
print('---Maximum displacement in radial direction that is allowed: ' +
str(objNspc.varNumOpt3))
print('---Fitted modelled are restricted to stimulated area: ' +
str(objNspc.lgcRstrCentre))
# *************************************************************************
# *** Perform prf fitting
# Create array for collecting winner parameters
aryBstXpos = np.zeros((aryPlrAng.shape[0]))
aryBstYpos = np.zeros((aryPlrAng.shape[0]))
aryBstSd = np.zeros((aryPlrAng.shape[0]))
aryBstR2 = np.zeros((aryPlrAng.shape[0]))
aryBstBts = np.zeros((aryPlrAng.shape[0], varNumFtr))
if np.greater(cfg.varNumXval, 1):
aryBstR2Single = np.zeros((aryPlrAng.shape[0],
len(cfg.lstPathNiiFunc)))
# loop over all found instances of polar angle/previous parameters
for indPlrAng in range(len(aryUnqPlrAng)):
print('------Polar angle number ' + str(indPlrAng+1) + ' out of ' +
str(len(aryUnqPlrAng)))
# get the polar angle for the current voxel batch
varPlrAng = np.array(aryUnqPlrAng[indPlrAng])
# get logical array to index voxels with this particular polar angle
lgcUnqPlrAng = lstLgcUnqPlrAng[indPlrAng]
# get prior eccentricities for current voxel batch
vecPrrEcc = aryIntGssPrm[lgcUnqPlrAng, 3]
print('---------Number of voxels of this polar angle: ' +
str(np.sum(lgcUnqPlrAng)))
# *********************************************************************
# *********************************************************************
# *** Create time course models for this particular polar angle
# Vector with the radial position:
vecRad = np.linspace(0.0, cfg.varExtXmax, objNspc.varNumOpt1,
endpoint=True)
# Get all possible combinations on the grid, using matrix indexing ij
# of output
aryRad, aryTht = np.meshgrid(vecRad, varPlrAng, indexing='ij')
# Flatten arrays to be able to combine them with meshgrid
vecRad = aryRad.flatten()
vecTht = aryTht.flatten()
# Convert from polar to cartesian
vecX, vecY = map_pol_to_crt(vecTht, vecRad)
        # Vector with standard deviations of pRF models (in degrees of visual angle):
vecPrfSd = np.linspace(cfg.varPrfStdMin, cfg.varPrfStdMax,
cfg.varNumPrfSizes, endpoint=True)
# Create model parameters
varNumMdls = len(vecX) * len(vecPrfSd)
aryMdlParams = np.zeros((varNumMdls, 3), dtype=np.float32)
varCntMdlPrms = 0
# Loop through x-positions:
for idxXY in range(0, len(vecX)):
# Loop through standard deviations (of Gaussian pRF models):
for idxSd in range(0, len(vecPrfSd)):
# Place index and parameters in array:
aryMdlParams[varCntMdlPrms, 0] = vecX[idxXY]
aryMdlParams[varCntMdlPrms, 1] = vecY[idxXY]
aryMdlParams[varCntMdlPrms, 2] = vecPrfSd[idxSd]
# Increment parameter index:
varCntMdlPrms += 1
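    # aryMdlParams now holds varNumMdls rows of (x, y, sd) candidates, one
    # per combination of grid position and pRF size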
# Convert winner parameters from degrees of visual angle to pixel
vecIntX, vecIntY, vecIntSd = rmp_deg_pixel_xys(aryMdlParams[:, 0],
aryMdlParams[:, 1],
aryMdlParams[:, 2],
cfg.tplVslSpcSze,
cfg.varExtXmin,
cfg.varExtXmax,
cfg.varExtYmin,
cfg.varExtYmax)
aryMdlParamsPxl = np.column_stack((vecIntX, vecIntY, vecIntSd))
if objNspc.lgcRstrCentre:
# Calculate the areas that were stimulated during the experiment
arySptExpInf = np.load(cfg.strSptExpInf)
arySptExpInf = np.rot90(arySptExpInf, k=3)
        aryStimArea = np.sum(arySptExpInf, axis=-1).astype(bool)
# Get logical to exclude models with pRF centre outside stim area
lgcMdlInc = aryStimArea[aryMdlParamsPxl[:, 0].astype(np.int32),
aryMdlParamsPxl[:, 1].astype(np.int32)]
# Exclude models with prf center outside stimulated area
aryMdlParams = aryMdlParams[lgcMdlInc, :]
aryMdlParamsPxl = aryMdlParamsPxl[lgcMdlInc, :]
# Create model time courses
aryPrfTc = model_creation_opt(dicCnfg, aryMdlParamsPxl,
strPathHrf=strPathHrf, varRat=varRat,
lgcPrint=False)
    # The model time courses will be preprocessed such that they are
    # temporally smoothed with the same factor as the data and z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp,
lgcPrint=False)
# *********************************************************************
# *** Create logical to restrict model fitting in radial direction
if objNspc.varNumOpt3 is not None:
# Calculate eccentricity of currently tested model parameters
vecMdlEcc = np.sqrt(np.add(np.square(aryMdlParams[:, 0]),
np.square(aryMdlParams[:, 1])))
# Compare model eccentricity against prior eccentricity
vecPrrEccGrd, vecMdlEccGrd = np.meshgrid(vecPrrEcc, vecMdlEcc,
indexing='ij')
# Consider allowed eccentricity shift as specified by user
lgcRstr = np.logical_and(np.less_equal(vecMdlEccGrd,
np.add(vecPrrEccGrd,
objNspc.varNumOpt3)),
np.greater(vecMdlEccGrd,
np.subtract(vecPrrEccGrd,
objNspc.varNumOpt3)
)
)
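        # lgcRstr has shape (number of voxels with this polar angle,
        # number of candidate models)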
else:
        lgcRstr = np.ones((np.sum(lgcUnqPlrAng),
                           aryMdlParams.shape[0]), dtype=bool)
# *********************************************************************
    # *** Check that for every voxel there is at least one model to try
# Is there at least 1 model for each voxel?
lgcMdlPerVxl = np.greater(np.sum(lgcRstr, axis=1), 0)
print('---------Number of voxels fitted: ' + str(np.sum(lgcMdlPerVxl)))
    # Those voxels for which no model would be tried, for example because
    # the pRF parameters estimated in the prior were outside the stimulated
    # area, are excluded from model fitting by setting their logical to False
lgcUnqPlrAng[lgcUnqPlrAng] = lgcMdlPerVxl
# We need to update the index table for restricting model fitting
lgcRstr = lgcRstr[lgcMdlPerVxl, :]
# *********************************************************************
# *** Find best model for voxels with this particular polar angle
# Only perform the fitting if there are voxels with models to optimize
if np.any(lgcUnqPlrAng):
# Empty list for results (parameters of best fitting pRF model):
lstPrfRes = [None] * cfg.varPar
# Empty list for processes:
lstPrcs = [None] * cfg.varPar
# Create a queue to put the results in:
queOut = mp.Queue()
# Put logical for model restriction in list
lstRst = np.array_split(lgcRstr, cfg.varPar)
del(lgcRstr)
# Create list with chunks of func data for parallel processes:
lstFunc = np.array_split(aryFunc[lgcUnqPlrAng, :], cfg.varPar)
# CPU version (using numpy or cython for pRF finding):
if ((cfg.strVersion == 'numpy') or (cfg.strVersion == 'cython')):
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_cpu,
args=(idxPrc,
lstFunc[idxPrc],
aryPrfTc,
aryMdlParams,
cfg.strVersion,
cfg.lgcXval,
cfg.varNumXval,
queOut),
kwargs={'lgcRstr':
lstRst[idxPrc],
'lgcPrint': False},
)
# Daemon (kills processes when exiting):
                lstPrcs[idxPrc].daemon = True
# GPU version (using tensorflow for pRF finding):
elif cfg.strVersion == 'gpu':
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_gpu,
args=(idxPrc,
aryMdlParams,
lstFunc[idxPrc],
aryPrfTc,
queOut),
kwargs={'lgcRstr':
lstRst[idxPrc],
'lgcPrint': False},
)
# Daemon (kills processes when exiting):
                lstPrcs[idxPrc].daemon = True
# Start processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].start()
            # Delete reference to list with function data (the data continues
            # to exist in the child processes):
del(lstFunc)
# Collect results from queue:
for idxPrc in range(0, cfg.varPar):
lstPrfRes[idxPrc] = queOut.get(True)
# Join processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].join()
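            # Note: the queue is drained before join() on purpose; joining
            # first can deadlock while a child's queue feeder thread still
            # holds unconsumed results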
# *****************************************************************
# *****************************************************************
# *** Prepare pRF finding results for export
# Put output into correct order:
lstPrfRes = sorted(lstPrfRes)
# collect results from parallelization
aryBstTmpXpos = joinRes(lstPrfRes, cfg.varPar, 1, inFormat='1D')
aryBstTmpYpos = joinRes(lstPrfRes, cfg.varPar, 2, inFormat='1D')
aryBstTmpSd = joinRes(lstPrfRes, cfg.varPar, 3, inFormat='1D')
aryBstTmpR2 = joinRes(lstPrfRes, cfg.varPar, 4, inFormat='1D')
aryBstTmpBts = joinRes(lstPrfRes, cfg.varPar, 5, inFormat='2D')
if np.greater(cfg.varNumXval, 1):
aryTmpBstR2Single = joinRes(lstPrfRes, cfg.varPar, 6,
inFormat='2D')
# Delete unneeded large objects:
del(lstPrfRes)
# *****************************************************************
# *****************************************************************
            # Put findings for voxels with this specific polar angle into the
            # array with results for all voxels
aryBstXpos[lgcUnqPlrAng] = aryBstTmpXpos
aryBstYpos[lgcUnqPlrAng] = aryBstTmpYpos
aryBstSd[lgcUnqPlrAng] = aryBstTmpSd
aryBstR2[lgcUnqPlrAng] = aryBstTmpR2
aryBstBts[lgcUnqPlrAng, :] = aryBstTmpBts
if np.greater(cfg.varNumXval, 1):
aryBstR2Single[lgcUnqPlrAng, :] = aryTmpBstR2Single
# *****************************************************************
# *************************************************************************
# Calculate polar angle map:
aryPlrAng = np.arctan2(aryBstYpos, aryBstXpos)
# Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ):
aryEcc = np.sqrt(np.add(np.square(aryBstXpos),
np.square(aryBstYpos)))
    # It is possible that after optimization the pRF has moved to location 0, 0.
    # In these cases, the polar angle parameter is arbitrary and will be
    # assigned either 0 or pi. To preserve smoothness of the map, assign the
    # initial polar angle value from the independent localiser
lgcMvdOrgn = np.logical_and(aryBstXpos == 0.0, aryBstYpos == 0.0)
lgcMvdOrgn = np.logical_and(lgcMvdOrgn, aryBstSd > 0)
aryIntPlrAng = np.arctan2(aryIntGssPrm[:, 1], aryIntGssPrm[:, 0])
aryPlrAng[lgcMvdOrgn] = np.copy(aryIntPlrAng[lgcMvdOrgn])
# *************************************************************************
# *************************************************************************
# Export each map of best parameters as a 3D nii file
print('---------Exporting results')
# Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
    # Concatenate all the best voxel maps
aryBstMaps = np.stack([aryBstXpos, aryBstYpos, aryBstSd, aryBstR2,
aryPlrAng, aryEcc], axis=1)
    # List with name suffixes of output images:
lstNiiNames = ['_x_pos_brute',
'_y_pos_brute',
'_SD_brute',
'_R2_brute',
'_polar_angle_brute',
'_eccentricity_brute']
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
    # Export map results as separate 3D nii files
export_nii(aryBstMaps, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='3D')
# *************************************************************************
# *************************************************************************
# Save beta parameter estimates for every feature:
    # List with name suffixes of output images:
lstNiiNames = ['_Betas_brute']
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
export_nii(aryBstBts, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
# *************************************************************************
# *************************************************************************
# Save R2 maps from crossvalidation (saved for every run) as nii:
if np.greater(cfg.varNumXval, 1):
# truncate extremely negative R2 values
aryBstR2Single[np.where(np.less_equal(aryBstR2Single, -1.0))] = -1.0
        # List with name suffixes of output images:
lstNiiNames = ['_R2_single_brute']
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in
lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export R2 maps as a single 4D nii file
export_nii(aryBstR2Single, lstNiiNames, aryLgcMsk, aryLgcVar,
tplNiiShp, aryAff, hdrMsk, outFormat='4D')
# *************************************************************************
# *************************************************************************
# *** Report time
varTme02 = time.time()
varTme03 = varTme02 - varTme01
print('---Elapsed time: ' + str(varTme03) + ' s')
    print('---Done.') | python | (func_code_string omitted: verbatim duplicate of the whole_func_string above) | (func_code_tokens omitted: token-level duplicate of func_code_string) | Function for optimizing given pRF parameters using brute-force grid search.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
objNspc : object
Name space from command line arguments.
lgcTest : Boolean
    Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
varRat : float, default None
    Ratio of size of suppressive surround to size of center pRF | (func_documentation_tokens omitted: token-level duplicate of the documentation string) | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/pyprf_opt_brute.py#L48-L577 |
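A minimal sketch of the polar-angle binning step used in the function above. The helper find_near_pol_ang lives in pyprf_feature's utilities and is not shown in this document, so this re-implementation is an assumption about its behaviour (nearest grid angle on the circle), not the library's actual code.

import numpy as np

def find_near_pol_ang_sketch(aryEmpPlrAng, aryGrdPlrAng):
    # Pairwise absolute angular difference, wrapped onto the circle
    aryDst = np.abs(aryEmpPlrAng[:, None] - aryGrdPlrAng[None, :])
    aryDst = np.minimum(aryDst, 2 * np.pi - aryDst)
    # Index of the nearest grid angle, plus the distance to it
    vecInd = np.argmin(aryDst, axis=1)
    return vecInd, aryDst[np.arange(len(vecInd)), vecInd]

# Example: wrap empirical angles to [0, 2*pi) and bin onto an 8-point grid
aryEmp = (np.array([-np.pi / 2, 0.1, 3.0]) + 2 * np.pi) % (2 * np.pi)
aryGrd = np.linspace(0.0, 2 * np.pi, 8, endpoint=False)
vecInd, vecDst = find_near_pol_ang_sketch(aryEmp, aryGrd)
print(vecInd, vecDst)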
osilkin98/PyBRY | generator.py | get_lbry_api_function_docs | def get_lbry_api_function_docs(url=LBRY_API_RAW_JSON_URL):
""" Scrapes the given URL to a page in JSON format to obtain the documentation for the LBRY API
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:return: List of functions retrieved from the `url` given
:rtype: list
"""
try:
# Grab the page content
docs_page = urlopen(url)
# Read the contents of the actual url we grabbed and decode them into UTF-8
contents = docs_page.read().decode("utf-8")
# Return the contents loaded as JSON
return loads(contents)
    # If we get an exception, print it and fall through to return an empty list
except URLError as UE:
print(UE)
except Exception as E:
print(E)
    return [] | python | (func_code_string omitted: verbatim duplicate of the whole_func_string above) | (func_code_tokens omitted: token-level duplicate of func_code_string) | Scrapes the given URL for the JSON-formatted documentation of the LBRY API
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:return: List of functions retrieved from the `url` given
:rtype: list | [
"Scrapes",
"the",
"given",
"URL",
"to",
"a",
"page",
"in",
"JSON",
"format",
"to",
"obtain",
"the",
"documentation",
"for",
"the",
"LBRY",
"API"
] | train | https://github.com/osilkin98/PyBRY/blob/af86805a8077916f72f3fe980943d4cd741e61f0/generator.py#L10-L36 |
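A minimal usage sketch for the function above. The entry keys accessed here ('name', 'arguments') match how generate_method_definition, defined next in this file, consumes the returned JSON; beyond that, the exact schema depends on the LBRY docs page:

functions = get_lbry_api_function_docs()  # defaults to LBRY_API_RAW_JSON_URL

if not functions:
    print("Could not retrieve the LBRY API documentation")
else:
    for func in functions:
        # Print each API command with the number of arguments it takes:
        print(func["name"], "-", len(func["arguments"]), "arguments")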
osilkin98/PyBRY | generator.py | generate_method_definition | def generate_method_definition(func):
""" Generates the body for the given function
:param dict func: dict of a JSON-Formatted function as defined by the API docs
:return: A String containing the definition for the function as it should be written in code
:rtype: str
"""
indent = 4
# initial definition
method_definition = (" " * indent) + "def " + func["name"]
# Here we just create a queue and put all the parameters
# into the queue in the order that they were given,
params_required = [
param for param in func["arguments"] if param["is_required"]
]
params_optional = [
param for param in func["arguments"]
if not param["is_required"]
]
# Open the parameter definitions
method_definition += "(self, "
for param in params_required:
# Put the parameter into the queue
method_definition += param["name"]
method_definition += ", "
for param in params_optional:
method_definition += param["name"]
        # Optional parameters default to None
method_definition += "=None, "
# Peel off the final ", " and close off the parameter definition
method_definition = method_definition.rstrip(", ") + "):\n"
indent += 4
# re-indent
method_definition += " " * indent
# Begin with description.
method_definition += '"""' + func["description"]
# re-indent
method_definition += "\n\n" + " " * indent
# Go through each parameter and insert description & type hint
for param in params_required + params_optional:
# Add the type
method_definition += ":param " + DTYPE_MAPPING[param["type"].lower()]
# Add the name
method_definition += " " + param["name"] + ": "
# Add the description
method_definition += param["description"]
# Add optionality & reindent
method_definition += "\n" if param[
"is_required"] else " (Optional)\n"
method_definition += " " * indent
open_index = func["returns"].find('(')
close_index = func["returns"].find(
')', (open_index if open_index > -1 else 0))
func["returns"] = func["returns"].replace("\t", " " * 4)
return_string = func["returns"].replace("\n", "")
if open_index < close_index and func["returns"][
open_index + 1:close_index] in DTYPE_MAPPING:
method_definition += ":rtype: " + DTYPE_MAPPING[
func["returns"][open_index + 1:close_index]]
func["returns"] = func["returns"].replace(
func["returns"][open_index:close_index + 1], "")
method_definition += "\n" + " " * indent
method_definition += ":return: " + return_string
for i in range(0, len(return_string) + 1, 80 - (indent + 2)):
method_definition += return_string[i:i + (
80 - (indent + 2))] + "\n" + " " * indent
# Close it off & reindent
method_definition += '"""' + "\n" + " " * indent
# Create the params map
params_map = "__params_map = {"
# Save the indent
params_indent, num_params = len(
params_map), len(params_required) + len(params_optional)
# Append the map to the method_definition
method_definition += params_map
# Go through the required parameters first
for i, param in enumerate(params_required + params_optional):
# append the methods to the map
method_definition += "'" + param["name"] + "': " + param["name"]
if not param["is_required"]:
method_definition + " if " + param[
"name"] + "is not None else None"
# add commas or ending bracket if needed & reindent correctly
method_definition += ",\n" + " " * indent + ' ' * params_indent if i + 1 < num_params else ""
method_definition += '}\n\n' + ' ' * indent
method_definition += "return self.make_request(SERVER_ADDRESS, '" + func["name"] + "', " \
+ params_map.rstrip(" = {") + ", timeout=self.timeout)\n\n"
return method_definition | python | def generate_method_definition(func):
""" Generates the body for the given function
:param dict func: dict of a JSON-Formatted function as defined by the API docs
:return: A String containing the definition for the function as it should be written in code
:rtype: str
"""
indent = 4
# initial definition
method_definition = (" " * indent) + "def " + func["name"]
# Here we just create a queue and put all the parameters
# into the queue in the order that they were given,
params_required = [
param for param in func["arguments"] if param["is_required"]
]
params_optional = [
param for param in func["arguments"]
if not param["is_required"]
]
# Open the parameter definitions
method_definition += "(self, "
for param in params_required:
# Put the parameter into the queue
method_definition += param["name"]
method_definition += ", "
for param in params_optional:
method_definition += param["name"]
        # Optional parameters default to None
method_definition += "=None, "
# Peel off the final ", " and close off the parameter definition
method_definition = method_definition.rstrip(", ") + "):\n"
indent += 4
# re-indent
method_definition += " " * indent
# Begin with description.
method_definition += '"""' + func["description"]
# re-indent
method_definition += "\n\n" + " " * indent
# Go through each parameter and insert description & type hint
for param in params_required + params_optional:
# Add the type
method_definition += ":param " + DTYPE_MAPPING[param["type"].lower()]
# Add the name
method_definition += " " + param["name"] + ": "
# Add the description
method_definition += param["description"]
# Add optionality & reindent
method_definition += "\n" if param[
"is_required"] else " (Optional)\n"
method_definition += " " * indent
open_index = func["returns"].find('(')
close_index = func["returns"].find(
')', (open_index if open_index > -1 else 0))
func["returns"] = func["returns"].replace("\t", " " * 4)
return_string = func["returns"].replace("\n", "")
if open_index < close_index and func["returns"][
open_index + 1:close_index] in DTYPE_MAPPING:
method_definition += ":rtype: " + DTYPE_MAPPING[
func["returns"][open_index + 1:close_index]]
func["returns"] = func["returns"].replace(
func["returns"][open_index:close_index + 1], "")
method_definition += "\n" + " " * indent
method_definition += ":return: " + return_string
for i in range(0, len(return_string) + 1, 80 - (indent + 2)):
method_definition += return_string[i:i + (
80 - (indent + 2))] + "\n" + " " * indent
# Close it off & reindent
method_definition += '"""' + "\n" + " " * indent
# Create the params map
params_map = "__params_map = {"
# Save the indent
params_indent, num_params = len(
params_map), len(params_required) + len(params_optional)
# Append the map to the method_definition
method_definition += params_map
# Go through the required parameters first
for i, param in enumerate(params_required + params_optional):
# append the methods to the map
method_definition += "'" + param["name"] + "': " + param["name"]
if not param["is_required"]:
method_definition + " if " + param[
"name"] + "is not None else None"
# add commas or ending bracket if needed & reindent correctly
method_definition += ",\n" + " " * indent + ' ' * params_indent if i + 1 < num_params else ""
method_definition += '}\n\n' + ' ' * indent
method_definition += "return self.make_request(SERVER_ADDRESS, '" + func["name"] + "', " \
+ params_map.rstrip(" = {") + ", timeout=self.timeout)\n\n"
return method_definition | [
"def",
"generate_method_definition",
"(",
"func",
")",
":",
"indent",
"=",
"4",
"# initial definition",
"method_definition",
"=",
"(",
"\" \"",
"*",
"indent",
")",
"+",
"\"def \"",
"+",
"func",
"[",
"\"name\"",
"]",
"# Here we just create a queue and put all the parameters",
"# into the queue in the order that they were given,",
"params_required",
"=",
"[",
"param",
"for",
"param",
"in",
"func",
"[",
"\"arguments\"",
"]",
"if",
"param",
"[",
"\"is_required\"",
"]",
"]",
"params_optional",
"=",
"[",
"param",
"for",
"param",
"in",
"func",
"[",
"\"arguments\"",
"]",
"if",
"not",
"param",
"[",
"\"is_required\"",
"]",
"]",
"# Open the parameter definitions",
"method_definition",
"+=",
"\"(self, \"",
"for",
"param",
"in",
"params_required",
":",
"# Put the parameter into the queue",
"method_definition",
"+=",
"param",
"[",
"\"name\"",
"]",
"method_definition",
"+=",
"\", \"",
"for",
"param",
"in",
"params_optional",
":",
"method_definition",
"+=",
"param",
"[",
"\"name\"",
"]",
"# Default methods not required",
"method_definition",
"+=",
"\"=None, \"",
"# Peel off the final \", \" and close off the parameter definition",
"method_definition",
"=",
"method_definition",
".",
"rstrip",
"(",
"\", \"",
")",
"+",
"\"):\\n\"",
"indent",
"+=",
"4",
"# re-indent",
"method_definition",
"+=",
"\" \"",
"*",
"indent",
"# Begin with description.",
"method_definition",
"+=",
"'\"\"\"'",
"+",
"func",
"[",
"\"description\"",
"]",
"# re-indent",
"method_definition",
"+=",
"\"\\n\\n\"",
"+",
"\" \"",
"*",
"indent",
"# Go through each parameter and insert description & type hint",
"for",
"param",
"in",
"params_required",
"+",
"params_optional",
":",
"# Add the type",
"method_definition",
"+=",
"\":param \"",
"+",
"DTYPE_MAPPING",
"[",
"param",
"[",
"\"type\"",
"]",
".",
"lower",
"(",
")",
"]",
"# Add the name",
"method_definition",
"+=",
"\" \"",
"+",
"param",
"[",
"\"name\"",
"]",
"+",
"\": \"",
"# Add the description",
"method_definition",
"+=",
"param",
"[",
"\"description\"",
"]",
"# Add optionality & reindent",
"method_definition",
"+=",
"\"\\n\"",
"if",
"param",
"[",
"\"is_required\"",
"]",
"else",
"\" (Optional)\\n\"",
"method_definition",
"+=",
"\" \"",
"*",
"indent",
"open_index",
"=",
"func",
"[",
"\"returns\"",
"]",
".",
"find",
"(",
"'('",
")",
"close_index",
"=",
"func",
"[",
"\"returns\"",
"]",
".",
"find",
"(",
"')'",
",",
"(",
"open_index",
"if",
"open_index",
">",
"-",
"1",
"else",
"0",
")",
")",
"func",
"[",
"\"returns\"",
"]",
"=",
"func",
"[",
"\"returns\"",
"]",
".",
"replace",
"(",
"\"\\t\"",
",",
"\" \"",
"*",
"4",
")",
"return_string",
"=",
"func",
"[",
"\"returns\"",
"]",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"if",
"open_index",
"<",
"close_index",
"and",
"func",
"[",
"\"returns\"",
"]",
"[",
"open_index",
"+",
"1",
":",
"close_index",
"]",
"in",
"DTYPE_MAPPING",
":",
"method_definition",
"+=",
"\":rtype: \"",
"+",
"DTYPE_MAPPING",
"[",
"func",
"[",
"\"returns\"",
"]",
"[",
"open_index",
"+",
"1",
":",
"close_index",
"]",
"]",
"func",
"[",
"\"returns\"",
"]",
"=",
"func",
"[",
"\"returns\"",
"]",
".",
"replace",
"(",
"func",
"[",
"\"returns\"",
"]",
"[",
"open_index",
":",
"close_index",
"+",
"1",
"]",
",",
"\"\"",
")",
"method_definition",
"+=",
"\"\\n\"",
"+",
"\" \"",
"*",
"indent",
"method_definition",
"+=",
"\":return: \"",
"+",
"return_string",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"return_string",
")",
"+",
"1",
",",
"80",
"-",
"(",
"indent",
"+",
"2",
")",
")",
":",
"method_definition",
"+=",
"return_string",
"[",
"i",
":",
"i",
"+",
"(",
"80",
"-",
"(",
"indent",
"+",
"2",
")",
")",
"]",
"+",
"\"\\n\"",
"+",
"\" \"",
"*",
"indent",
"# Close it off & reindent",
"method_definition",
"+=",
"'\"\"\"'",
"+",
"\"\\n\"",
"+",
"\" \"",
"*",
"indent",
"# Create the params map",
"params_map",
"=",
"\"__params_map = {\"",
"# Save the indent",
"params_indent",
",",
"num_params",
"=",
"len",
"(",
"params_map",
")",
",",
"len",
"(",
"params_required",
")",
"+",
"len",
"(",
"params_optional",
")",
"# Append the map to the method_definition",
"method_definition",
"+=",
"params_map",
"# Go through the required parameters first",
"for",
"i",
",",
"param",
"in",
"enumerate",
"(",
"params_required",
"+",
"params_optional",
")",
":",
"# append the methods to the map",
"method_definition",
"+=",
"\"'\"",
"+",
"param",
"[",
"\"name\"",
"]",
"+",
"\"': \"",
"+",
"param",
"[",
"\"name\"",
"]",
"if",
"not",
"param",
"[",
"\"is_required\"",
"]",
":",
"method_definition",
"+",
"\" if \"",
"+",
"param",
"[",
"\"name\"",
"]",
"+",
"\"is not None else None\"",
"# add commas or ending bracket if needed & reindent correctly",
"method_definition",
"+=",
"\",\\n\"",
"+",
"\" \"",
"*",
"indent",
"+",
"' '",
"*",
"params_indent",
"if",
"i",
"+",
"1",
"<",
"num_params",
"else",
"\"\"",
"method_definition",
"+=",
"'}\\n\\n'",
"+",
"' '",
"*",
"indent",
"method_definition",
"+=",
"\"return self.make_request(SERVER_ADDRESS, '\"",
"+",
"func",
"[",
"\"name\"",
"]",
"+",
"\"', \"",
"+",
"params_map",
".",
"rstrip",
"(",
"\" = {\"",
")",
"+",
"\", timeout=self.timeout)\\n\\n\"",
"return",
"method_definition"
] | Generates the body for the given function
:param dict func: dict of a JSON-Formatted function as defined by the API docs
:return: A String containing the definition for the function as it should be written in code
:rtype: str | [
"Generates",
"the",
"body",
"for",
"the",
"given",
"function"
] | train | https://github.com/osilkin98/PyBRY/blob/af86805a8077916f72f3fe980943d4cd741e61f0/generator.py#L39-L162 |
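The generator above can be exercised in isolation with a hand-written function description. The dict below mirrors the keys the function indexes ('name', 'description', 'arguments' entries with 'name'/'type'/'is_required'/'description', and 'returns'); the command itself is hypothetical, and the sketch assumes DTYPE_MAPPING (imported elsewhere in this module) maps the lowercase type names used here, e.g. 'str' and 'int', to Python type hints:

func = {
    "name": "resolve",                      # hypothetical API command
    "description": "Resolve a LBRY name.",
    "arguments": [
        {"name": "uri", "type": "str", "is_required": True,
         "description": "The URI to resolve."},
        {"name": "timeout", "type": "int", "is_required": False,
         "description": "Request timeout in seconds."},
    ],
    "returns": "(dict) Resolved claim information",
}

# Prints the generated, indented method definition as one string:
print(generate_method_definition(func))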
osilkin98/PyBRY | generator.py | generate_lbryd_wrapper | def generate_lbryd_wrapper(url=LBRY_API_RAW_JSON_URL, read_file=__LBRYD_BASE_FPATH__, write_file=LBRYD_FPATH):
""" Generates the actual functions for lbryd_api.py based on lbry's documentation
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:param str read_file: This is the path to the file from which we will be reading
:param str write_file: Path from project root to the file we'll be writing to.
"""
functions = get_lbry_api_function_docs(url)
    # Open the actual file for writing
with open(write_file, 'w') as lbry_file:
lbry_file.write("# This file was generated at build time using the generator function\n")
lbry_file.write("# You may edit but do so with caution\n")
with open(read_file, 'r') as template:
header = template.read()
lbry_file.write(header)
# Iterate through all the functions we retrieved
for func in functions:
method_definition = generate_method_definition(func)
# Write to file
lbry_file.write(method_definition)
try:
from yapf.yapflib.yapf_api import FormatFile
# Now we should format the file using the yapf formatter
FormatFile(write_file, in_place=True)
except ImportError as IE:
print("[Warning]: yapf is not installed, so the generated code will not follow an easy-to-read standard")
print(IE) | python | def generate_lbryd_wrapper(url=LBRY_API_RAW_JSON_URL, read_file=__LBRYD_BASE_FPATH__, write_file=LBRYD_FPATH):
""" Generates the actual functions for lbryd_api.py based on lbry's documentation
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:param str read_file: This is the path to the file from which we will be reading
:param str write_file: Path from project root to the file we'll be writing to.
"""
functions = get_lbry_api_function_docs(url)
    # Open the actual file for writing
with open(write_file, 'w') as lbry_file:
lbry_file.write("# This file was generated at build time using the generator function\n")
lbry_file.write("# You may edit but do so with caution\n")
with open(read_file, 'r') as template:
header = template.read()
lbry_file.write(header)
# Iterate through all the functions we retrieved
for func in functions:
method_definition = generate_method_definition(func)
# Write to file
lbry_file.write(method_definition)
try:
from yapf.yapflib.yapf_api import FormatFile
# Now we should format the file using the yapf formatter
FormatFile(write_file, in_place=True)
except ImportError as IE:
print("[Warning]: yapf is not installed, so the generated code will not follow an easy-to-read standard")
print(IE) | [
"def",
"generate_lbryd_wrapper",
"(",
"url",
"=",
"LBRY_API_RAW_JSON_URL",
",",
"read_file",
"=",
"__LBRYD_BASE_FPATH__",
",",
"write_file",
"=",
"LBRYD_FPATH",
")",
":",
"functions",
"=",
"get_lbry_api_function_docs",
"(",
"url",
")",
"# Open the actual file for appending",
"with",
"open",
"(",
"write_file",
",",
"'w'",
")",
"as",
"lbry_file",
":",
"lbry_file",
".",
"write",
"(",
"\"# This file was generated at build time using the generator function\\n\"",
")",
"lbry_file",
".",
"write",
"(",
"\"# You may edit but do so with caution\\n\"",
")",
"with",
"open",
"(",
"read_file",
",",
"'r'",
")",
"as",
"template",
":",
"header",
"=",
"template",
".",
"read",
"(",
")",
"lbry_file",
".",
"write",
"(",
"header",
")",
"# Iterate through all the functions we retrieved",
"for",
"func",
"in",
"functions",
":",
"method_definition",
"=",
"generate_method_definition",
"(",
"func",
")",
"# Write to file",
"lbry_file",
".",
"write",
"(",
"method_definition",
")",
"try",
":",
"from",
"yapf",
".",
"yapflib",
".",
"yapf_api",
"import",
"FormatFile",
"# Now we should format the file using the yapf formatter",
"FormatFile",
"(",
"write_file",
",",
"in_place",
"=",
"True",
")",
"except",
"ImportError",
"as",
"IE",
":",
"print",
"(",
"\"[Warning]: yapf is not installed, so the generated code will not follow an easy-to-read standard\"",
")",
"print",
"(",
"IE",
")"
] | Generates the actual functions for lbryd_api.py based on lbry's documentation
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:param str read_file: This is the path to the file from which we will be reading
:param str write_file: Path from project root to the file we'll be writing to. | [
"Generates",
"the",
"actual",
"functions",
"for",
"lbryd_api",
".",
"py",
"based",
"on",
"lbry",
"s",
"documentation"
] | train | https://github.com/osilkin98/PyBRY/blob/af86805a8077916f72f3fe980943d4cd741e61f0/generator.py#L167-L205 |
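As the docstring suggests, this function is meant to run once at build time with the pybry defaults; a minimal sketch of that call (the optional yapf reformatting happens inside the function itself):

if __name__ == "__main__":
    # Scrape the LBRY docs and regenerate pybry's lbryd_api.py wrapper:
    generate_lbryd_wrapper()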
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | load_nii | def load_nii(strPathIn, varSzeThr=5000.0):
"""
Load nii file.
Parameters
----------
strPathIn : str
Path to nii file to load.
varSzeThr : float
If the nii file is larger than this threshold (in MB), the file is
loaded volume-by-volume in order to prevent memory overflow. Default
        threshold is 5000 MB.
Returns
-------
aryNii : np.array
Array containing nii data. 32 bit floating point precision.
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which
can lead to a memory overflow even for relatively small files.
"""
# Load nii file (this does not load the data into memory yet):
objNii = nb.load(strPathIn)
# Get size of nii file:
varNiiSze = os.path.getsize(strPathIn)
# Convert to MB:
varNiiSze = np.divide(float(varNiiSze), 1000000.0)
# Load volume-by-volume or all at once, depending on file size:
if np.greater(varNiiSze, float(varSzeThr)):
# Load large nii file
print(('---------Large file size ('
+ str(np.around(varNiiSze))
+ ' MB), reading volume-by-volume'))
# Get image dimensions:
tplSze = objNii.shape
# Create empty array for nii data:
aryNii = np.zeros(tplSze, dtype=np.float32)
# Loop through volumes:
for idxVol in range(tplSze[3]):
aryNii[..., idxVol] = np.asarray(
objNii.dataobj[..., idxVol]).astype(np.float32)
else:
# Load small nii file
# Load nii file (this doesn't load the data into memory yet):
objNii = nb.load(strPathIn)
# Load data into array:
aryNii = np.asarray(objNii.dataobj).astype(np.float32)
# Get headers:
objHdr = objNii.header
# Get 'affine':
aryAff = objNii.affine
# Output nii data (as numpy array), header, and 'affine':
return aryNii, objHdr, aryAff | python | def load_nii(strPathIn, varSzeThr=5000.0):
"""
Load nii file.
Parameters
----------
strPathIn : str
Path to nii file to load.
varSzeThr : float
If the nii file is larger than this threshold (in MB), the file is
loaded volume-by-volume in order to prevent memory overflow. Default
        threshold is 5000 MB.
Returns
-------
aryNii : np.array
Array containing nii data. 32 bit floating point precision.
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which
can lead to a memory overflow even for relatively small files.
"""
# Load nii file (this does not load the data into memory yet):
objNii = nb.load(strPathIn)
# Get size of nii file:
varNiiSze = os.path.getsize(strPathIn)
# Convert to MB:
varNiiSze = np.divide(float(varNiiSze), 1000000.0)
# Load volume-by-volume or all at once, depending on file size:
if np.greater(varNiiSze, float(varSzeThr)):
# Load large nii file
print(('---------Large file size ('
+ str(np.around(varNiiSze))
+ ' MB), reading volume-by-volume'))
# Get image dimensions:
tplSze = objNii.shape
# Create empty array for nii data:
aryNii = np.zeros(tplSze, dtype=np.float32)
# Loop through volumes:
for idxVol in range(tplSze[3]):
aryNii[..., idxVol] = np.asarray(
objNii.dataobj[..., idxVol]).astype(np.float32)
else:
# Load small nii file
# Load nii file (this doesn't load the data into memory yet):
objNii = nb.load(strPathIn)
# Load data into array:
aryNii = np.asarray(objNii.dataobj).astype(np.float32)
# Get headers:
objHdr = objNii.header
# Get 'affine':
aryAff = objNii.affine
# Output nii data (as numpy array), header, and 'affine':
return aryNii, objHdr, aryAff | [
"def",
"load_nii",
"(",
"strPathIn",
",",
"varSzeThr",
"=",
"5000.0",
")",
":",
"# Load nii file (this does not load the data into memory yet):",
"objNii",
"=",
"nb",
".",
"load",
"(",
"strPathIn",
")",
"# Get size of nii file:",
"varNiiSze",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"strPathIn",
")",
"# Convert to MB:",
"varNiiSze",
"=",
"np",
".",
"divide",
"(",
"float",
"(",
"varNiiSze",
")",
",",
"1000000.0",
")",
"# Load volume-by-volume or all at once, depending on file size:",
"if",
"np",
".",
"greater",
"(",
"varNiiSze",
",",
"float",
"(",
"varSzeThr",
")",
")",
":",
"# Load large nii file",
"print",
"(",
"(",
"'---------Large file size ('",
"+",
"str",
"(",
"np",
".",
"around",
"(",
"varNiiSze",
")",
")",
"+",
"' MB), reading volume-by-volume'",
")",
")",
"# Get image dimensions:",
"tplSze",
"=",
"objNii",
".",
"shape",
"# Create empty array for nii data:",
"aryNii",
"=",
"np",
".",
"zeros",
"(",
"tplSze",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# Loop through volumes:",
"for",
"idxVol",
"in",
"range",
"(",
"tplSze",
"[",
"3",
"]",
")",
":",
"aryNii",
"[",
"...",
",",
"idxVol",
"]",
"=",
"np",
".",
"asarray",
"(",
"objNii",
".",
"dataobj",
"[",
"...",
",",
"idxVol",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"else",
":",
"# Load small nii file",
"# Load nii file (this doesn't load the data into memory yet):",
"objNii",
"=",
"nb",
".",
"load",
"(",
"strPathIn",
")",
"# Load data into array:",
"aryNii",
"=",
"np",
".",
"asarray",
"(",
"objNii",
".",
"dataobj",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# Get headers:",
"objHdr",
"=",
"objNii",
".",
"header",
"# Get 'affine':",
"aryAff",
"=",
"objNii",
".",
"affine",
"# Output nii data (as numpy array), header, and 'affine':",
"return",
"aryNii",
",",
"objHdr",
",",
"aryAff"
] | Load nii file.
Parameters
----------
strPathIn : str
Path to nii file to load.
varSzeThr : float
If the nii file is larger than this threshold (in MB), the file is
loaded volume-by-volume in order to prevent memory overflow. Default
    threshold is 5000 MB.
Returns
-------
aryNii : np.array
Array containing nii data. 32 bit floating point precision.
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which
can lead to a memory overflow even for relatively small files. | [
"Load",
"nii",
"file",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L26-L102 |
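A short usage sketch for load_nii; 'func.nii.gz' is a placeholder path, and the lowered threshold forces the volume-by-volume branch for any file above 1000 MB:

aryNii, objHdr, aryAff = load_nii('func.nii.gz', varSzeThr=1000.0)

# Data always come back as float32, regardless of the on-disk dtype:
print(aryNii.shape, aryNii.dtype)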
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | load_res_prm | def load_res_prm(lstFunc, lstFlsMsk=None):
"""Load result parameters from multiple nii files, with optional mask.
Parameters
----------
    lstFunc : list
list of str with file names of 3D or 4D nii files
lstFlsMsk : list, optional
list of str with paths to 3D nii files that can act as mask/s
Returns
-------
lstPrmAry : list
The list will contain as many numpy arrays as masks were provided.
Each array is 2D with shape [nr voxel in mask, nr nii files in lstFunc]
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
"""
# load parameter/functional maps into a list
lstPrm = []
for ind, path in enumerate(lstFunc):
aryFnc = load_nii(path)[0].astype(np.float32)
if aryFnc.ndim == 3:
lstPrm.append(aryFnc)
# handle cases where nii array is 4D, in this case split arrays up in
        # 3D arrays and append those
elif aryFnc.ndim == 4:
for indAx in range(aryFnc.shape[-1]):
lstPrm.append(aryFnc[..., indAx])
# load mask/s if available
if lstFlsMsk is not None:
lstMsk = [None] * len(lstFlsMsk)
for ind, path in enumerate(lstFlsMsk):
aryMsk = load_nii(path)[0].astype(np.bool)
lstMsk[ind] = aryMsk
else:
print('------------No masks were provided')
if lstFlsMsk is None:
# if no mask was provided we just flatten all parameter array in list
# and return resulting list
lstPrmAry = [ary.flatten() for ary in lstPrm]
else:
# if masks are available, we loop over masks and then over parameter
# maps to extract selected voxels and parameters
lstPrmAry = [None] * len(lstFlsMsk)
for indLst, aryMsk in enumerate(lstMsk):
# prepare array that will hold parameter values of selected voxels
aryPrmSel = np.empty((np.sum(aryMsk), len(lstPrm)),
dtype=np.float32)
# loop over different parameter maps
for indAry, aryPrm in enumerate(lstPrm):
# get voxels specific to this mask
aryPrmSel[:, indAry] = aryPrm[aryMsk, ...]
            # put extracted parameters for this mask away in the list
lstPrmAry[indLst] = aryPrmSel
# also get header object and affine array
# we simply take it for the first functional nii file, cause that is the
# only file that has to be provided by necessity
objHdr, aryAff = load_nii(lstFunc[0])[1:]
return lstPrmAry, objHdr, aryAff | python | def load_res_prm(lstFunc, lstFlsMsk=None):
"""Load result parameters from multiple nii files, with optional mask.
Parameters
----------
    lstFunc : list
list of str with file names of 3D or 4D nii files
lstFlsMsk : list, optional
list of str with paths to 3D nii files that can act as mask/s
Returns
-------
lstPrmAry : list
The list will contain as many numpy arrays as masks were provided.
Each array is 2D with shape [nr voxel in mask, nr nii files in lstFunc]
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
"""
# load parameter/functional maps into a list
lstPrm = []
for ind, path in enumerate(lstFunc):
aryFnc = load_nii(path)[0].astype(np.float32)
if aryFnc.ndim == 3:
lstPrm.append(aryFnc)
# handle cases where nii array is 4D, in this case split arrays up in
        # 3D arrays and append those
elif aryFnc.ndim == 4:
for indAx in range(aryFnc.shape[-1]):
lstPrm.append(aryFnc[..., indAx])
# load mask/s if available
if lstFlsMsk is not None:
lstMsk = [None] * len(lstFlsMsk)
for ind, path in enumerate(lstFlsMsk):
aryMsk = load_nii(path)[0].astype(np.bool)
lstMsk[ind] = aryMsk
else:
print('------------No masks were provided')
if lstFlsMsk is None:
# if no mask was provided we just flatten all parameter array in list
# and return resulting list
lstPrmAry = [ary.flatten() for ary in lstPrm]
else:
# if masks are available, we loop over masks and then over parameter
# maps to extract selected voxels and parameters
lstPrmAry = [None] * len(lstFlsMsk)
for indLst, aryMsk in enumerate(lstMsk):
# prepare array that will hold parameter values of selected voxels
aryPrmSel = np.empty((np.sum(aryMsk), len(lstPrm)),
dtype=np.float32)
# loop over different parameter maps
for indAry, aryPrm in enumerate(lstPrm):
# get voxels specific to this mask
aryPrmSel[:, indAry] = aryPrm[aryMsk, ...]
            # put extracted parameters for this mask away in the list
lstPrmAry[indLst] = aryPrmSel
# also get header object and affine array
# we simply take it for the first functional nii file, cause that is the
# only file that has to be provided by necessity
objHdr, aryAff = load_nii(lstFunc[0])[1:]
return lstPrmAry, objHdr, aryAff | [
"def",
"load_res_prm",
"(",
"lstFunc",
",",
"lstFlsMsk",
"=",
"None",
")",
":",
"# load parameter/functional maps into a list",
"lstPrm",
"=",
"[",
"]",
"for",
"ind",
",",
"path",
"in",
"enumerate",
"(",
"lstFunc",
")",
":",
"aryFnc",
"=",
"load_nii",
"(",
"path",
")",
"[",
"0",
"]",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"if",
"aryFnc",
".",
"ndim",
"==",
"3",
":",
"lstPrm",
".",
"append",
"(",
"aryFnc",
")",
"# handle cases where nii array is 4D, in this case split arrays up in",
"# 3D arrays and appenbd those",
"elif",
"aryFnc",
".",
"ndim",
"==",
"4",
":",
"for",
"indAx",
"in",
"range",
"(",
"aryFnc",
".",
"shape",
"[",
"-",
"1",
"]",
")",
":",
"lstPrm",
".",
"append",
"(",
"aryFnc",
"[",
"...",
",",
"indAx",
"]",
")",
"# load mask/s if available",
"if",
"lstFlsMsk",
"is",
"not",
"None",
":",
"lstMsk",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"lstFlsMsk",
")",
"for",
"ind",
",",
"path",
"in",
"enumerate",
"(",
"lstFlsMsk",
")",
":",
"aryMsk",
"=",
"load_nii",
"(",
"path",
")",
"[",
"0",
"]",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"lstMsk",
"[",
"ind",
"]",
"=",
"aryMsk",
"else",
":",
"print",
"(",
"'------------No masks were provided'",
")",
"if",
"lstFlsMsk",
"is",
"None",
":",
"# if no mask was provided we just flatten all parameter array in list",
"# and return resulting list",
"lstPrmAry",
"=",
"[",
"ary",
".",
"flatten",
"(",
")",
"for",
"ary",
"in",
"lstPrm",
"]",
"else",
":",
"# if masks are available, we loop over masks and then over parameter",
"# maps to extract selected voxels and parameters",
"lstPrmAry",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"lstFlsMsk",
")",
"for",
"indLst",
",",
"aryMsk",
"in",
"enumerate",
"(",
"lstMsk",
")",
":",
"# prepare array that will hold parameter values of selected voxels",
"aryPrmSel",
"=",
"np",
".",
"empty",
"(",
"(",
"np",
".",
"sum",
"(",
"aryMsk",
")",
",",
"len",
"(",
"lstPrm",
")",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# loop over different parameter maps",
"for",
"indAry",
",",
"aryPrm",
"in",
"enumerate",
"(",
"lstPrm",
")",
":",
"# get voxels specific to this mask",
"aryPrmSel",
"[",
":",
",",
"indAry",
"]",
"=",
"aryPrm",
"[",
"aryMsk",
",",
"...",
"]",
"# put array away in list, if only one parameter map was provided",
"# the output will be squeezed",
"lstPrmAry",
"[",
"indLst",
"]",
"=",
"aryPrmSel",
"# also get header object and affine array",
"# we simply take it for the first functional nii file, cause that is the",
"# only file that has to be provided by necessity",
"objHdr",
",",
"aryAff",
"=",
"load_nii",
"(",
"lstFunc",
"[",
"0",
"]",
")",
"[",
"1",
":",
"]",
"return",
"lstPrmAry",
",",
"objHdr",
",",
"aryAff"
] | Load result parameters from multiple nii files, with optional mask.
Parameters
----------
lstFunc : list
list of str with file names of 3D or 4D nii files
lstFlsMsk : list, optional
list of str with paths to 3D nii files that can act as mask/s
Returns
-------
lstPrmAry : list
The list will contain as many numpy arrays as masks were provided.
Each array is 2D with shape [nr voxel in mask, nr nii files in lstFunc]
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data. | [
"Load",
"result",
"parameters",
"from",
"multiple",
"nii",
"files",
"with",
"optional",
"mask",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L105-L173 |
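A usage sketch for load_res_prm with two hypothetical ROI masks; all file names are placeholders:

lstFunc = ['prf_xpos.nii.gz', 'prf_ypos.nii.gz', 'prf_sd.nii.gz']
lstFlsMsk = ['mask_V1.nii.gz', 'mask_V2.nii.gz']

lstPrmAry, objHdr, aryAff = load_res_prm(lstFunc, lstFlsMsk=lstFlsMsk)

# One 2D array per mask: [number of voxels in that mask, number of maps]
print(lstPrmAry[0].shape, lstPrmAry[1].shape)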
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | export_nii | def export_nii(ary2dNii, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff,
hdrMsk, outFormat='3D'):
"""
Export nii file(s).
Parameters
----------
ary2dNii : numpy array
Numpy array with results to be exported to nii.
lstNiiNames : list
List that contains strings with the complete file names.
aryLgcMsk : numpy array
        1D numpy array containing logical values. One value per voxel in the
        full volume. If `True`, the voxel was included in the mask; used to
        place results back into their original positions.
aryLgcVar : np.array
1D numpy array containing logical values. One value per voxel after
mask has been applied. If `True`, the variance and mean of the voxel's
time course are greater than the provided thresholds in all runs and
the voxel is included in the output array (`aryFunc`). If `False`, the
variance or mean of the voxel's time course is lower than threshold in
at least one run and the voxel has been excluded from the output
(`aryFunc`). This is to avoid problems in the subsequent model fitting.
This array is necessary to put results into original dimensions after
model fitting.
tplNiiShp : tuple
Tuple that describes the 3D shape of the output volume
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
hdrMsk : nibabel-header-object
Nii header of mask.
outFormat : string, either '3D' or '4D'
        String specifying whether images will be saved as separate 3D nii
        files or one 4D nii file.
Notes
-----
[1] This function does not return any arrays but instead saves to disk.
    [2] Depending on whether outFormat is '3D' or '4D', images will be saved
    as separate 3D nii files or one 4D nii file.
"""
# Number of voxels that were included in the mask:
varNumVoxMsk = np.sum(aryLgcMsk)
# Number of maps in ary2dNii
varNumMaps = ary2dNii.shape[-1]
    # Place voxels based on low-variance exclusion:
aryPrfRes01 = np.zeros((varNumVoxMsk, varNumMaps), dtype=np.float32)
for indMap in range(varNumMaps):
aryPrfRes01[aryLgcVar, indMap] = ary2dNii[:, indMap]
# Total number of voxels:
varNumVoxTlt = (tplNiiShp[0] * tplNiiShp[1] * tplNiiShp[2])
# Place voxels based on mask-exclusion:
aryPrfRes02 = np.zeros((varNumVoxTlt, aryPrfRes01.shape[-1]),
dtype=np.float32)
for indDim in range(aryPrfRes01.shape[-1]):
aryPrfRes02[aryLgcMsk, indDim] = aryPrfRes01[:, indDim]
# Reshape pRF finding results into original image dimensions:
aryPrfRes = np.reshape(aryPrfRes02,
[tplNiiShp[0],
tplNiiShp[1],
tplNiiShp[2],
aryPrfRes01.shape[-1]])
if outFormat == '3D':
# Save nii results:
for idxOut in range(0, aryPrfRes.shape[-1]):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryPrfRes[..., idxOut],
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = lstNiiNames[idxOut]
nb.save(niiOut, strTmp)
elif outFormat == '4D':
# adjust header
hdrMsk.set_data_shape(aryPrfRes.shape)
# Create nii object for results:
niiOut = nb.Nifti1Image(aryPrfRes,
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = lstNiiNames[0]
nb.save(niiOut, strTmp) | python | def export_nii(ary2dNii, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff,
hdrMsk, outFormat='3D'):
"""
Export nii file(s).
Parameters
----------
ary2dNii : numpy array
Numpy array with results to be exported to nii.
lstNiiNames : list
List that contains strings with the complete file names.
aryLgcMsk : numpy array
        1D numpy array containing logical values. One value per voxel in the
        full volume. If `True`, the voxel was included in the mask; used to
        place results back into their original positions.
aryLgcVar : np.array
1D numpy array containing logical values. One value per voxel after
mask has been applied. If `True`, the variance and mean of the voxel's
time course are greater than the provided thresholds in all runs and
the voxel is included in the output array (`aryFunc`). If `False`, the
variance or mean of the voxel's time course is lower than threshold in
at least one run and the voxel has been excluded from the output
(`aryFunc`). This is to avoid problems in the subsequent model fitting.
This array is necessary to put results into original dimensions after
model fitting.
tplNiiShp : tuple
Tuple that describes the 3D shape of the output volume
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
hdrMsk : nibabel-header-object
Nii header of mask.
outFormat : string, either '3D' or '4D'
        String specifying whether images will be saved as separate 3D nii
        files or one 4D nii file.
Notes
-----
[1] This function does not return any arrays but instead saves to disk.
    [2] Depending on whether outFormat is '3D' or '4D', images will be saved
    as separate 3D nii files or one 4D nii file.
"""
# Number of voxels that were included in the mask:
varNumVoxMsk = np.sum(aryLgcMsk)
# Number of maps in ary2dNii
varNumMaps = ary2dNii.shape[-1]
    # Place voxels based on low-variance exclusion:
aryPrfRes01 = np.zeros((varNumVoxMsk, varNumMaps), dtype=np.float32)
for indMap in range(varNumMaps):
aryPrfRes01[aryLgcVar, indMap] = ary2dNii[:, indMap]
# Total number of voxels:
varNumVoxTlt = (tplNiiShp[0] * tplNiiShp[1] * tplNiiShp[2])
# Place voxels based on mask-exclusion:
aryPrfRes02 = np.zeros((varNumVoxTlt, aryPrfRes01.shape[-1]),
dtype=np.float32)
for indDim in range(aryPrfRes01.shape[-1]):
aryPrfRes02[aryLgcMsk, indDim] = aryPrfRes01[:, indDim]
# Reshape pRF finding results into original image dimensions:
aryPrfRes = np.reshape(aryPrfRes02,
[tplNiiShp[0],
tplNiiShp[1],
tplNiiShp[2],
aryPrfRes01.shape[-1]])
if outFormat == '3D':
# Save nii results:
for idxOut in range(0, aryPrfRes.shape[-1]):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryPrfRes[..., idxOut],
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = lstNiiNames[idxOut]
nb.save(niiOut, strTmp)
elif outFormat == '4D':
# adjust header
hdrMsk.set_data_shape(aryPrfRes.shape)
# Create nii object for results:
niiOut = nb.Nifti1Image(aryPrfRes,
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = lstNiiNames[0]
nb.save(niiOut, strTmp) | [
"def",
"export_nii",
"(",
"ary2dNii",
",",
"lstNiiNames",
",",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"tplNiiShp",
",",
"aryAff",
",",
"hdrMsk",
",",
"outFormat",
"=",
"'3D'",
")",
":",
"# Number of voxels that were included in the mask:",
"varNumVoxMsk",
"=",
"np",
".",
"sum",
"(",
"aryLgcMsk",
")",
"# Number of maps in ary2dNii",
"varNumMaps",
"=",
"ary2dNii",
".",
"shape",
"[",
"-",
"1",
"]",
"# Place voxels based on low-variance exlusion:",
"aryPrfRes01",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxMsk",
",",
"varNumMaps",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"indMap",
"in",
"range",
"(",
"varNumMaps",
")",
":",
"aryPrfRes01",
"[",
"aryLgcVar",
",",
"indMap",
"]",
"=",
"ary2dNii",
"[",
":",
",",
"indMap",
"]",
"# Total number of voxels:",
"varNumVoxTlt",
"=",
"(",
"tplNiiShp",
"[",
"0",
"]",
"*",
"tplNiiShp",
"[",
"1",
"]",
"*",
"tplNiiShp",
"[",
"2",
"]",
")",
"# Place voxels based on mask-exclusion:",
"aryPrfRes02",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxTlt",
",",
"aryPrfRes01",
".",
"shape",
"[",
"-",
"1",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"indDim",
"in",
"range",
"(",
"aryPrfRes01",
".",
"shape",
"[",
"-",
"1",
"]",
")",
":",
"aryPrfRes02",
"[",
"aryLgcMsk",
",",
"indDim",
"]",
"=",
"aryPrfRes01",
"[",
":",
",",
"indDim",
"]",
"# Reshape pRF finding results into original image dimensions:",
"aryPrfRes",
"=",
"np",
".",
"reshape",
"(",
"aryPrfRes02",
",",
"[",
"tplNiiShp",
"[",
"0",
"]",
",",
"tplNiiShp",
"[",
"1",
"]",
",",
"tplNiiShp",
"[",
"2",
"]",
",",
"aryPrfRes01",
".",
"shape",
"[",
"-",
"1",
"]",
"]",
")",
"if",
"outFormat",
"==",
"'3D'",
":",
"# Save nii results:",
"for",
"idxOut",
"in",
"range",
"(",
"0",
",",
"aryPrfRes",
".",
"shape",
"[",
"-",
"1",
"]",
")",
":",
"# Create nii object for results:",
"niiOut",
"=",
"nb",
".",
"Nifti1Image",
"(",
"aryPrfRes",
"[",
"...",
",",
"idxOut",
"]",
",",
"aryAff",
",",
"header",
"=",
"hdrMsk",
")",
"# Save nii:",
"strTmp",
"=",
"lstNiiNames",
"[",
"idxOut",
"]",
"nb",
".",
"save",
"(",
"niiOut",
",",
"strTmp",
")",
"elif",
"outFormat",
"==",
"'4D'",
":",
"# adjust header",
"hdrMsk",
".",
"set_data_shape",
"(",
"aryPrfRes",
".",
"shape",
")",
"# Create nii object for results:",
"niiOut",
"=",
"nb",
".",
"Nifti1Image",
"(",
"aryPrfRes",
",",
"aryAff",
",",
"header",
"=",
"hdrMsk",
")",
"# Save nii:",
"strTmp",
"=",
"lstNiiNames",
"[",
"0",
"]",
"nb",
".",
"save",
"(",
"niiOut",
",",
"strTmp",
")"
] | Export nii file(s).
Parameters
----------
ary2dNii : numpy array
Numpy array with results to be exported to nii.
lstNiiNames : list
List that contains strings with the complete file names.
aryLgcMsk : numpy array
    1D numpy array containing logical values. One value per voxel in the
    full volume. If `True`, the voxel was included in the mask; used to
    place results back into their original positions.
aryLgcVar : np.array
1D numpy array containing logical values. One value per voxel after
mask has been applied. If `True`, the variance and mean of the voxel's
time course are greater than the provided thresholds in all runs and
the voxel is included in the output array (`aryFunc`). If `False`, the
variance or mean of the voxel's time course is lower than threshold in
at least one run and the voxel has been excluded from the output
(`aryFunc`). This is to avoid problems in the subsequent model fitting.
This array is necessary to put results into original dimensions after
model fitting.
tplNiiShp : tuple
Tuple that describes the 3D shape of the output volume
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
hdrMsk : nibabel-header-object
Nii header of mask.
outFormat : string, either '3D' or '4D'
    String specifying whether images will be saved as separate 3D nii
    files or one 4D nii file.
Notes
-----
[1] This function does not return any arrays but instead saves to disk.
[2] Depending on whether outFormat is '3D' or '4D', images will be saved
as separate 3D nii files or one 4D nii file.
"Export",
"nii",
"file",
"(",
"s",
")",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L176-L270 |
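A sketch of a typical call, assuming the logicals, shape, affine, and header were obtained when the data were loaded and masked (for instance via load_nii above); the output names are placeholders:

# ary2dNii holds one column per map and one row per voxel that survived
# both the mask (aryLgcMsk) and the variance threshold (aryLgcVar):
lstNiiNames = ['out_xpos.nii.gz', 'out_ypos.nii.gz']

export_nii(ary2dNii, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
           aryAff, hdrMsk, outFormat='3D')  # writes one 3D file per map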
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | joinRes | def joinRes(lstPrfRes, varPar, idxPos, inFormat='1D'):
"""Join results from different processing units (here cores).
Parameters
----------
lstPrfRes : list
Output of results from parallelization.
varPar : integer, positive
        Number of cores that were used during parallelization.
    idxPos : integer, positive
        Position index at which each processing unit's result list holds the
        array of interest.
inFormat : string
Specifies whether input will be 1d or 2d.
Returns
-------
aryOut : numpy array
Numpy array with results collected from different cores
"""
if inFormat == '1D':
# initialize output array
aryOut = np.zeros((0,))
# gather arrays from different processing units
for idxRes in range(0, varPar):
aryOut = np.append(aryOut, lstPrfRes[idxRes][idxPos])
elif inFormat == '2D':
# initialize output array
aryOut = np.zeros((0, lstPrfRes[0][idxPos].shape[-1]))
# gather arrays from different processing units
for idxRes in range(0, varPar):
aryOut = np.concatenate((aryOut, lstPrfRes[idxRes][idxPos]),
axis=0)
return aryOut | python | def joinRes(lstPrfRes, varPar, idxPos, inFormat='1D'):
"""Join results from different processing units (here cores).
Parameters
----------
lstPrfRes : list
Output of results from parallelization.
varPar : integer, positive
        Number of cores that were used during parallelization.
    idxPos : integer, positive
        Position index at which each processing unit's result list holds the
        array of interest.
inFormat : string
Specifies whether input will be 1d or 2d.
Returns
-------
aryOut : numpy array
Numpy array with results collected from different cores
"""
if inFormat == '1D':
# initialize output array
aryOut = np.zeros((0,))
# gather arrays from different processing units
for idxRes in range(0, varPar):
aryOut = np.append(aryOut, lstPrfRes[idxRes][idxPos])
elif inFormat == '2D':
# initialize output array
aryOut = np.zeros((0, lstPrfRes[0][idxPos].shape[-1]))
# gather arrays from different processing units
for idxRes in range(0, varPar):
aryOut = np.concatenate((aryOut, lstPrfRes[idxRes][idxPos]),
axis=0)
return aryOut | [
"def",
"joinRes",
"(",
"lstPrfRes",
",",
"varPar",
",",
"idxPos",
",",
"inFormat",
"=",
"'1D'",
")",
":",
"if",
"inFormat",
"==",
"'1D'",
":",
"# initialize output array",
"aryOut",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
")",
")",
"# gather arrays from different processing units",
"for",
"idxRes",
"in",
"range",
"(",
"0",
",",
"varPar",
")",
":",
"aryOut",
"=",
"np",
".",
"append",
"(",
"aryOut",
",",
"lstPrfRes",
"[",
"idxRes",
"]",
"[",
"idxPos",
"]",
")",
"elif",
"inFormat",
"==",
"'2D'",
":",
"# initialize output array",
"aryOut",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
"lstPrfRes",
"[",
"0",
"]",
"[",
"idxPos",
"]",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"# gather arrays from different processing units",
"for",
"idxRes",
"in",
"range",
"(",
"0",
",",
"varPar",
")",
":",
"aryOut",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryOut",
",",
"lstPrfRes",
"[",
"idxRes",
"]",
"[",
"idxPos",
"]",
")",
",",
"axis",
"=",
"0",
")",
"return",
"aryOut"
] | Join results from different processing units (here cores).
Parameters
----------
lstPrfRes : list
Output of results from parallelization.
varPar : integer, positive
    Number of cores that were used during parallelization.
idxPos : integer, positive
    Position index at which each processing unit's result list holds the
    array of interest.
inFormat : string
Specifies whether input will be 1d or 2d.
Returns
-------
aryOut : numpy array
Numpy array with results collected from different cores | [
"Join",
"results",
"from",
"different",
"processing",
"units",
"(",
"here",
"cores",
")",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L273-L309 |
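A sketch of collecting results after multiprocessing, assuming each of four workers returned a result list whose position 1 holds a 1D array (e.g. R2 values) and position 2 a 2D array (e.g. parameter estimates); the positions are illustrative:

# lstPrfRes[i] is the result list returned by worker i:
aryR2 = joinRes(lstPrfRes, varPar=4, idxPos=1, inFormat='1D')
aryPrm = joinRes(lstPrfRes, varPar=4, idxPos=2, inFormat='2D')

print(aryR2.shape, aryPrm.shape)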
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | cmp_res_R2 | def cmp_res_R2(lstRat, lstNiiNames, strPathOut, strPathMdl, lgcSveMdlTc=True,
lgcDel=False, strNmeExt=''):
""""Compare results for different exponents and create winner nii.
Parameters
----------
lstRat : list
List of floats containing the ratios that were tested for surround
suppression.
lstNiiNames : list
List of names of the different pRF maps (e.g. xpos, ypos, SD)
strPathOut : string
Path to the parent directory where the results should be saved.
strPathMdl : string
Path to the parent directory where pRF models should be saved.
    lgcSveMdlTc : boolean
        Should model time courses be saved as npy file?
    lgcDel : boolean
        Should in-between results (in form of nii files) be deleted?
strNmeExt : string
        Extra name appendix to denote the experiment name. If undesired,
        provide an empty string.
Notes
-----
[1] This function does not return any arrays but instead saves to disk.
"""
print('---Compare results for different ratios')
# Extract the index position for R2 and Betas map in lstNiiNames
indPosR2 = [ind for ind, item in enumerate(lstNiiNames) if 'R2' in item]
indPosBetas = [ind for ind, item in enumerate(lstNiiNames) if 'Betas' in
item]
# Check that only one index was found
    msgError = 'More than one nii file was provided that could serve as R2/Betas map'
assert len(indPosR2) == 1, msgError
assert len(indPosBetas) == 1, msgError
    # turn list into index
indPosR2 = indPosR2[0]
indPosBetas = indPosBetas[0]
# Get the names of the nii files with in-between results
lstCmpRes = []
for indRat in range(len(lstRat)):
# Get strExpSve
strExpSve = '_' + str(lstRat[indRat])
# If ratio is marked with 1.0, set empty string to find results.
# 1.0 is the key for fitting without a surround.
if lstRat[indRat] == 1.0:
strExpSve = ''
# Create full path names from nii file names and output path
lstPthNames = [strPathOut + strNii + strNmeExt + strExpSve + '.nii.gz'
for strNii in lstNiiNames]
        # Append list to list that contains nii names for all ratios
lstCmpRes.append(lstPthNames)
print('------Find ratio that yielded highest R2 per voxel')
# Initialize winner R2 map with R2 values from fit without surround
aryWnrR2 = load_nii(lstCmpRes[0][indPosR2])[0]
# Initialize ratio map with 1 where no-surround model was fit, otherwise 0
aryRatMap = np.zeros(aryWnrR2.shape)
aryRatMap[np.nonzero(aryWnrR2)] = 1.0
    # Loop over R2 maps to establish which ratio wins
# Skip the first ratio, since this is the reference ratio (no surround)
# and is reflected already in the initialized arrays - aryWnrR2 & aryRatMap
for indRat, lstMaps in zip(lstRat[1:], lstCmpRes[1:]):
        # Load R2 map for this particular ratio
        aryTmpR2 = load_nii(lstMaps[indPosR2])[0]
        # Load beta values for this particular ratio
aryTmpBetas = load_nii(lstMaps[indPosBetas])[0]
# Get logical that tells us where current R2 map is greater than
# previous ones
aryLgcWnr = np.greater(aryTmpR2, aryWnrR2)
# Get logical that tells us where the beta parameter estimate for the
# centre is positive and the estimate for the surround is negative
aryLgcCtrSur1 = np.logical_and(np.greater(aryTmpBetas[..., 0], 0.0),
np.less(aryTmpBetas[..., 1], 0.0))
# Get logical that tells us where the absolute beta parameter estimate
# for the surround is less than beta parameter estimate for the center
aryLgcCtrSur2 = np.less(np.abs(aryTmpBetas[..., 1]),
np.abs(aryTmpBetas[..., 0]))
# Combine the two logicals
aryLgcCtrSur = np.logical_and(aryLgcCtrSur1, aryLgcCtrSur2)
# Combine logical for winner R2 and center-surround conditions
aryLgcWnr = np.logical_and(aryLgcWnr, aryLgcCtrSur)
# Replace values of R2, where current R2 map was greater
aryWnrR2[aryLgcWnr] = np.copy(aryTmpR2[aryLgcWnr])
        # Remember the ratio that gave rise to this new R2
aryRatMap[aryLgcWnr] = indRat
# Initialize list with winner maps. The winner maps are initialized with
# the same shape as the maps that the last tested ratio maps had.
lstRatMap = []
for strPthMaps in lstCmpRes[-1]:
lstRatMap.append(np.zeros(nb.load(strPthMaps).shape))
    # Compose the other maps by taking, for each voxel, the value from the
    # map produced by the winning ratio
for indRat, lstMaps in zip(lstRat, lstCmpRes):
# Find out where this exponent won in terms of R2
lgcWinnerMap = [aryRatMap == indRat][0]
# Loop over all the maps
for indMap, _ in enumerate(lstMaps):
# Load map for this particular ratio
aryTmpMap = load_nii(lstMaps[indMap])[0]
            # Handle exception: the beta map will be 1D if it comes from
            # ratio 1.0. In this case we want to make it 2D. In particular,
            # the second set of beta weights should be all zeros, so that
            # later, when forming the model time course, the 2nd predictor
            # contributes 0
if indRat == 1.0 and indMap == indPosBetas:
aryTmpMap = np.concatenate((aryTmpMap,
np.zeros(aryTmpMap.shape)),
axis=-1)
# Load current winner map from array
aryCrrWnrMap = np.copy(lstRatMap[indMap])
# Assign values in temporary map to current winner map for voxels
# where this ratio won
aryCrrWnrMap[lgcWinnerMap] = np.copy(aryTmpMap[lgcWinnerMap])
lstRatMap[indMap] = aryCrrWnrMap
print('------Export results as nii')
# Save winner maps as nii files
# Get header and affine array
hdrMsk, aryAff = load_nii(lstMaps[indPosR2])[1:]
# Loop over all the maps
for indMap, aryMap in enumerate(lstRatMap):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryMap,
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = strPathOut + '_supsur' + lstNiiNames[indMap] + strNmeExt + \
'.nii.gz'
nb.save(niiOut, strTmp)
# Save map with best ratios as nii
niiOut = nb.Nifti1Image(aryRatMap,
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = strPathOut + '_supsur' + '_Ratios' + strNmeExt + '.nii.gz'
nb.save(niiOut, strTmp)
if lgcSveMdlTc:
print('------Save model time courses/parameters/responses for ' +
'centre and surround, across all ratios')
        # Get the names of the npy files with in-between model responses
lstCmpMdlRsp = []
for indRat in range(len(lstRat)):
# Get strExpSve
strExpSve = '_' + str(lstRat[indRat])
            # If ratio is marked with 1.0, set empty string to find results.
            # 1.0 is the key for fitting without a surround.
if lstRat[indRat] == 1.0:
strExpSve = ''
# Create full path names from npy file names and output path
lstPthNames = [strPathMdl + strNpy + strNmeExt + strExpSve + '.npy'
for strNpy in ['', '_params', '_mdlRsp']]
# Append list to list that contains nii names for all exponents
lstCmpMdlRsp.append(lstPthNames)
# Load tc/parameters/responses for different ratios, for now skip "0.0"
# ratio because its tc/parameters/responses differs in shape
lstPrfTcSur = []
lstMdlParamsSur = []
lstMdlRspSur = []
for indNpy, lstNpy in enumerate(lstCmpMdlRsp[1:]):
lstPrfTcSur.append(np.load(lstNpy[0]))
lstMdlParamsSur.append(np.load(lstNpy[1]))
lstMdlRspSur.append(np.load(lstNpy[2]))
# Turn into arrays
aryPrfTcSur = np.stack(lstPrfTcSur, axis=2)
aryMdlParamsSur = np.stack(lstMdlParamsSur, axis=2)
aryMdlRspSur = np.stack(lstMdlRspSur, axis=2)
# Now handle the "1.0" ratio
# Load the tc/parameters/responses of the "1.0" ratio
aryPrfTc = np.load(lstCmpMdlRsp[0][0])
aryMdlParams = np.load(lstCmpMdlRsp[0][1])
aryMdlRsp = np.load(lstCmpMdlRsp[0][2])
# Make 2nd row of time courses all zeros so they get no weight in lstsq
aryPrfTc = np.concatenate((aryPrfTc, np.zeros(aryPrfTc.shape)), axis=1)
# Make 2nd row of parameters the same as first row
aryMdlParams = np.stack((aryMdlParams, aryMdlParams), axis=1)
# Make 2nd row of responses all zeros so they get no weight in lstsq
aryMdlRsp = np.stack((aryMdlRsp, np.zeros(aryMdlRsp.shape)), axis=1)
# Add the "1.0" ratio to tc/parameters/responses of other ratios
aryPrfTcSur = np.concatenate((np.expand_dims(aryPrfTc, axis=2),
aryPrfTcSur), axis=2)
aryMdlParamsSur = np.concatenate((np.expand_dims(aryMdlParams, axis=2),
aryMdlParamsSur), axis=2)
aryMdlRspSur = np.concatenate((np.expand_dims(aryMdlRsp, axis=2),
aryMdlRspSur), axis=2)
# Save parameters/response for centre and surround, for all ratios
np.save(strPathMdl + '_supsur' + '', aryPrfTcSur)
np.save(strPathMdl + '_supsur' + '_params', aryMdlParamsSur)
np.save(strPathMdl + '_supsur' + '_mdlRsp', aryMdlRspSur)
# Delete all the inbetween results, if desired by user, skip "0.0" ratio
if lgcDel:
lstCmpRes = [item for sublist in lstCmpRes[1:] for item in sublist]
print('------Delete in-between results')
for strMap in lstCmpRes[:]:
os.remove(strMap)
if lgcSveMdlTc:
lstCmpMdlRsp = [item for sublist in lstCmpMdlRsp[1:] for item in
sublist]
for strMap in lstCmpMdlRsp[:]:
os.remove(strMap) | python | def cmp_res_R2(lstRat, lstNiiNames, strPathOut, strPathMdl, lgcSveMdlTc=True,
lgcDel=False, strNmeExt=''):
""""Compare results for different exponents and create winner nii.
Parameters
----------
lstRat : list
List of floats containing the ratios that were tested for surround
suppression.
lstNiiNames : list
List of names of the different pRF maps (e.g. xpos, ypos, SD)
strPathOut : string
Path to the parent directory where the results should be saved.
strPathMdl : string
Path to the parent directory where pRF models should be saved.
    lgcSveMdlTc : boolean
        Should model time courses be saved as npy file?
    lgcDel : boolean
        Should in-between results (in form of nii files) be deleted?
strNmeExt : string
        Extra name appendix to denote the experiment name. If undesired,
        provide an empty string.
Notes
-----
[1] This function does not return any arrays but instead saves to disk.
"""
print('---Compare results for different ratios')
# Extract the index position for R2 and Betas map in lstNiiNames
indPosR2 = [ind for ind, item in enumerate(lstNiiNames) if 'R2' in item]
indPosBetas = [ind for ind, item in enumerate(lstNiiNames) if 'Betas' in
item]
# Check that only one index was found
    msgError = 'More than one nii file was provided that could serve as R2/Betas map'
assert len(indPosR2) == 1, msgError
assert len(indPosBetas) == 1, msgError
    # turn list into index
indPosR2 = indPosR2[0]
indPosBetas = indPosBetas[0]
# Get the names of the nii files with in-between results
lstCmpRes = []
for indRat in range(len(lstRat)):
# Get strExpSve
strExpSve = '_' + str(lstRat[indRat])
# If ratio is marked with 1.0, set empty string to find results.
# 1.0 is the key for fitting without a surround.
if lstRat[indRat] == 1.0:
strExpSve = ''
# Create full path names from nii file names and output path
lstPthNames = [strPathOut + strNii + strNmeExt + strExpSve + '.nii.gz'
for strNii in lstNiiNames]
        # Append list to list that contains nii names for all ratios
lstCmpRes.append(lstPthNames)
print('------Find ratio that yielded highest R2 per voxel')
# Initialize winner R2 map with R2 values from fit without surround
aryWnrR2 = load_nii(lstCmpRes[0][indPosR2])[0]
# Initialize ratio map with 1 where no-surround model was fit, otherwise 0
aryRatMap = np.zeros(aryWnrR2.shape)
aryRatMap[np.nonzero(aryWnrR2)] = 1.0
# Loop over R2 maps to establish which exponent wins
# Skip the first ratio, since this is the reference ratio (no surround)
# and is reflected already in the initialized arrays - aryWnrR2 & aryRatMap
for indRat, lstMaps in zip(lstRat[1:], lstCmpRes[1:]):
# Load R2 map for this particular exponent
aryTmpR2 = load_nii(lstMaps[indPosR2])[0]
# Load beta values for this particular exponent
aryTmpBetas = load_nii(lstMaps[indPosBetas])[0]
# Get logical that tells us where current R2 map is greater than
# previous ones
aryLgcWnr = np.greater(aryTmpR2, aryWnrR2)
# Get logical that tells us where the beta parameter estimate for the
# centre is positive and the estimate for the surround is negative
aryLgcCtrSur1 = np.logical_and(np.greater(aryTmpBetas[..., 0], 0.0),
np.less(aryTmpBetas[..., 1], 0.0))
# Get logical that tells us where the absolute beta parameter estimate
# for the surround is less than beta parameter estimate for the center
aryLgcCtrSur2 = np.less(np.abs(aryTmpBetas[..., 1]),
np.abs(aryTmpBetas[..., 0]))
# Combine the two logicals
aryLgcCtrSur = np.logical_and(aryLgcCtrSur1, aryLgcCtrSur2)
# Combine logical for winner R2 and center-surround conditions
aryLgcWnr = np.logical_and(aryLgcWnr, aryLgcCtrSur)
# Replace values of R2, where current R2 map was greater
aryWnrR2[aryLgcWnr] = np.copy(aryTmpR2[aryLgcWnr])
# Remember the index of the exponent that gave rise to this new R2
aryRatMap[aryLgcWnr] = indRat
# Initialize list with winner maps. The winner maps are initialized with
# the same shape as the maps that the last tested ratio maps had.
lstRatMap = []
for strPthMaps in lstCmpRes[-1]:
lstRatMap.append(np.zeros(nb.load(strPthMaps).shape))
# Compose other maps by assigning map value from the map that resulted from
# the exponent that won for particular voxel
for indRat, lstMaps in zip(lstRat, lstCmpRes):
# Find out where this exponent won in terms of R2
lgcWinnerMap = [aryRatMap == indRat][0]
# Loop over all the maps
for indMap, _ in enumerate(lstMaps):
# Load map for this particular ratio
aryTmpMap = load_nii(lstMaps[indMap])[0]
# Handle exception: beta map will be 1D, if from ratio 1.0
# In this case we want to make it 2D. In particular, the second
# set of beta weights should be all zeros, so that later when
# forming the model time course, the 2nd predictor contributes 0
if indRat == 1.0 and indMap == indPosBetas:
aryTmpMap = np.concatenate((aryTmpMap,
np.zeros(aryTmpMap.shape)),
axis=-1)
# Load current winner map from array
aryCrrWnrMap = np.copy(lstRatMap[indMap])
# Assign values in temporary map to current winner map for voxels
# where this ratio won
aryCrrWnrMap[lgcWinnerMap] = np.copy(aryTmpMap[lgcWinnerMap])
lstRatMap[indMap] = aryCrrWnrMap
print('------Export results as nii')
# Save winner maps as nii files
# Get header and affine array
hdrMsk, aryAff = load_nii(lstMaps[indPosR2])[1:]
# Loop over all the maps
for indMap, aryMap in enumerate(lstRatMap):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryMap,
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = strPathOut + '_supsur' + lstNiiNames[indMap] + strNmeExt + \
'.nii.gz'
nb.save(niiOut, strTmp)
# Save map with best ratios as nii
niiOut = nb.Nifti1Image(aryRatMap,
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = strPathOut + '_supsur' + '_Ratios' + strNmeExt + '.nii.gz'
nb.save(niiOut, strTmp)
if lgcSveMdlTc:
print('------Save model time courses/parameters/responses for ' +
'centre and surround, across all ratios')
# Get the names of the npy files with in-between model responses
lstCmpMdlRsp = []
for indRat in range(len(lstRat)):
# Get strExpSve
strExpSve = '_' + str(lstRat[indRat])
# If ratio is marked with 1.0, set empty string to find results.
# 1.0 is the key for fitting without a surround.
if lstRat[indRat] == 1.0:
strExpSve = ''
# Create full path names from npy file names and output path
lstPthNames = [strPathMdl + strNpy + strNmeExt + strExpSve + '.npy'
for strNpy in ['', '_params', '_mdlRsp']]
# Append list to list that contains nii names for all exponents
lstCmpMdlRsp.append(lstPthNames)
# Load tc/parameters/responses for different ratios, for now skip "1.0"
# ratio because its tc/parameters/responses differs in shape
lstPrfTcSur = []
lstMdlParamsSur = []
lstMdlRspSur = []
for indNpy, lstNpy in enumerate(lstCmpMdlRsp[1:]):
lstPrfTcSur.append(np.load(lstNpy[0]))
lstMdlParamsSur.append(np.load(lstNpy[1]))
lstMdlRspSur.append(np.load(lstNpy[2]))
# Turn into arrays
aryPrfTcSur = np.stack(lstPrfTcSur, axis=2)
aryMdlParamsSur = np.stack(lstMdlParamsSur, axis=2)
aryMdlRspSur = np.stack(lstMdlRspSur, axis=2)
# Now handle the "1.0" ratio
# Load the tc/parameters/responses of the "1.0" ratio
aryPrfTc = np.load(lstCmpMdlRsp[0][0])
aryMdlParams = np.load(lstCmpMdlRsp[0][1])
aryMdlRsp = np.load(lstCmpMdlRsp[0][2])
# Make 2nd row of time courses all zeros so they get no weight in lstsq
aryPrfTc = np.concatenate((aryPrfTc, np.zeros(aryPrfTc.shape)), axis=1)
# Make 2nd row of parameters the same as first row
aryMdlParams = np.stack((aryMdlParams, aryMdlParams), axis=1)
# Make 2nd row of responses all zeros so they get no weight in lstsq
aryMdlRsp = np.stack((aryMdlRsp, np.zeros(aryMdlRsp.shape)), axis=1)
# Add the "1.0" ratio to tc/parameters/responses of other ratios
aryPrfTcSur = np.concatenate((np.expand_dims(aryPrfTc, axis=2),
aryPrfTcSur), axis=2)
aryMdlParamsSur = np.concatenate((np.expand_dims(aryMdlParams, axis=2),
aryMdlParamsSur), axis=2)
aryMdlRspSur = np.concatenate((np.expand_dims(aryMdlRsp, axis=2),
aryMdlRspSur), axis=2)
# Save parameters/response for centre and surround, for all ratios
np.save(strPathMdl + '_supsur' + '', aryPrfTcSur)
np.save(strPathMdl + '_supsur' + '_params', aryMdlParamsSur)
np.save(strPathMdl + '_supsur' + '_mdlRsp', aryMdlRspSur)
# Delete all the in-between results, if desired by user, skip "1.0" ratio
if lgcDel:
lstCmpRes = [item for sublist in lstCmpRes[1:] for item in sublist]
print('------Delete in-between results')
for strMap in lstCmpRes[:]:
os.remove(strMap)
if lgcSveMdlTc:
lstCmpMdlRsp = [item for sublist in lstCmpMdlRsp[1:] for item in
sublist]
for strMap in lstCmpMdlRsp[:]:
os.remove(strMap) | [
"def",
"cmp_res_R2",
"(",
"lstRat",
",",
"lstNiiNames",
",",
"strPathOut",
",",
"strPathMdl",
",",
"lgcSveMdlTc",
"=",
"True",
",",
"lgcDel",
"=",
"False",
",",
"strNmeExt",
"=",
"''",
")",
":",
"print",
"(",
"'---Compare results for different ratios'",
")",
"# Extract the index position for R2 and Betas map in lstNiiNames",
"indPosR2",
"=",
"[",
"ind",
"for",
"ind",
",",
"item",
"in",
"enumerate",
"(",
"lstNiiNames",
")",
"if",
"'R2'",
"in",
"item",
"]",
"indPosBetas",
"=",
"[",
"ind",
"for",
"ind",
",",
"item",
"in",
"enumerate",
"(",
"lstNiiNames",
")",
"if",
"'Betas'",
"in",
"item",
"]",
"# Check that only one index was found",
"msgError",
"=",
"'More than one nii file was provided that could serve as R2 map'",
"assert",
"len",
"(",
"indPosR2",
")",
"==",
"1",
",",
"msgError",
"assert",
"len",
"(",
"indPosBetas",
")",
"==",
"1",
",",
"msgError",
"# turn list int index",
"indPosR2",
"=",
"indPosR2",
"[",
"0",
"]",
"indPosBetas",
"=",
"indPosBetas",
"[",
"0",
"]",
"# Get the names of the nii files with in-between results",
"lstCmpRes",
"=",
"[",
"]",
"for",
"indRat",
"in",
"range",
"(",
"len",
"(",
"lstRat",
")",
")",
":",
"# Get strExpSve",
"strExpSve",
"=",
"'_'",
"+",
"str",
"(",
"lstRat",
"[",
"indRat",
"]",
")",
"# If ratio is marked with 1.0, set empty string to find results.",
"# 1.0 is the key for fitting without a surround.",
"if",
"lstRat",
"[",
"indRat",
"]",
"==",
"1.0",
":",
"strExpSve",
"=",
"''",
"# Create full path names from nii file names and output path",
"lstPthNames",
"=",
"[",
"strPathOut",
"+",
"strNii",
"+",
"strNmeExt",
"+",
"strExpSve",
"+",
"'.nii.gz'",
"for",
"strNii",
"in",
"lstNiiNames",
"]",
"# Append list to list that contains nii names for all exponents",
"lstCmpRes",
".",
"append",
"(",
"lstPthNames",
")",
"print",
"(",
"'------Find ratio that yielded highest R2 per voxel'",
")",
"# Initialize winner R2 map with R2 values from fit without surround",
"aryWnrR2",
"=",
"load_nii",
"(",
"lstCmpRes",
"[",
"0",
"]",
"[",
"indPosR2",
"]",
")",
"[",
"0",
"]",
"# Initialize ratio map with 1 where no-surround model was fit, otherwise 0",
"aryRatMap",
"=",
"np",
".",
"zeros",
"(",
"aryWnrR2",
".",
"shape",
")",
"aryRatMap",
"[",
"np",
".",
"nonzero",
"(",
"aryWnrR2",
")",
"]",
"=",
"1.0",
"# Loop over R2 maps to establish which exponents wins",
"# Skip the first ratio, since this is the reference ratio (no surround)",
"# and is reflected already in the initialized arrays - aryWnrR2 & aryRatMap",
"for",
"indRat",
",",
"lstMaps",
"in",
"zip",
"(",
"lstRat",
"[",
"1",
":",
"]",
",",
"lstCmpRes",
"[",
"1",
":",
"]",
")",
":",
"# Load R2 map for this particular exponent",
"aryTmpR2",
"=",
"load_nii",
"(",
"lstMaps",
"[",
"indPosR2",
"]",
")",
"[",
"0",
"]",
"# Load beta values for this particular exponent",
"aryTmpBetas",
"=",
"load_nii",
"(",
"lstMaps",
"[",
"indPosBetas",
"]",
")",
"[",
"0",
"]",
"# Get logical that tells us where current R2 map is greater than",
"# previous ones",
"aryLgcWnr",
"=",
"np",
".",
"greater",
"(",
"aryTmpR2",
",",
"aryWnrR2",
")",
"# Get logical that tells us where the beta parameter estimate for the",
"# centre is positive and the estimate for the surround is negative",
"aryLgcCtrSur1",
"=",
"np",
".",
"logical_and",
"(",
"np",
".",
"greater",
"(",
"aryTmpBetas",
"[",
"...",
",",
"0",
"]",
",",
"0.0",
")",
",",
"np",
".",
"less",
"(",
"aryTmpBetas",
"[",
"...",
",",
"1",
"]",
",",
"0.0",
")",
")",
"# Get logical that tells us where the absolute beta parameter estimate",
"# for the surround is less than beta parameter estimate for the center",
"aryLgcCtrSur2",
"=",
"np",
".",
"less",
"(",
"np",
".",
"abs",
"(",
"aryTmpBetas",
"[",
"...",
",",
"1",
"]",
")",
",",
"np",
".",
"abs",
"(",
"aryTmpBetas",
"[",
"...",
",",
"0",
"]",
")",
")",
"# Combine the two logicals",
"aryLgcCtrSur",
"=",
"np",
".",
"logical_and",
"(",
"aryLgcCtrSur1",
",",
"aryLgcCtrSur2",
")",
"# Combine logical for winner R2 and center-surround conditions",
"aryLgcWnr",
"=",
"np",
".",
"logical_and",
"(",
"aryLgcWnr",
",",
"aryLgcCtrSur",
")",
"# Replace values of R2, where current R2 map was greater",
"aryWnrR2",
"[",
"aryLgcWnr",
"]",
"=",
"np",
".",
"copy",
"(",
"aryTmpR2",
"[",
"aryLgcWnr",
"]",
")",
"# Remember the index of the exponent that gave rise to this new R2",
"aryRatMap",
"[",
"aryLgcWnr",
"]",
"=",
"indRat",
"# Initialize list with winner maps. The winner maps are initialized with",
"# the same shape as the maps that the last tested ratio maps had.",
"lstRatMap",
"=",
"[",
"]",
"for",
"strPthMaps",
"in",
"lstCmpRes",
"[",
"-",
"1",
"]",
":",
"lstRatMap",
".",
"append",
"(",
"np",
".",
"zeros",
"(",
"nb",
".",
"load",
"(",
"strPthMaps",
")",
".",
"shape",
")",
")",
"# Compose other maps by assigning map value from the map that resulted from",
"# the exponent that won for particular voxel",
"for",
"indRat",
",",
"lstMaps",
"in",
"zip",
"(",
"lstRat",
",",
"lstCmpRes",
")",
":",
"# Find out where this exponent won in terms of R2",
"lgcWinnerMap",
"=",
"[",
"aryRatMap",
"==",
"indRat",
"]",
"[",
"0",
"]",
"# Loop over all the maps",
"for",
"indMap",
",",
"_",
"in",
"enumerate",
"(",
"lstMaps",
")",
":",
"# Load map for this particular ratio",
"aryTmpMap",
"=",
"load_nii",
"(",
"lstMaps",
"[",
"indMap",
"]",
")",
"[",
"0",
"]",
"# Handle exception: beta map will be 1D, if from ratio 1.0",
"# In this case we want to make it 2D. In particular, the second",
"# set of beta weights should be all zeros, so that later when",
"# forming the model time course, the 2nd predictors contributes 0",
"if",
"indRat",
"==",
"1.0",
"and",
"indMap",
"==",
"indPosBetas",
":",
"aryTmpMap",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryTmpMap",
",",
"np",
".",
"zeros",
"(",
"aryTmpMap",
".",
"shape",
")",
")",
",",
"axis",
"=",
"-",
"1",
")",
"# Load current winner map from array",
"aryCrrWnrMap",
"=",
"np",
".",
"copy",
"(",
"lstRatMap",
"[",
"indMap",
"]",
")",
"# Assign values in temporary map to current winner map for voxels",
"# where this ratio won",
"aryCrrWnrMap",
"[",
"lgcWinnerMap",
"]",
"=",
"np",
".",
"copy",
"(",
"aryTmpMap",
"[",
"lgcWinnerMap",
"]",
")",
"lstRatMap",
"[",
"indMap",
"]",
"=",
"aryCrrWnrMap",
"print",
"(",
"'------Export results as nii'",
")",
"# Save winner maps as nii files",
"# Get header and affine array",
"hdrMsk",
",",
"aryAff",
"=",
"load_nii",
"(",
"lstMaps",
"[",
"indPosR2",
"]",
")",
"[",
"1",
":",
"]",
"# Loop over all the maps",
"for",
"indMap",
",",
"aryMap",
"in",
"enumerate",
"(",
"lstRatMap",
")",
":",
"# Create nii object for results:",
"niiOut",
"=",
"nb",
".",
"Nifti1Image",
"(",
"aryMap",
",",
"aryAff",
",",
"header",
"=",
"hdrMsk",
")",
"# Save nii:",
"strTmp",
"=",
"strPathOut",
"+",
"'_supsur'",
"+",
"lstNiiNames",
"[",
"indMap",
"]",
"+",
"strNmeExt",
"+",
"'.nii.gz'",
"nb",
".",
"save",
"(",
"niiOut",
",",
"strTmp",
")",
"# Save map with best ratios as nii",
"niiOut",
"=",
"nb",
".",
"Nifti1Image",
"(",
"aryRatMap",
",",
"aryAff",
",",
"header",
"=",
"hdrMsk",
")",
"# Save nii:",
"strTmp",
"=",
"strPathOut",
"+",
"'_supsur'",
"+",
"'_Ratios'",
"+",
"strNmeExt",
"+",
"'.nii.gz'",
"nb",
".",
"save",
"(",
"niiOut",
",",
"strTmp",
")",
"if",
"lgcSveMdlTc",
":",
"print",
"(",
"'------Save model time courses/parameters/responses for '",
"+",
"'centre and surround, across all ratios'",
")",
"# Get the names of the npy files with inbetween model responses",
"lstCmpMdlRsp",
"=",
"[",
"]",
"for",
"indRat",
"in",
"range",
"(",
"len",
"(",
"lstRat",
")",
")",
":",
"# Get strExpSve",
"strExpSve",
"=",
"'_'",
"+",
"str",
"(",
"lstRat",
"[",
"indRat",
"]",
")",
"# If ratio is marked with 0, set empty string to find results.",
"# This is the code for fitting without a surround.",
"if",
"lstRat",
"[",
"indRat",
"]",
"==",
"1.0",
":",
"strExpSve",
"=",
"''",
"# Create full path names from npy file names and output path",
"lstPthNames",
"=",
"[",
"strPathMdl",
"+",
"strNpy",
"+",
"strNmeExt",
"+",
"strExpSve",
"+",
"'.npy'",
"for",
"strNpy",
"in",
"[",
"''",
",",
"'_params'",
",",
"'_mdlRsp'",
"]",
"]",
"# Append list to list that contains nii names for all exponents",
"lstCmpMdlRsp",
".",
"append",
"(",
"lstPthNames",
")",
"# Load tc/parameters/responses for different ratios, for now skip \"0.0\"",
"# ratio because its tc/parameters/responses differs in shape",
"lstPrfTcSur",
"=",
"[",
"]",
"lstMdlParamsSur",
"=",
"[",
"]",
"lstMdlRspSur",
"=",
"[",
"]",
"for",
"indNpy",
",",
"lstNpy",
"in",
"enumerate",
"(",
"lstCmpMdlRsp",
"[",
"1",
":",
"]",
")",
":",
"lstPrfTcSur",
".",
"append",
"(",
"np",
".",
"load",
"(",
"lstNpy",
"[",
"0",
"]",
")",
")",
"lstMdlParamsSur",
".",
"append",
"(",
"np",
".",
"load",
"(",
"lstNpy",
"[",
"1",
"]",
")",
")",
"lstMdlRspSur",
".",
"append",
"(",
"np",
".",
"load",
"(",
"lstNpy",
"[",
"2",
"]",
")",
")",
"# Turn into arrays",
"aryPrfTcSur",
"=",
"np",
".",
"stack",
"(",
"lstPrfTcSur",
",",
"axis",
"=",
"2",
")",
"aryMdlParamsSur",
"=",
"np",
".",
"stack",
"(",
"lstMdlParamsSur",
",",
"axis",
"=",
"2",
")",
"aryMdlRspSur",
"=",
"np",
".",
"stack",
"(",
"lstMdlRspSur",
",",
"axis",
"=",
"2",
")",
"# Now handle the \"1.0\" ratio",
"# Load the tc/parameters/responses of the \"1.0\" ratio",
"aryPrfTc",
"=",
"np",
".",
"load",
"(",
"lstCmpMdlRsp",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"aryMdlParams",
"=",
"np",
".",
"load",
"(",
"lstCmpMdlRsp",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"aryMdlRsp",
"=",
"np",
".",
"load",
"(",
"lstCmpMdlRsp",
"[",
"0",
"]",
"[",
"2",
"]",
")",
"# Make 2nd row of time courses all zeros so they get no weight in lstsq",
"aryPrfTc",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryPrfTc",
",",
"np",
".",
"zeros",
"(",
"aryPrfTc",
".",
"shape",
")",
")",
",",
"axis",
"=",
"1",
")",
"# Make 2nd row of parameters the same as first row",
"aryMdlParams",
"=",
"np",
".",
"stack",
"(",
"(",
"aryMdlParams",
",",
"aryMdlParams",
")",
",",
"axis",
"=",
"1",
")",
"# Make 2nd row of responses all zeros so they get no weight in lstsq",
"aryMdlRsp",
"=",
"np",
".",
"stack",
"(",
"(",
"aryMdlRsp",
",",
"np",
".",
"zeros",
"(",
"aryMdlRsp",
".",
"shape",
")",
")",
",",
"axis",
"=",
"1",
")",
"# Add the \"1.0\" ratio to tc/parameters/responses of other ratios",
"aryPrfTcSur",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"expand_dims",
"(",
"aryPrfTc",
",",
"axis",
"=",
"2",
")",
",",
"aryPrfTcSur",
")",
",",
"axis",
"=",
"2",
")",
"aryMdlParamsSur",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"expand_dims",
"(",
"aryMdlParams",
",",
"axis",
"=",
"2",
")",
",",
"aryMdlParamsSur",
")",
",",
"axis",
"=",
"2",
")",
"aryMdlRspSur",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"expand_dims",
"(",
"aryMdlRsp",
",",
"axis",
"=",
"2",
")",
",",
"aryMdlRspSur",
")",
",",
"axis",
"=",
"2",
")",
"# Save parameters/response for centre and surround, for all ratios",
"np",
".",
"save",
"(",
"strPathMdl",
"+",
"'_supsur'",
"+",
"''",
",",
"aryPrfTcSur",
")",
"np",
".",
"save",
"(",
"strPathMdl",
"+",
"'_supsur'",
"+",
"'_params'",
",",
"aryMdlParamsSur",
")",
"np",
".",
"save",
"(",
"strPathMdl",
"+",
"'_supsur'",
"+",
"'_mdlRsp'",
",",
"aryMdlRspSur",
")",
"# Delete all the inbetween results, if desired by user, skip \"0.0\" ratio",
"if",
"lgcDel",
":",
"lstCmpRes",
"=",
"[",
"item",
"for",
"sublist",
"in",
"lstCmpRes",
"[",
"1",
":",
"]",
"for",
"item",
"in",
"sublist",
"]",
"print",
"(",
"'------Delete in-between results'",
")",
"for",
"strMap",
"in",
"lstCmpRes",
"[",
":",
"]",
":",
"os",
".",
"remove",
"(",
"strMap",
")",
"if",
"lgcSveMdlTc",
":",
"lstCmpMdlRsp",
"=",
"[",
"item",
"for",
"sublist",
"in",
"lstCmpMdlRsp",
"[",
"1",
":",
"]",
"for",
"item",
"in",
"sublist",
"]",
"for",
"strMap",
"in",
"lstCmpMdlRsp",
"[",
":",
"]",
":",
"os",
".",
"remove",
"(",
"strMap",
")"
] | Compare results for different exponents and create winner nii.
Parameters
----------
lstRat : list
List of floats containing the ratios that were tested for surround
suppression.
lstNiiNames : list
List of names of the different pRF maps (e.g. xpos, ypos, SD)
strPathOut : string
Path to the parent directory where the results should be saved.
strPathMdl : string
Path to the parent directory where pRF models should be saved.
lgcSveMdlTc : boolean
Should model time courses be saved as npy file?
lgcDel : boolean
Should in-between results (in form of nii files) be deleted?
strNmeExt : string
Extra name appendix to denominate experiment name. If undesired,
provide empty string.
Notes
-----
[1] This function does not return any arrays but instead saves to disk. | [
"Compare",
"results",
"for",
"different",
"exponents",
"and",
"create",
"winner",
"nii",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L312-L529 |
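A minimal usage sketch for cmp_res_R2 above; the output paths are hypothetical placeholders, and per-ratio fitting results are assumed to already exist on disk:
# Usage sketch; '/results/sub-01_prf' and '/models/sub-01_mdl' are hypothetical.
lstRat = [1.0, 1.5, 2.0]  # 1.0 encodes the reference fit without surround
lstNiiNames = ['_x_pos', '_y_pos', '_SD', '_R2', '_Betas']
cmp_res_R2(lstRat, lstNiiNames,
           '/results/sub-01_prf',  # strPathOut, hypothetical
           '/models/sub-01_mdl',   # strPathMdl, hypothetical
           lgcSveMdlTc=False, lgcDel=False)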
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | map_crt_to_pol | def map_crt_to_pol(aryXCrds, aryYrds):
"""Remap coordinates from cartesian to polar
Parameters
----------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values.
Returns
-------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates.
"""
aryRad = np.sqrt(aryXCrds**2+aryYrds**2)
aryTht = np.arctan2(aryYrds, aryXCrds)
return aryTht, aryRad | python | def map_crt_to_pol(aryXCrds, aryYrds):
"""Remap coordinates from cartesian to polar
Parameters
----------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values.
Returns
-------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates.
"""
aryRad = np.sqrt(aryXCrds**2+aryYrds**2)
aryTht = np.arctan2(aryYrds, aryXCrds)
return aryTht, aryRad | [
"def",
"map_crt_to_pol",
"(",
"aryXCrds",
",",
"aryYrds",
")",
":",
"aryRad",
"=",
"np",
".",
"sqrt",
"(",
"aryXCrds",
"**",
"2",
"+",
"aryYrds",
"**",
"2",
")",
"aryTht",
"=",
"np",
".",
"arctan2",
"(",
"aryYrds",
",",
"aryXCrds",
")",
"return",
"aryTht",
",",
"aryRad"
] | Remap coordinates from cartesian to polar
Parameters
----------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values.
Returns
-------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates. | [
"Remap",
"coordinates",
"from",
"cartesian",
"to",
"polar"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L532-L553 |
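A quick self-contained check of map_crt_to_pol; the input values are illustrative only:
import numpy as np
aryX = np.array([1.0, 0.0, -1.0])
aryY = np.array([0.0, 1.0, 0.0])
aryTht, aryRad = map_crt_to_pol(aryX, aryY)
# aryTht -> [0., pi/2, pi] and aryRad -> [1., 1., 1.]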
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | map_pol_to_crt | def map_pol_to_crt(aryTht, aryRad):
"""Remap coordinates from polar to cartesian
Parameters
----------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates.
Returns
-------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values.
"""
aryXCrds = aryRad * np.cos(aryTht)
aryYrds = aryRad * np.sin(aryTht)
return aryXCrds, aryYrds | python | def map_pol_to_crt(aryTht, aryRad):
"""Remap coordinates from polar to cartesian
Parameters
----------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates.
Returns
-------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values.
"""
aryXCrds = aryRad * np.cos(aryTht)
aryYrds = aryRad * np.sin(aryTht)
return aryXCrds, aryYrds | [
"def",
"map_pol_to_crt",
"(",
"aryTht",
",",
"aryRad",
")",
":",
"aryXCrds",
"=",
"aryRad",
"*",
"np",
".",
"cos",
"(",
"aryTht",
")",
"aryYrds",
"=",
"aryRad",
"*",
"np",
".",
"sin",
"(",
"aryTht",
")",
"return",
"aryXCrds",
",",
"aryYrds"
] | Remap coordinates from polar to cartesian
Parameters
----------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates.
Returns
-------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values. | [
"Remap",
"coordinates",
"from",
"polar",
"to",
"cartesian"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L556-L577 |
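A matching sketch for map_pol_to_crt, which inverts the mapping above (illustrative values):
import numpy as np
aryTht = np.array([0.0, np.pi / 2.0])
aryRad = np.array([1.0, 2.0])
aryX, aryY = map_pol_to_crt(aryTht, aryRad)
# aryX -> [1., ~0.] and aryY -> [0., 2.]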
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | find_near_pol_ang | def find_near_pol_ang(aryEmpPlrAng, aryExpPlrAng):
"""Return index of nearest expected polar angle.
Parameters
----------
aryEmpPlrAng : 1D numpy array
Empirically found polar angle estimates
aryExpPlrAng : 1D numpy array
Theoretically expected polar angle estimates
Returns
-------
aryIdx : 1D numpy array
Indices of nearest theoretically expected polar angle.
aryDst : 1D numpy array
Distances to nearest theoretically expected polar angle.
"""
dist = np.abs(np.subtract(aryEmpPlrAng[:, None],
aryExpPlrAng[None, :]))
return np.argmin(dist, axis=-1), np.min(dist, axis=-1) | python | def find_near_pol_ang(aryEmpPlrAng, aryExpPlrAng):
"""Return index of nearest expected polar angle.
Parameters
----------
aryEmpPlrAng : 1D numpy array
Empirically found polar angle estimates
aryExpPlrAng : 1D numpy array
Theoretically expected polar angle estimates
Returns
-------
aryIdx : 1D numpy array
Indices of nearest theoretically expected polar angle.
aryDst : 1D numpy array
Distances to nearest theoretically expected polar angle.
"""
dist = np.abs(np.subtract(aryEmpPlrAng[:, None],
aryExpPlrAng[None, :]))
return np.argmin(dist, axis=-1), np.min(dist, axis=-1) | [
"def",
"find_near_pol_ang",
"(",
"aryEmpPlrAng",
",",
"aryExpPlrAng",
")",
":",
"dist",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"subtract",
"(",
"aryEmpPlrAng",
"[",
":",
",",
"None",
"]",
",",
"aryExpPlrAng",
"[",
"None",
",",
":",
"]",
")",
")",
"return",
"np",
".",
"argmin",
"(",
"dist",
",",
"axis",
"=",
"-",
"1",
")",
",",
"np",
".",
"min",
"(",
"dist",
",",
"axis",
"=",
"-",
"1",
")"
] | Return index of nearest expected polar angle.
Parameters
----------
aryEmpPlrAng : 1D numpy array
Empirically found polar angle estimates
aryExpPlrAng : 1D numpy array
Theoretically expected polar angle estimates
Returns
-------
aryIdx : 1D numpy array
Indices of nearest theoretically expected polar angle.
aryDst : 1D numpy array
Distances to nearest theoretically expected polar angle.
"Return",
"index",
"of",
"nearest",
"expected",
"polar",
"angle",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L580-L601 |
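A usage sketch for find_near_pol_ang with illustrative angles in radians:
import numpy as np
aryEmp = np.array([0.10, 1.60])            # empirical polar angle estimates
aryExp = np.linspace(0.0, np.pi, num=4)    # theoretically expected angles
vecIdx, vecDst = find_near_pol_ang(aryEmp, aryExp)
# vecIdx -> [0, 2] (index of nearest expected angle), vecDst -> distances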
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | rmp_rng | def rmp_rng(aryVls, varNewMin, varNewMax, varOldThrMin=None,
varOldAbsMax=None):
"""Remap values in an array from one range to another.
Parameters
----------
aryVls : 1D numpy array
Array with values that need to be remapped.
varNewMin : float
Desired minimum value of new, remapped array.
varNewMax : float
Desired maximum value of new, remapped array.
varOldThrMin : float
Theoretical minimum of old distribution. Can be specified if this
theoretical minimum does not occur in empirical distribution but
should be considered nonetheless.
varOldAbsMax : float
Theoretical maximum of old distribution. Can be specified if this
theoretical maximum does not occur in empirical distribution but
should be considered nonetheless.
Returns
-------
aryNewVls : 1D numpy array
Array with remapped values.
"""
if varOldThrMin is None:
varOldMin = aryVls.min()
else:
varOldMin = varOldThrMin
if varOldAbsMax is None:
varOldMax = aryVls.max()
else:
varOldMax = varOldAbsMax
aryNewVls = np.empty((aryVls.shape), dtype=aryVls.dtype)
for ind, val in enumerate(aryVls):
aryNewVls[ind] = (((val - varOldMin) * (varNewMax - varNewMin)) /
(varOldMax - varOldMin)) + varNewMin
return aryNewVls | python | def rmp_rng(aryVls, varNewMin, varNewMax, varOldThrMin=None,
varOldAbsMax=None):
"""Remap values in an array from one range to another.
Parameters
----------
aryVls : 1D numpy array
Array with values that need to be remapped.
varNewMin : float
Desired minimum value of new, remapped array.
varNewMax : float
Desired maximum value of new, remapped array.
varOldThrMin : float
Theoretical minimum of old distribution. Can be specified if this
theoretical minimum does not occur in empirical distribution but
should be considered nonetheless.
varOldAbsMax : float
Theoretical maximum of old distribution. Can be specified if this
theoretical maximum does not occur in empirical distribution but
should be considered nonetheless.
Returns
-------
aryNewVls : 1D numpy array
Array with remapped values.
"""
if varOldThrMin is None:
varOldMin = aryVls.min()
else:
varOldMin = varOldThrMin
if varOldAbsMax is None:
varOldMax = aryVls.max()
else:
varOldMax = varOldAbsMax
aryNewVls = np.empty((aryVls.shape), dtype=aryVls.dtype)
for ind, val in enumerate(aryVls):
aryNewVls[ind] = (((val - varOldMin) * (varNewMax - varNewMin)) /
(varOldMax - varOldMin)) + varNewMin
return aryNewVls | [
"def",
"rmp_rng",
"(",
"aryVls",
",",
"varNewMin",
",",
"varNewMax",
",",
"varOldThrMin",
"=",
"None",
",",
"varOldAbsMax",
"=",
"None",
")",
":",
"if",
"varOldThrMin",
"is",
"None",
":",
"varOldMin",
"=",
"aryVls",
".",
"min",
"(",
")",
"else",
":",
"varOldMin",
"=",
"varOldThrMin",
"if",
"varOldAbsMax",
"is",
"None",
":",
"varOldMax",
"=",
"aryVls",
".",
"max",
"(",
")",
"else",
":",
"varOldMax",
"=",
"varOldAbsMax",
"aryNewVls",
"=",
"np",
".",
"empty",
"(",
"(",
"aryVls",
".",
"shape",
")",
",",
"dtype",
"=",
"aryVls",
".",
"dtype",
")",
"for",
"ind",
",",
"val",
"in",
"enumerate",
"(",
"aryVls",
")",
":",
"aryNewVls",
"[",
"ind",
"]",
"=",
"(",
"(",
"(",
"val",
"-",
"varOldMin",
")",
"*",
"(",
"varNewMax",
"-",
"varNewMin",
")",
")",
"/",
"(",
"varOldMax",
"-",
"varOldMin",
")",
")",
"+",
"varNewMin",
"return",
"aryNewVls"
] | Remap values in an array from one range to another.
Parameters
----------
aryVls : 1D numpy array
Array with values that need to be remapped.
varNewMin : float
Desired minimum value of new, remapped array.
varNewMax : float
Desired maximum value of new, remapped array.
varOldThrMin : float
Theoretical minimum of old distribution. Can be specified if this
theoretical minimum does not occur in empirical distribution but
should be considered nonetheless.
varOldAbsMax : float
Theoretical maximum of old distribution. Can be specified if this
theoretical maximum does not occur in empirical distribution but
should be considered nonetheless.
Returns
-------
aryNewVls : 1D numpy array
Array with remapped values. | [
"Remap",
"values",
"in",
"an",
"array",
"from",
"one",
"range",
"to",
"another",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L604-L644 |
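A usage sketch for rmp_rng, remapping values from the theoretical range [0, 10] to [-1, 1] (illustrative values):
import numpy as np
aryVls = np.array([2.0, 5.0, 8.0])
aryNew = rmp_rng(aryVls, -1.0, 1.0, varOldThrMin=0.0, varOldAbsMax=10.0)
# aryNew -> [-0.6, 0., 0.6]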
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | rmp_deg_pixel_xys | def rmp_deg_pixel_xys(vecX, vecY, vecPrfSd, tplPngSize,
varExtXmin, varExtXmax, varExtYmin, varExtYmax):
"""Remap x, y, sigma parameters from degrees to pixel.
Parameters
----------
vecX : 1D numpy array
Array with possible x parameters in degree
vecY : 1D numpy array
Array with possible y parameters in degree
vecPrfSd : 1D numpy array
Array with possible sd parameters in degree
tplPngSize : tuple, 2
Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
Extent of visual space from centre in positive x-direction (width)
varExtYmin : float
Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
Extent of visual space from centre in positive y-direction (height)
Returns
-------
vecX : 1D numpy array
Array with possible x parameters in pixel
vecY : 1D numpy array
Array with possible y parameters in pixel
vecPrfSd : 1D numpy array
Array with possible sd parameters in pixel
"""
# Remap modelled x-positions of the pRFs:
vecXpxl = rmp_rng(vecX, 0.0, (tplPngSize[0] - 1), varOldThrMin=varExtXmin,
varOldAbsMax=varExtXmax)
# Remap modelled y-positions of the pRFs:
vecYpxl = rmp_rng(vecY, 0.0, (tplPngSize[1] - 1), varOldThrMin=varExtYmin,
varOldAbsMax=varExtYmax)
# We calculate the scaling factor from degrees of visual angle to
# pixels separately for the x- and the y-directions (the two should
# be the same).
varDgr2PixX = np.divide(tplPngSize[0], (varExtXmax - varExtXmin))
varDgr2PixY = np.divide(tplPngSize[1], (varExtYmax - varExtYmin))
# Check whether varDgr2PixX and varDgr2PixY are similar:
strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \
'stimulus space (in degrees of visual angle) and the ' + \
'ratio of X and Y dimensions in the upsampled visual space ' + \
'do not agree'
assert 0.5 > np.absolute((varDgr2PixX - varDgr2PixY)), strErrMsg
# Convert pRF sizes from degrees of visual angle to pixels
vecPrfSdpxl = np.multiply(vecPrfSd, varDgr2PixX)
# Return new values in column stack.
# Since values are now in pixels, they should be integers
return np.column_stack((vecXpxl, vecYpxl, vecPrfSdpxl)).astype(np.int32) | python | def rmp_deg_pixel_xys(vecX, vecY, vecPrfSd, tplPngSize,
varExtXmin, varExtXmax, varExtYmin, varExtYmax):
"""Remap x, y, sigma parameters from degrees to pixel.
Parameters
----------
vecX : 1D numpy array
Array with possible x parameters in degree
vecY : 1D numpy array
Array with possible y parameters in degree
vecPrfSd : 1D numpy array
Array with possible sd parameters in degree
tplPngSize : tuple, 2
Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
Extent of visual space from centre in positive x-direction (width)
varExtYmin : float
Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
Extent of visual space from centre in positive y-direction (height)
Returns
-------
vecX : 1D numpy array
Array with possible x parameters in pixel
vecY : 1D numpy array
Array with possible y parameters in pixel
vecPrfSd : 1D numpy array
Array with possible sd parameters in pixel
"""
# Remap modelled x-positions of the pRFs:
vecXpxl = rmp_rng(vecX, 0.0, (tplPngSize[0] - 1), varOldThrMin=varExtXmin,
varOldAbsMax=varExtXmax)
# Remap modelled y-positions of the pRFs:
vecYpxl = rmp_rng(vecY, 0.0, (tplPngSize[1] - 1), varOldThrMin=varExtYmin,
varOldAbsMax=varExtYmax)
# We calculate the scaling factor from degrees of visual angle to
# pixels separately for the x- and the y-directions (the two should
# be the same).
varDgr2PixX = np.divide(tplPngSize[0], (varExtXmax - varExtXmin))
varDgr2PixY = np.divide(tplPngSize[1], (varExtYmax - varExtYmin))
# Check whether varDgr2PixX and varDgr2PixY are similar:
strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \
'stimulus space (in degrees of visual angle) and the ' + \
'ratio of X and Y dimensions in the upsampled visual space ' + \
'do not agree'
assert 0.5 > np.absolute((varDgr2PixX - varDgr2PixY)), strErrMsg
# Convert pRF sizes from degrees of visual angle to pixels
vecPrfSdpxl = np.multiply(vecPrfSd, varDgr2PixX)
# Return new values in column stack.
# Since values are now in pixels, they should be integers
return np.column_stack((vecXpxl, vecYpxl, vecPrfSdpxl)).astype(np.int32) | [
"def",
"rmp_deg_pixel_xys",
"(",
"vecX",
",",
"vecY",
",",
"vecPrfSd",
",",
"tplPngSize",
",",
"varExtXmin",
",",
"varExtXmax",
",",
"varExtYmin",
",",
"varExtYmax",
")",
":",
"# Remap modelled x-positions of the pRFs:",
"vecXpxl",
"=",
"rmp_rng",
"(",
"vecX",
",",
"0.0",
",",
"(",
"tplPngSize",
"[",
"0",
"]",
"-",
"1",
")",
",",
"varOldThrMin",
"=",
"varExtXmin",
",",
"varOldAbsMax",
"=",
"varExtXmax",
")",
"# Remap modelled y-positions of the pRFs:",
"vecYpxl",
"=",
"rmp_rng",
"(",
"vecY",
",",
"0.0",
",",
"(",
"tplPngSize",
"[",
"1",
"]",
"-",
"1",
")",
",",
"varOldThrMin",
"=",
"varExtYmin",
",",
"varOldAbsMax",
"=",
"varExtYmax",
")",
"# We calculate the scaling factor from degrees of visual angle to",
"# pixels separately for the x- and the y-directions (the two should",
"# be the same).",
"varDgr2PixX",
"=",
"np",
".",
"divide",
"(",
"tplPngSize",
"[",
"0",
"]",
",",
"(",
"varExtXmax",
"-",
"varExtXmin",
")",
")",
"varDgr2PixY",
"=",
"np",
".",
"divide",
"(",
"tplPngSize",
"[",
"1",
"]",
",",
"(",
"varExtYmax",
"-",
"varExtYmin",
")",
")",
"# Check whether varDgr2PixX and varDgr2PixY are similar:",
"strErrMsg",
"=",
"'ERROR. The ratio of X and Y dimensions in '",
"+",
"'stimulus space (in degrees of visual angle) and the '",
"+",
"'ratio of X and Y dimensions in the upsampled visual space'",
"+",
"'do not agree'",
"assert",
"0.5",
">",
"np",
".",
"absolute",
"(",
"(",
"varDgr2PixX",
"-",
"varDgr2PixY",
")",
")",
",",
"strErrMsg",
"# Convert prf sizes from degrees of visual angles to pixel",
"vecPrfSdpxl",
"=",
"np",
".",
"multiply",
"(",
"vecPrfSd",
",",
"varDgr2PixX",
")",
"# Return new values in column stack.",
"# Since values are now in pixel, they should be integer",
"return",
"np",
".",
"column_stack",
"(",
"(",
"vecXpxl",
",",
"vecYpxl",
",",
"vecPrfSdpxl",
")",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")"
] | Remap x, y, sigma parameters from degrees to pixel.
Parameters
----------
vecX : 1D numpy array
Array with possible x parameters in degree
vecY : 1D numpy array
Array with possible y parameters in degree
vecPrfSd : 1D numpy array
Array with possible sd parameters in degree
tplPngSize : tuple, 2
Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
Extent of visual space from centre in positive x-direction (width)
varExtYmin : float
Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
Extent of visual space from centre in positive y-direction (height)
Returns
-------
vecX : 1D numpy array
Array with possible x parameters in pixel
vecY : 1D numpy array
Array with possible y parameters in pixel
vecPrfSd : 1D numpy array
Array with possible sd parameters in pixel
"Remap",
"x",
"y",
"sigma",
"parameters",
"from",
"degrees",
"to",
"pixel",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L647-L705 |
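A usage sketch for rmp_deg_pixel_xys, assuming a 128 x 128 pixel visual space spanning -8 to 8 degrees in both directions (illustrative values):
import numpy as np
vecX = np.array([-4.0, 0.0, 4.0])    # x positions in degrees
vecY = np.array([0.0, 2.0, -2.0])    # y positions in degrees
vecSd = np.array([1.0, 2.0, 3.0])    # pRF sizes in degrees
aryPix = rmp_deg_pixel_xys(vecX, vecY, vecSd, (128, 128),
                           -8.0, 8.0, -8.0, 8.0)
# aryPix is an int32 array of shape (3, 3): columns x, y, SD in pixels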
MSchnei/pyprf_feature | pyprf_feature/analysis/utils_general.py | cnvl_2D_gauss | def cnvl_2D_gauss(idxPrc, aryMdlParamsChnk, arySptExpInf, tplPngSize, queOut,
strCrd='crt'):
"""Spatially convolve input with 2D Gaussian model.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryMdlParamsChnk : 2d numpy array, shape [n_models, n_model_params]
Array with the model parameter combinations for this chunk.
arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions]
All spatial conditions stacked along second axis.
tplPngSize : tuple, 2.
Pixel dimensions of the visual space (width, height).
queOut : multiprocessing.queues.Queue
Queue to put the results on. If this is None, the user is not running
multiprocessing but is just calling the function
strCrd : string, either 'crt' or 'pol'
Whether model parameters are provided in cartesian or polar coordinates
Returns
-------
data : 2d numpy array, shape [n_models, n_conditions]
Convolved data.
Reference
---------
[1]
"""
# Number of combinations of model parameters in the current chunk:
varChnkSze = aryMdlParamsChnk.shape[0]
# Number of conditions / time points of the input data
varNumLstAx = arySptExpInf.shape[-1]
# Output array with results of convolution:
aryOut = np.zeros((varChnkSze, varNumLstAx))
# Loop through combinations of model parameters:
for idxMdl in range(0, varChnkSze):
# Spatial parameters of current model:
if strCrd == 'pol':
# Position was given in polar coordinates
varTmpEcc = aryMdlParamsChnk[idxMdl, 0]
varTmpPlrAng = aryMdlParamsChnk[idxMdl, 1]
# Convert from polar to cartesian coordinates
varTmpX = varTmpEcc * np.cos(varTmpPlrAng) + tplPngSize[0]/2.
varTmpY = varTmpEcc * np.sin(varTmpPlrAng) + tplPngSize[1]/2.
elif strCrd == 'crt':
varTmpX = aryMdlParamsChnk[idxMdl, 0]
varTmpY = aryMdlParamsChnk[idxMdl, 1]
# Standard deviation does not depend on coordinate system
varTmpSd = aryMdlParamsChnk[idxMdl, 2]
# Create pRF model (2D):
aryGauss = crt_2D_gauss(tplPngSize[0],
tplPngSize[1],
varTmpX,
varTmpY,
varTmpSd)
# Multiply pixel-time courses with Gaussian pRF models:
aryCndTcTmp = np.multiply(arySptExpInf, aryGauss[:, :, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'.
aryCndTcTmp = np.sum(aryCndTcTmp, axis=(0, 1))
# Put model time courses into function's output with 2d Gaussian
# array:
aryOut[idxMdl, :] = aryCndTcTmp
if queOut is None:
# if user is not using multiprocessing, return the array directly
return aryOut
else:
# Put column with the indices of model-parameter-combinations into the
# output array (in order to be able to put the pRF model time courses
# into the correct order after the parallelised function):
lstOut = [idxPrc,
aryOut]
# Put output to queue:
queOut.put(lstOut) | python | def cnvl_2D_gauss(idxPrc, aryMdlParamsChnk, arySptExpInf, tplPngSize, queOut,
strCrd='crt'):
"""Spatially convolve input with 2D Gaussian model.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryMdlParamsChnk : 2d numpy array, shape [n_models, n_model_params]
Array with the model parameter combinations for this chunk.
arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions]
All spatial conditions stacked along second axis.
tplPngSize : tuple, 2.
Pixel dimensions of the visual space (width, height).
queOut : multiprocessing.queues.Queue
Queue to put the results on. If this is None, the user is not running
multiprocessing but is just calling the function
strCrd : string, either 'crt' or 'pol'
Whether model parameters are provided in cartesian or polar coordinates
Returns
-------
data : 2d numpy array, shape [n_models, n_conditions]
Convolved data.
Reference
---------
[1]
"""
# Number of combinations of model parameters in the current chunk:
varChnkSze = aryMdlParamsChnk.shape[0]
# Number of conditions / time points of the input data
varNumLstAx = arySptExpInf.shape[-1]
# Output array with results of convolution:
aryOut = np.zeros((varChnkSze, varNumLstAx))
# Loop through combinations of model parameters:
for idxMdl in range(0, varChnkSze):
# Spatial parameters of current model:
if strCrd == 'pol':
# Position was given in polar coordinates
varTmpEcc = aryMdlParamsChnk[idxMdl, 0]
varTmpPlrAng = aryMdlParamsChnk[idxMdl, 1]
# Convert from polar to cartesian coordinates
varTmpX = varTmpEcc * np.cos(varTmpPlrAng) + tplPngSize[0]/2.
varTmpY = varTmpEcc * np.sin(varTmpPlrAng) + tplPngSize[1]/2.
elif strCrd == 'crt':
varTmpX = aryMdlParamsChnk[idxMdl, 0]
varTmpY = aryMdlParamsChnk[idxMdl, 1]
# Standard deviation does not depend on coordinate system
varTmpSd = aryMdlParamsChnk[idxMdl, 2]
# Create pRF model (2D):
aryGauss = crt_2D_gauss(tplPngSize[0],
tplPngSize[1],
varTmpX,
varTmpY,
varTmpSd)
# Multiply pixel-time courses with Gaussian pRF models:
aryCndTcTmp = np.multiply(arySptExpInf, aryGauss[:, :, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'.
aryCndTcTmp = np.sum(aryCndTcTmp, axis=(0, 1))
# Put model time courses into function's output with 2d Gaussian
# array:
aryOut[idxMdl, :] = aryCndTcTmp
if queOut is None:
# if user is not using multiprocessing, return the array directly
return aryOut
else:
# Put column with the indices of model-parameter-combinations into the
# output array (in order to be able to put the pRF model time courses
# into the correct order after the parallelised function):
lstOut = [idxPrc,
aryOut]
# Put output to queue:
queOut.put(lstOut) | [
"def",
"cnvl_2D_gauss",
"(",
"idxPrc",
",",
"aryMdlParamsChnk",
",",
"arySptExpInf",
",",
"tplPngSize",
",",
"queOut",
",",
"strCrd",
"=",
"'crt'",
")",
":",
"# Number of combinations of model parameters in the current chunk:",
"varChnkSze",
"=",
"aryMdlParamsChnk",
".",
"shape",
"[",
"0",
"]",
"# Number of conditions / time points of the input data",
"varNumLstAx",
"=",
"arySptExpInf",
".",
"shape",
"[",
"-",
"1",
"]",
"# Output array with results of convolution:",
"aryOut",
"=",
"np",
".",
"zeros",
"(",
"(",
"varChnkSze",
",",
"varNumLstAx",
")",
")",
"# Loop through combinations of model parameters:",
"for",
"idxMdl",
"in",
"range",
"(",
"0",
",",
"varChnkSze",
")",
":",
"# Spatial parameters of current model:",
"if",
"strCrd",
"==",
"'pol'",
":",
"# Position was given in polar coordinates",
"varTmpEcc",
"=",
"aryMdlParamsChnk",
"[",
"idxMdl",
",",
"0",
"]",
"varTmpPlrAng",
"=",
"aryMdlParamsChnk",
"[",
"idxMdl",
",",
"1",
"]",
"# Convert from polar to to cartesian coordinates",
"varTmpX",
"=",
"varTmpEcc",
"*",
"np",
".",
"cos",
"(",
"varTmpPlrAng",
")",
"+",
"tplPngSize",
"[",
"0",
"]",
"/",
"2.",
"varTmpY",
"=",
"varTmpEcc",
"*",
"np",
".",
"sin",
"(",
"varTmpPlrAng",
")",
"+",
"tplPngSize",
"[",
"1",
"]",
"/",
"2.",
"elif",
"strCrd",
"==",
"'crt'",
":",
"varTmpX",
"=",
"aryMdlParamsChnk",
"[",
"idxMdl",
",",
"0",
"]",
"varTmpY",
"=",
"aryMdlParamsChnk",
"[",
"idxMdl",
",",
"1",
"]",
"# Standard deviation does not depend on coordinate system",
"varTmpSd",
"=",
"aryMdlParamsChnk",
"[",
"idxMdl",
",",
"2",
"]",
"# Create pRF model (2D):",
"aryGauss",
"=",
"crt_2D_gauss",
"(",
"tplPngSize",
"[",
"0",
"]",
",",
"tplPngSize",
"[",
"1",
"]",
",",
"varTmpX",
",",
"varTmpY",
",",
"varTmpSd",
")",
"# Multiply pixel-time courses with Gaussian pRF models:",
"aryCndTcTmp",
"=",
"np",
".",
"multiply",
"(",
"arySptExpInf",
",",
"aryGauss",
"[",
":",
",",
":",
",",
"None",
"]",
")",
"# Calculate sum across x- and y-dimensions - the 'area under the",
"# Gaussian surface'.",
"aryCndTcTmp",
"=",
"np",
".",
"sum",
"(",
"aryCndTcTmp",
",",
"axis",
"=",
"(",
"0",
",",
"1",
")",
")",
"# Put model time courses into function's output with 2d Gaussian",
"# arrray:",
"aryOut",
"[",
"idxMdl",
",",
":",
"]",
"=",
"aryCndTcTmp",
"if",
"queOut",
"is",
"None",
":",
"# if user is not using multiprocessing, return the array directly",
"return",
"aryOut",
"else",
":",
"# Put column with the indices of model-parameter-combinations into the",
"# output array (in order to be able to put the pRF model time courses",
"# into the correct order after the parallelised function):",
"lstOut",
"=",
"[",
"idxPrc",
",",
"aryOut",
"]",
"# Put output to queue:",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Spatially convolve input with 2D Gaussian model.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryMdlParamsChnk : 2d numpy array, shape [n_models, n_model_params]
Array with the model parameter combinations for this chunk.
arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions]
All spatial conditions stacked along second axis.
tplPngSize : tuple, 2.
Pixel dimensions of the visual space (width, height).
queOut : multiprocessing.queues.Queue
Queue to put the results on. If this is None, the user is not running
multiprocessing but is just calling the function
strCrd : string, either 'crt' or 'pol'
Whether model parameters are provided in cartesian or polar coordinates
Returns
-------
data : 2d numpy array, shape [n_models, n_conditions]
Convolved data.
Reference
---------
[1] | [
"Spatially",
"convolve",
"input",
"with",
"2D",
"Gaussian",
"model",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L747-L835 |
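A single-process usage sketch for cnvl_2D_gauss with queOut=None, so the result is returned directly; it assumes crt_2D_gauss from the same module is in scope, and the aperture stack and model parameters are illustrative only:
import numpy as np
arySptExpInf = np.zeros((64, 64, 2))
arySptExpInf[20:40, 20:40, 0] = 1.0              # condition 1: central square
arySptExpInf[0:10, :, 1] = 1.0                   # condition 2: top band
aryMdlParamsChnk = np.array([[32.0, 32.0, 4.0],  # x, y, sd in pixels
                             [5.0, 32.0, 2.0]])
aryRsp = cnvl_2D_gauss(0, aryMdlParamsChnk, arySptExpInf, (64, 64),
                       None, strCrd='crt')
# aryRsp has shape (2, 2): one spatial response per model and condition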
mediawiki-utilities/python-mwreverts | mwreverts/detector.py | Detector.process | def process(self, checksum, revision=None):
"""
Process a new revision and detect a revert if it occurred. Note that
you can pass whatever you like as `revision` and it will be returned in
the case that a revert occurs.
:Parameters:
checksum : str
Any identity-matchable string-based hash of revision content
revision : `mixed`
Revision metadata. Note that any data will just be returned
in the case of a revert.
:Returns:
a :class:`~mwreverts.Revert` if one occurred or `None`
"""
revert = None
if checksum in self: # potential revert
reverteds = list(self.up_to(checksum))
if len(reverteds) > 0: # If no reverted revisions, this is a noop
revert = Revert(revision, reverteds, self[checksum])
self.insert(checksum, revision)
return revert | python | def process(self, checksum, revision=None):
"""
Process a new revision and detect a revert if it occurred. Note that
you can pass whatever you like as `revision` and it will be returned in
the case that a revert occurs.
:Parameters:
checksum : str
Any identity-matchable string-based hash of revision content
revision : `mixed`
Revision metadata. Note that any data will just be returned
in the case of a revert.
:Returns:
a :class:`~mwreverts.Revert` if one occured or `None`
"""
revert = None
if checksum in self: # potential revert
reverteds = list(self.up_to(checksum))
if len(reverteds) > 0: # If no reverted revisions, this is a noop
revert = Revert(revision, reverteds, self[checksum])
self.insert(checksum, revision)
return revert | [
"def",
"process",
"(",
"self",
",",
"checksum",
",",
"revision",
"=",
"None",
")",
":",
"revert",
"=",
"None",
"if",
"checksum",
"in",
"self",
":",
"# potential revert",
"reverteds",
"=",
"list",
"(",
"self",
".",
"up_to",
"(",
"checksum",
")",
")",
"if",
"len",
"(",
"reverteds",
")",
">",
"0",
":",
"# If no reverted revisions, this is a noop",
"revert",
"=",
"Revert",
"(",
"revision",
",",
"reverteds",
",",
"self",
"[",
"checksum",
"]",
")",
"self",
".",
"insert",
"(",
"checksum",
",",
"revision",
")",
"return",
"revert"
] | Process a new revision and detect a revert if it occurred. Note that
you can pass whatever you like as `revision` and it will be returned in
the case that a revert occurs.
:Parameters:
checksum : str
Any identity-matchable string-based hash of revision content
revision : `mixed`
Revision metadata. Note that any data will just be returned
in the case of a revert.
:Returns:
a :class:`~mwreverts.Revert` if one occurred or `None`
"Process",
"a",
"new",
"revision",
"and",
"detect",
"a",
"revert",
"if",
"it",
"occurred",
".",
"Note",
"that",
"you",
"can",
"pass",
"whatever",
"you",
"like",
"as",
"revision",
"and",
"it",
"will",
"be",
"returned",
"in",
"the",
"case",
"that",
"a",
"revert",
"occurs",
"."
] | train | https://github.com/mediawiki-utilities/python-mwreverts/blob/d379ac941e14e235ad82a48bd445a3dfa6cc022e/mwreverts/detector.py#L39-L65 |
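A usage sketch for the revert detector above; the checksums and revision dicts below are illustrative stand-ins for real content hashes and metadata:
from mwreverts import Detector

detector = Detector(radius=15)
detector.process('aaa', {'rev_id': 1})
detector.process('bbb', {'rev_id': 2})
revert = detector.process('aaa', {'rev_id': 3})  # identity revert to rev 1
if revert is not None:
    print(revert.reverting, revert.reverteds, revert.reverted_to)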
MSchnei/pyprf_feature | pyprf_feature/analysis/__main__.py | main | def main():
"""pyprf_feature entry point."""
# %% Print Welcome message
strWelcome = 'pyprf_feature ' + __version__
strDec = '=' * len(strWelcome)
print(strDec + '\n' + strWelcome + '\n' + strDec)
# %% Get list of input arguments
# Create parser object:
objParser = argparse.ArgumentParser()
# Add argument to namespace - config file path:
objParser.add_argument('-config',
metavar='config.csv',
help='Absolute file path of config file with \
parameters for pRF analysis. Ignored if in \
testing mode.'
)
# Add argument to namespace - strPathHrf flag:
objParser.add_argument('-strPathHrf', default=None, required=False,
metavar='/path/to/custom_hrf_parameter.npy',
help='Path to npy file with custom hrf parameters. \
Ignored if in testing mode.')
objParser.add_argument('-supsur', nargs='+',
help='List of floats that represent the ratio of \
size neg surround to size pos center.',
type=float, default=None)
# Add argument to namespace -save_tc flag:
objParser.add_argument('-save_tc', dest='save_tc',
action='store_true', default=False,
help='Save fitted and empirical time courses to \
nifti file. Ignored if in testing mode.')
# Add argument to namespace -mdl_rsp flag:
objParser.add_argument('-mdl_rsp', dest='lgcMdlRsp',
action='store_true', default=False,
help='When saving fitted and empirical time \
courses, should fitted aperture responses be \
saved as well? Ignored if in testing mode.')
# Namespace object containing arguments and values:
objNspc = objParser.parse_args()
# Get path of config file from argument parser:
strCsvCnfg = objNspc.config
# %% Decide which action to perform
# If no config argument is provided, print info to user.
if strCsvCnfg is None:
print('Please provide the file path to a config file, e.g.:')
print(' pyprf_feature -config /path/to/my_config_file.csv')
# If config file is provided, either perform fitting or recreate fitted
# and empirical time courses depending on whether save_tc is True or False
else:
# Signal non-test mode to lower functions (needed for pytest):
lgcTest = False
# If save_tc true, save fitted and empirical time courses to nifti file
# This assumes that fitting has already been run and will throw an
# error if the resulting nii files of the fitting cannot be found.
if objNspc.save_tc:
print('***Mode: Save fitted and empirical time courses***')
if objNspc.lgcMdlRsp:
print(' ***Also save fitted aperture responses***')
# Call to function
save_tc_to_nii(strCsvCnfg, lgcTest=lgcTest, lstRat=objNspc.supsur,
lgcMdlRsp=objNspc.lgcMdlRsp,
strPathHrf=objNspc.strPathHrf)
# If save_tc false, perform pRF fitting, either with or without
# suppressive surround
else:
# Perform pRF fitting without suppressive surround
if objNspc.supsur is None:
print('***Mode: Fit pRF models, no suppressive surround***')
# Call to main function, to invoke pRF fitting:
pyprf(strCsvCnfg, lgcTest, varRat=None,
strPathHrf=objNspc.strPathHrf)
# Perform pRF fitting with suppressive surround
else:
print('***Mode: Fit pRF models, suppressive surround***')
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest,
lgcPrint=False)
# Load config parameters from dictionary into namespace.
# We do this on every loop so we have a fresh start in case
# variables are redefined during the prf analysis
cfg = cls_set_config(dicCnfg)
# Make sure that lgcCrteMdl is set to True since we will need
# to loop iteratively over pyprf_feature with different ratios
# for size surround to size center. On every loop models,
# reflecting the new ratio, need to be created from scratch
errorMsg = 'lgcCrteMdl needs to be set to True for -supsur.'
assert cfg.lgcCrteMdl, errorMsg
# Make sure that switchHrf is set to 1. It would not make sense
# to find the negative surround for the hrf derivative function
errorMsg = 'switchHrfSet needs to be set to 1 for -supsur.'
assert cfg.switchHrfSet == 1, errorMsg
# Get list with size ratios
lstRat = objNspc.supsur
# Make sure that all ratios are larger than 1.0
errorMsg = 'All provided ratios need to be larger than 1.0'
assert np.all(np.greater(np.array(lstRat), 1.0)), errorMsg
# Append None as the first entry, so fitting without surround
# is performed once as well
lstRat.insert(0, None)
# Loop over ratios and find best pRF
for varRat in lstRat:
# Print to command line, so the user knows which exponent
# is used
print('---Ratio surround to center: ' + str(varRat))
# Call to main function, to invoke pRF analysis:
pyprf(strCsvCnfg, lgcTest=lgcTest, varRat=varRat,
strPathHrf=objNspc.strPathHrf)
# List with name suffixes of output images:
lstNiiNames = ['_x_pos',
'_y_pos',
'_SD',
'_R2',
'_polar_angle',
'_eccentricity',
'_Betas']
# Compare results for the different ratios, export nii files
# based on the results of the comparison and delete in-between
# results
# Replace first entry (None) with 1, so it can be saved to nii
lstRat[0] = 1.0
# Append 'hrf' to cfg.strPathOut, if fitting was done with
# custom hrf
if objNspc.strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
cmp_res_R2(lstRat, lstNiiNames, cfg.strPathOut, cfg.strPathMdl,
lgcDel=True) | python | def main():
"""pyprf_feature entry point."""
# %% Print Welcome message
strWelcome = 'pyprf_feature ' + __version__
strDec = '=' * len(strWelcome)
print(strDec + '\n' + strWelcome + '\n' + strDec)
# %% Get list of input arguments
# Create parser object:
objParser = argparse.ArgumentParser()
# Add argument to namespace - config file path:
objParser.add_argument('-config',
metavar='config.csv',
help='Absolute file path of config file with \
parameters for pRF analysis. Ignored if in \
testing mode.'
)
# Add argument to namespace - strPathHrf flag:
objParser.add_argument('-strPathHrf', default=None, required=False,
metavar='/path/to/custom_hrf_parameter.npy',
help='Path to npy file with custom hrf parameters. \
Ignored if in testing mode.')
objParser.add_argument('-supsur', nargs='+',
help='List of floats that represent the ratio of \
size neg surround to size pos center.',
type=float, default=None)
# Add argument to namespace -save_tc flag:
objParser.add_argument('-save_tc', dest='save_tc',
action='store_true', default=False,
help='Save fitted and empirical time courses to \
nifti file. Ignored if in testing mode.')
# Add argument to namespace -mdl_rsp flag:
objParser.add_argument('-mdl_rsp', dest='lgcMdlRsp',
action='store_true', default=False,
help='When saving fitted and empirical time \
courses, should fitted aperture responses be \
saved as well? Ignored if in testing mode.')
# Namespace object containing arguments and values:
objNspc = objParser.parse_args()
# Get path of config file from argument parser:
strCsvCnfg = objNspc.config
# %% Decide which action to perform
# If no config argument is provided, print info to user.
if strCsvCnfg is None:
print('Please provide the file path to a config file, e.g.:')
print(' pyprf_feature -config /path/to/my_config_file.csv')
# If config file is provided, either perform fitting or recreate fitted
# and empirical time courses depending on whether save_tc is True or False
else:
# Signal non-test mode to lower functions (needed for pytest):
lgcTest = False
# If save_tc true, save fitted and empirical time courses to nifti file
# This assumes that fitting has already been run and will throw an
# error if the resulting nii files of the fitting cannot be found.
if objNspc.save_tc:
print('***Mode: Save fitted and empirical time courses***')
if objNspc.lgcMdlRsp:
print(' ***Also save fitted aperture responses***')
# Call to function
save_tc_to_nii(strCsvCnfg, lgcTest=lgcTest, lstRat=objNspc.supsur,
lgcMdlRsp=objNspc.lgcMdlRsp,
strPathHrf=objNspc.strPathHrf)
# If save_tc false, perform pRF fitting, either with or without
# suppressive surround
else:
# Perform pRF fitting without suppressive surround
if objNspc.supsur is None:
print('***Mode: Fit pRF models, no suppressive surround***')
# Call to main function, to invoke pRF fitting:
pyprf(strCsvCnfg, lgcTest, varRat=None,
strPathHrf=objNspc.strPathHrf)
# Perform pRF fitting with suppressive surround
else:
print('***Mode: Fit pRF models, suppressive surround***')
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest,
lgcPrint=False)
# Load config parameters from dictionary into namespace.
# We do this on every loop so we have a fresh start in case
# variables are redefined during the prf analysis
cfg = cls_set_config(dicCnfg)
# Make sure that lgcCrteMdl is set to True since we will need
# to loop iteratively over pyprf_feature with different ratios
# for size surround to size center. On every loop models,
# reflecting the new ratio, need to be created from scratch
errorMsg = 'lgcCrteMdl needs to be set to True for -supsur.'
assert cfg.lgcCrteMdl, errorMsg
# Make sure that switchHrfSet is set to 1. It would not make sense
# to find the negative surround for the hrf derivative function
errorMsg = 'switchHrfSet needs to be set to 1 for -supsur.'
assert cfg.switchHrfSet == 1, errorMsg
# Get list with size ratios
lstRat = objNspc.supsur
# Make sure that all ratios are larger than 1.0
errorMsg = 'All provided ratios need to be larger than 1.0'
assert np.all(np.greater(np.array(lstRat), 1.0)), errorMsg
# Append None as the first entry, so fitting without surround
# is performed once as well
lstRat.insert(0, None)
# Loop over ratios and find best pRF
for varRat in lstRat:
# Print to command line, so the user knows which exponent
# is used
print('---Ratio surround to center: ' + str(varRat))
# Call to main function, to invoke pRF analysis:
pyprf(strCsvCnfg, lgcTest=lgcTest, varRat=varRat,
strPathHrf=objNspc.strPathHrf)
# List with name suffixes of output images:
lstNiiNames = ['_x_pos',
'_y_pos',
'_SD',
'_R2',
'_polar_angle',
'_eccentricity',
'_Betas']
# Compare results for the different ratios, export nii files
# based on the results of the comparison and delete in-between
# results
# Replace first entry (None) with 1, so it can be saved to nii
lstRat[0] = 1.0
# Append 'hrf' to cfg.strPathOut, if fitting was done with
# custom hrf
if objNspc.strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
cmp_res_R2(lstRat, lstNiiNames, cfg.strPathOut, cfg.strPathMdl,
lgcDel=True) | [
"def",
"main",
"(",
")",
":",
"# %% Print Welcome message",
"strWelcome",
"=",
"'pyprf_feature '",
"+",
"__version__",
"strDec",
"=",
"'='",
"*",
"len",
"(",
"strWelcome",
")",
"print",
"(",
"strDec",
"+",
"'\\n'",
"+",
"strWelcome",
"+",
"'\\n'",
"+",
"strDec",
")",
"# %% Get list of input arguments",
"# Create parser object:",
"objParser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"# Add argument to namespace - config file path:",
"objParser",
".",
"add_argument",
"(",
"'-config'",
",",
"metavar",
"=",
"'config.csv'",
",",
"help",
"=",
"'Absolute file path of config file with \\\n parameters for pRF analysis. Ignored if in \\\n testing mode.'",
")",
"# Add argument to namespace -mdl_rsp flag:",
"objParser",
".",
"add_argument",
"(",
"'-strPathHrf'",
",",
"default",
"=",
"None",
",",
"required",
"=",
"False",
",",
"metavar",
"=",
"'/path/to/custom_hrf_parameter.npy'",
",",
"help",
"=",
"'Path to npy file with custom hrf parameters. \\\n Ignored if in testing mode.'",
")",
"objParser",
".",
"add_argument",
"(",
"'-supsur'",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"'List of floats that represent the ratio of \\\n size neg surround to size pos center.'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"None",
")",
"# Add argument to namespace -save_tc flag:",
"objParser",
".",
"add_argument",
"(",
"'-save_tc'",
",",
"dest",
"=",
"'save_tc'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Save fitted and empirical time courses to \\\n nifti file. Ignored if in testing mode.'",
")",
"# Add argument to namespace -mdl_rsp flag:",
"objParser",
".",
"add_argument",
"(",
"'-mdl_rsp'",
",",
"dest",
"=",
"'lgcMdlRsp'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'When saving fitted and empirical time \\\n courses, should fitted aperture responses be \\\n saved as well? Ignored if in testing mode.'",
")",
"# Namespace object containign arguments and values:",
"objNspc",
"=",
"objParser",
".",
"parse_args",
"(",
")",
"# Get path of config file from argument parser:",
"strCsvCnfg",
"=",
"objNspc",
".",
"config",
"# %% Decide which action to perform",
"# If no config argument is provided, print info to user.",
"if",
"strCsvCnfg",
"is",
"None",
":",
"print",
"(",
"'Please provide the file path to a config file, e.g.:'",
")",
"print",
"(",
"' pyprf_feature -config /path/to/my_config_file.csv'",
")",
"# If config file is provided, either perform fitting or recreate fitted",
"# and empirical time courses depending on whether save_tc is True or False",
"else",
":",
"# Signal non-test mode to lower functions (needed for pytest):",
"lgcTest",
"=",
"False",
"# If save_tc true, save fitted and empirical time courses to nifti file",
"# This assumes that fitting has already been run and will throw an",
"# error if the resulting nii files of the fitting cannot be found.",
"if",
"objNspc",
".",
"save_tc",
":",
"print",
"(",
"'***Mode: Save fitted and empirical time courses***'",
")",
"if",
"objNspc",
".",
"lgcMdlRsp",
":",
"print",
"(",
"' ***Also save fitted aperture responses***'",
")",
"# Call to function",
"save_tc_to_nii",
"(",
"strCsvCnfg",
",",
"lgcTest",
"=",
"lgcTest",
",",
"lstRat",
"=",
"objNspc",
".",
"supsur",
",",
"lgcMdlRsp",
"=",
"objNspc",
".",
"lgcMdlRsp",
",",
"strPathHrf",
"=",
"objNspc",
".",
"strPathHrf",
")",
"# If save_tc false, perform pRF fitting, either with or without",
"# suppressive surround",
"else",
":",
"# Perform pRF fitting without suppressive surround",
"if",
"objNspc",
".",
"supsur",
"is",
"None",
":",
"print",
"(",
"'***Mode: Fit pRF models, no suppressive surround***'",
")",
"# Call to main function, to invoke pRF fitting:",
"pyprf",
"(",
"strCsvCnfg",
",",
"lgcTest",
",",
"varRat",
"=",
"None",
",",
"strPathHrf",
"=",
"objNspc",
".",
"strPathHrf",
")",
"# Perform pRF fitting with suppressive surround",
"else",
":",
"print",
"(",
"'***Mode: Fit pRF models, suppressive surround***'",
")",
"# Load config parameters from csv file into dictionary:",
"dicCnfg",
"=",
"load_config",
"(",
"strCsvCnfg",
",",
"lgcTest",
"=",
"lgcTest",
",",
"lgcPrint",
"=",
"False",
")",
"# Load config parameters from dictionary into namespace.",
"# We do this on every loop so we have a fresh start in case",
"# variables are redefined during the prf analysis",
"cfg",
"=",
"cls_set_config",
"(",
"dicCnfg",
")",
"# Make sure that lgcCrteMdl is set to True since we will need",
"# to loop iteratively over pyprf_feature with different ratios",
"# for size surround to size center. On every loop models,",
"# reflecting the new ratio, need to be created from scratch",
"errorMsg",
"=",
"'lgcCrteMdl needs to be set to True for -supsur.'",
"assert",
"cfg",
".",
"lgcCrteMdl",
",",
"errorMsg",
"# Make sure that switchHrf is set to 1. It would not make sense",
"# to find the negative surround for the hrf deriavtive function",
"errorMsg",
"=",
"'switchHrfSet needs to be set to 1 for -supsur.'",
"assert",
"cfg",
".",
"switchHrfSet",
"==",
"1",
",",
"errorMsg",
"# Get list with size ratios",
"lstRat",
"=",
"objNspc",
".",
"supsur",
"# Make sure that all ratios are larger than 1.0",
"errorMsg",
"=",
"'All provided ratios need to be larger than 1.0'",
"assert",
"np",
".",
"all",
"(",
"np",
".",
"greater",
"(",
"np",
".",
"array",
"(",
"lstRat",
")",
",",
"1.0",
")",
")",
",",
"errorMsg",
"# Append None as the first entry, so fitting without surround",
"# is performed once as well",
"lstRat",
".",
"insert",
"(",
"0",
",",
"None",
")",
"# Loop over ratios and find best pRF",
"for",
"varRat",
"in",
"lstRat",
":",
"# Print to command line, so the user knows which exponent",
"# is used",
"print",
"(",
"'---Ratio surround to center: '",
"+",
"str",
"(",
"varRat",
")",
")",
"# Call to main function, to invoke pRF analysis:",
"pyprf",
"(",
"strCsvCnfg",
",",
"lgcTest",
"=",
"lgcTest",
",",
"varRat",
"=",
"varRat",
",",
"strPathHrf",
"=",
"objNspc",
".",
"strPathHrf",
")",
"# List with name suffices of output images:",
"lstNiiNames",
"=",
"[",
"'_x_pos'",
",",
"'_y_pos'",
",",
"'_SD'",
",",
"'_R2'",
",",
"'_polar_angle'",
",",
"'_eccentricity'",
",",
"'_Betas'",
"]",
"# Compare results for the different ratios, export nii files",
"# based on the results of the comparison and delete in-between",
"# results",
"# Replace first entry (None) with 1, so it can be saved to nii",
"lstRat",
"[",
"0",
"]",
"=",
"1.0",
"# Append 'hrf' to cfg.strPathOut, if fitting was done with",
"# custom hrf",
"if",
"objNspc",
".",
"strPathHrf",
"is",
"not",
"None",
":",
"cfg",
".",
"strPathOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_hrf'",
"cmp_res_R2",
"(",
"lstRat",
",",
"lstNiiNames",
",",
"cfg",
".",
"strPathOut",
",",
"cfg",
".",
"strPathMdl",
",",
"lgcDel",
"=",
"True",
")"
] | pyprf_feature entry point. | [
"pyprf_feature",
"entry",
"point",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/__main__.py#L39-L196 |
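The entry point documented in the row above is driven entirely by argparse. As a quick illustration, here is a minimal sketch of how such a parser behaves when fed an explicit argument list -- only a subset of the arguments is reproduced, and the file path and ratio values are invented:

import argparse

objParser = argparse.ArgumentParser()
objParser.add_argument('-config', metavar='config.csv')
objParser.add_argument('-strPathHrf', default=None, required=False)
objParser.add_argument('-supsur', nargs='+', type=float, default=None)
objParser.add_argument('-save_tc', dest='save_tc', action='store_true',
                       default=False)

# Parse a hypothetical command line instead of sys.argv:
objNspc = objParser.parse_args(
    ['-config', '/tmp/my_config.csv', '-supsur', '1.5', '2.0'])

print(objNspc.config)   # /tmp/my_config.csv
print(objNspc.supsur)   # [1.5, 2.0] -- converted by type=float
print(objNspc.save_tc)  # False, because the flag was not given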
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_filtering.py | funcSmthSpt | def funcSmthSpt(aryFuncChnk, varSdSmthSpt):
"""Apply spatial smoothing to the input data.
Parameters
----------
aryFuncChnk : np.array
TODO
varSdSmthSpt : float (?)
Extent of smoothing.
Returns
-------
aryFuncChnk : np.array
Smoothed data.
"""
varNdim = aryFuncChnk.ndim
# Number of time points in this chunk:
varNumVol = aryFuncChnk.shape[-1]
# Loop through volumes:
if varNdim == 4:
for idxVol in range(0, varNumVol):
aryFuncChnk[:, :, :, idxVol] = gaussian_filter(
aryFuncChnk[:, :, :, idxVol],
varSdSmthSpt,
order=0,
mode='nearest',
truncate=4.0)
elif varNdim == 5:
varNumMtnDrctns = aryFuncChnk.shape[3]
for idxVol in range(0, varNumVol):
for idxMtn in range(0, varNumMtnDrctns):
aryFuncChnk[:, :, :, idxMtn, idxVol] = gaussian_filter(
aryFuncChnk[:, :, :, idxMtn, idxVol],
varSdSmthSpt,
order=0,
mode='nearest',
truncate=4.0)
# Output array:
return aryFuncChnk | python | def funcSmthSpt(aryFuncChnk, varSdSmthSpt):
"""Apply spatial smoothing to the input data.
Parameters
----------
aryFuncChnk : np.array
TODO
varSdSmthSpt : float (?)
Extent of smoothing.
Returns
-------
aryFuncChnk : np.array
Smoothed data.
"""
varNdim = aryFuncChnk.ndim
# Number of time points in this chunk:
varNumVol = aryFuncChnk.shape[-1]
# Loop through volumes:
if varNdim == 4:
for idxVol in range(0, varNumVol):
aryFuncChnk[:, :, :, idxVol] = gaussian_filter(
aryFuncChnk[:, :, :, idxVol],
varSdSmthSpt,
order=0,
mode='nearest',
truncate=4.0)
elif varNdim == 5:
varNumMtnDrctns = aryFuncChnk.shape[3]
for idxVol in range(0, varNumVol):
for idxMtn in range(0, varNumMtnDrctns):
aryFuncChnk[:, :, :, idxMtn, idxVol] = gaussian_filter(
aryFuncChnk[:, :, :, idxMtn, idxVol],
varSdSmthSpt,
order=0,
mode='nearest',
truncate=4.0)
# Output list:
return aryFuncChnk | [
"def",
"funcSmthSpt",
"(",
"aryFuncChnk",
",",
"varSdSmthSpt",
")",
":",
"varNdim",
"=",
"aryFuncChnk",
".",
"ndim",
"# Number of time points in this chunk:",
"varNumVol",
"=",
"aryFuncChnk",
".",
"shape",
"[",
"-",
"1",
"]",
"# Loop through volumes:",
"if",
"varNdim",
"==",
"4",
":",
"for",
"idxVol",
"in",
"range",
"(",
"0",
",",
"varNumVol",
")",
":",
"aryFuncChnk",
"[",
":",
",",
":",
",",
":",
",",
"idxVol",
"]",
"=",
"gaussian_filter",
"(",
"aryFuncChnk",
"[",
":",
",",
":",
",",
":",
",",
"idxVol",
"]",
",",
"varSdSmthSpt",
",",
"order",
"=",
"0",
",",
"mode",
"=",
"'nearest'",
",",
"truncate",
"=",
"4.0",
")",
"elif",
"varNdim",
"==",
"5",
":",
"varNumMtnDrctns",
"=",
"aryFuncChnk",
".",
"shape",
"[",
"3",
"]",
"for",
"idxVol",
"in",
"range",
"(",
"0",
",",
"varNumVol",
")",
":",
"for",
"idxMtn",
"in",
"range",
"(",
"0",
",",
"varNumMtnDrctns",
")",
":",
"aryFuncChnk",
"[",
":",
",",
":",
",",
":",
",",
"idxMtn",
",",
"idxVol",
"]",
"=",
"gaussian_filter",
"(",
"aryFuncChnk",
"[",
":",
",",
":",
",",
":",
",",
"idxMtn",
",",
"idxVol",
"]",
",",
"varSdSmthSpt",
",",
"order",
"=",
"0",
",",
"mode",
"=",
"'nearest'",
",",
"truncate",
"=",
"4.0",
")",
"# Output list:",
"return",
"aryFuncChnk"
] | Apply spatial smoothing to the input data.
Parameters
----------
aryFuncChnk : np.array
TODO
varSdSmthSpt : float (?)
Extent of smoothing.
Returns
-------
aryFuncChnk : np.array
Smoothed data. | [
"Apply",
"spatial",
"smoothing",
"to",
"the",
"input",
"data",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_filtering.py#L27-L68 |
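The per-volume spatial smoothing in the row above is straightforward to reproduce on synthetic data. A minimal sketch, assuming gaussian_filter comes from scipy.ndimage (the import is not shown in the row) and using a made-up array shape and kernel width:

import numpy as np
from scipy.ndimage import gaussian_filter

# Synthetic 4D data: x, y, z, time (shape is an assumption)
aryFuncChnk = np.random.randn(8, 8, 8, 10).astype(np.float32)
varSdSmthSpt = 1.5

# Smooth every volume independently in space, as funcSmthSpt does:
for idxVol in range(aryFuncChnk.shape[-1]):
    aryFuncChnk[..., idxVol] = gaussian_filter(aryFuncChnk[..., idxVol],
                                               varSdSmthSpt, order=0,
                                               mode='nearest', truncate=4.0)
print(aryFuncChnk.shape)  # (8, 8, 8, 10), smoothed in place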
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_filtering.py | funcSmthTmp | def funcSmthTmp(aryFuncChnk, varSdSmthTmp):
"""Apply temporal smoothing to fMRI data & pRF time course models.
Parameters
----------
aryFuncChnk : np.array
TODO
varSdSmthTmp : float (?)
Extent of smoothing.
Returns
-------
aryFuncChnk : np.array
TODO
"""
# For the filtering to perform well at the ends of the time series, we
# set the method to 'nearest' and place a volume with mean intensity
# (over time) at the beginning and at the end.
aryFuncChnkMean = np.mean(aryFuncChnk,
axis=1,
keepdims=True)
aryFuncChnk = np.concatenate((aryFuncChnkMean,
aryFuncChnk,
aryFuncChnkMean), axis=1)
# In the input data, time goes from left to right. Therefore, we apply
# the filter along axis=1.
aryFuncChnk = gaussian_filter1d(aryFuncChnk,
varSdSmthTmp,
axis=1,
order=0,
mode='nearest',
truncate=4.0)
# Remove mean-intensity volumes at the beginning and at the end:
aryFuncChnk = aryFuncChnk[:, 1:-1]
# Output list:
return aryFuncChnk | python | def funcSmthTmp(aryFuncChnk, varSdSmthTmp):
"""Apply temporal smoothing to fMRI data & pRF time course models.
Parameters
----------
aryFuncChnk : np.array
TODO
varSdSmthTmp : float (?)
Extent of smoothing.
Returns
-------
aryFuncChnk : np.array
TODO
"""
# For the filtering to perform well at the ends of the time series, we
# set the method to 'nearest' and place a volume with mean intensity
# (over time) at the beginning and at the end.
aryFuncChnkMean = np.mean(aryFuncChnk,
axis=1,
keepdims=True)
aryFuncChnk = np.concatenate((aryFuncChnkMean,
aryFuncChnk,
aryFuncChnkMean), axis=1)
# In the input data, time goes from left to right. Therefore, we apply
# the filter along axis=1.
aryFuncChnk = gaussian_filter1d(aryFuncChnk,
varSdSmthTmp,
axis=1,
order=0,
mode='nearest',
truncate=4.0)
# Remove mean-intensity volumes at the beginning and at the end:
aryFuncChnk = aryFuncChnk[:, 1:-1]
# Output list:
return aryFuncChnk | [
"def",
"funcSmthTmp",
"(",
"aryFuncChnk",
",",
"varSdSmthTmp",
")",
":",
"# For the filtering to perform well at the ends of the time series, we",
"# set the method to 'nearest' and place a volume with mean intensity",
"# (over time) at the beginning and at the end.",
"aryFuncChnkMean",
"=",
"np",
".",
"mean",
"(",
"aryFuncChnk",
",",
"axis",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"aryFuncChnk",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryFuncChnkMean",
",",
"aryFuncChnk",
",",
"aryFuncChnkMean",
")",
",",
"axis",
"=",
"1",
")",
"# In the input data, time goes from left to right. Therefore, we apply",
"# the filter along axis=1.",
"aryFuncChnk",
"=",
"gaussian_filter1d",
"(",
"aryFuncChnk",
",",
"varSdSmthTmp",
",",
"axis",
"=",
"1",
",",
"order",
"=",
"0",
",",
"mode",
"=",
"'nearest'",
",",
"truncate",
"=",
"4.0",
")",
"# Remove mean-intensity volumes at the beginning and at the end:",
"aryFuncChnk",
"=",
"aryFuncChnk",
"[",
":",
",",
"1",
":",
"-",
"1",
"]",
"# Output list:",
"return",
"aryFuncChnk"
] | Apply temporal smoothing to fMRI data & pRF time course models.
Parameters
----------
aryFuncChnk : np.array
TODO
varSdSmthTmp : float (?)
Extent of smoothing.
Returns
-------
aryFuncChnk : np.array
TODO | [
"Apply",
"temporal",
"smoothing",
"to",
"fMRI",
"data",
"&",
"pRF",
"time",
"course",
"models",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_filtering.py#L72-L111 |
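funcSmthTmp pads the time axis with a mean-intensity volume on each side before filtering, so the Gaussian kernel has sensible support at the series boundaries, and crops the pads afterwards. A self-contained sketch of that trick on synthetic data (shapes and SD value are assumptions):

import numpy as np
from scipy.ndimage import gaussian_filter1d

aryFuncChnk = np.random.randn(100, 50)  # voxels x time, synthetic
varSdSmthTmp = 2.0

# Pad both ends with the temporal mean, filter, then crop the pads:
aryMean = np.mean(aryFuncChnk, axis=1, keepdims=True)
aryPad = np.concatenate((aryMean, aryFuncChnk, aryMean), axis=1)
arySmth = gaussian_filter1d(aryPad, varSdSmthTmp, axis=1, order=0,
                            mode='nearest', truncate=4.0)[:, 1:-1]
assert arySmth.shape == aryFuncChnk.shape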
MSchnei/pyprf_feature | pyprf_feature/analysis/prepare.py | prep_models | def prep_models(aryPrfTc, varSdSmthTmp=2.0, lgcPrint=True):
"""
Prepare pRF model time courses.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`,
no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same
dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`).
"""
if lgcPrint:
print('------Prepare pRF time course models')
# Define temporal smoothing of pRF time course models
def funcSmthTmp(aryPrfTc, varSdSmthTmp, lgcPrint=True):
"""Apply temporal smoothing to fMRI data & pRF time course models.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following
dimensions: `aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float, positive
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If
`zero`, no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same dimension
as input (`aryPrfTc[x-position, y-position, SD, volume]`).
"""
# adjust the input, if necessary, such that input is 2D, with last
# dim time
tplInpShp = aryPrfTc.shape
aryPrfTc = aryPrfTc.reshape((-1, aryPrfTc.shape[-1]))
# For the filtering to perform well at the ends of the time series, we
# set the method to 'nearest' and place a volume with mean intensity
# (over time) at the beginning and at the end.
aryPrfTcMean = np.mean(aryPrfTc, axis=-1, keepdims=True).reshape(-1, 1)
aryPrfTc = np.concatenate((aryPrfTcMean, aryPrfTc, aryPrfTcMean),
axis=-1)
# In the input data, time goes from left to right. Therefore, we apply
# the filter along the last axis (axis=-1).
aryPrfTc = gaussian_filter1d(aryPrfTc.astype('float32'), varSdSmthTmp,
axis=-1, order=0, mode='nearest',
truncate=4.0)
# Remove mean-intensity volumes at the beginning and at the end:
aryPrfTc = aryPrfTc[..., 1:-1]
# Output array:
return aryPrfTc.reshape(tplInpShp).astype('float16')
# Perform temporal smoothing of pRF time course models
if 0.0 < varSdSmthTmp:
if lgcPrint:
print('---------Temporal smoothing on pRF time course models')
print('------------SD tmp smooth is: ' + str(varSdSmthTmp))
aryPrfTc = funcSmthTmp(aryPrfTc, varSdSmthTmp)
# Z-score the prf time course models
if lgcPrint:
print('---------Zscore the pRF time course models')
# De-mean the prf time course models:
aryPrfTc = np.subtract(aryPrfTc, np.mean(aryPrfTc, axis=-1)[..., None])
# Standardize the prf time course models:
# In order to avoid division by zero, only divide those voxels with a
# standard deviation greater than zero:
aryTmpStd = np.std(aryPrfTc, axis=-1)
aryTmpLgc = np.greater(aryTmpStd, np.array([0.0]))
aryPrfTc[aryTmpLgc, :] = np.divide(aryPrfTc[aryTmpLgc, :],
aryTmpStd[aryTmpLgc, None])
return aryPrfTc | python | def prep_models(aryPrfTc, varSdSmthTmp=2.0, lgcPrint=True):
"""
Prepare pRF model time courses.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`,
no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same
dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`).
"""
if lgcPrint:
print('------Prepare pRF time course models')
# Define temporal smoothing of pRF time course models
def funcSmthTmp(aryPrfTc, varSdSmthTmp, lgcPrint=True):
"""Apply temporal smoothing to fMRI data & pRF time course models.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following
dimensions: `aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float, positive
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If
`zero`, no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same dimension
as input (`aryPrfTc[x-position, y-position, SD, volume]`).
"""
# adjust the input, if necessary, such that input is 2D, with last
# dim time
tplInpShp = aryPrfTc.shape
aryPrfTc = aryPrfTc.reshape((-1, aryPrfTc.shape[-1]))
# For the filtering to perform well at the ends of the time series, we
# set the method to 'nearest' and place a volume with mean intensity
# (over time) at the beginning and at the end.
aryPrfTcMean = np.mean(aryPrfTc, axis=-1, keepdims=True).reshape(-1, 1)
aryPrfTc = np.concatenate((aryPrfTcMean, aryPrfTc, aryPrfTcMean),
axis=-1)
# In the input data, time goes from left to right. Therefore, we apply
# the filter along the last axis (axis=-1).
aryPrfTc = gaussian_filter1d(aryPrfTc.astype('float32'), varSdSmthTmp,
axis=-1, order=0, mode='nearest',
truncate=4.0)
# Remove mean-intensity volumes at the beginning and at the end:
aryPrfTc = aryPrfTc[..., 1:-1]
# Output array:
return aryPrfTc.reshape(tplInpShp).astype('float16')
# Perform temporal smoothing of pRF time course models
if 0.0 < varSdSmthTmp:
if lgcPrint:
print('---------Temporal smoothing on pRF time course models')
print('------------SD tmp smooth is: ' + str(varSdSmthTmp))
aryPrfTc = funcSmthTmp(aryPrfTc, varSdSmthTmp)
# Z-score the prf time course models
if lgcPrint:
print('---------Zscore the pRF time course models')
# De-mean the prf time course models:
aryPrfTc = np.subtract(aryPrfTc, np.mean(aryPrfTc, axis=-1)[..., None])
# Standardize the prf time course models:
# In order to avoid division by zero, only divide those voxels with a
# standard deviation greater than zero:
aryTmpStd = np.std(aryPrfTc, axis=-1)
aryTmpLgc = np.greater(aryTmpStd, np.array([0.0]))
aryPrfTc[aryTmpLgc, :] = np.divide(aryPrfTc[aryTmpLgc, :],
aryTmpStd[aryTmpLgc, None])
return aryPrfTc | [
"def",
"prep_models",
"(",
"aryPrfTc",
",",
"varSdSmthTmp",
"=",
"2.0",
",",
"lgcPrint",
"=",
"True",
")",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'------Prepare pRF time course models'",
")",
"# Define temporal smoothing of pRF time course models",
"def",
"funcSmthTmp",
"(",
"aryPrfTc",
",",
"varSdSmthTmp",
",",
"lgcPrint",
"=",
"True",
")",
":",
"\"\"\"Apply temporal smoothing to fMRI data & pRF time course models.\n\n Parameters\n ----------\n aryPrfTc : np.array\n 4D numpy array with pRF time course models, with following\n dimensions: `aryPrfTc[x-position, y-position, SD, volume]`.\n varSdSmthTmp : float, positive\n Extent of temporal smoothing that is applied to functional data and\n pRF time course models, [SD of Gaussian kernel, in seconds]. If\n `zero`, no temporal smoothing is applied.\n lgcPrint : boolean\n Whether print statements should be executed.\n\n Returns\n -------\n aryPrfTc : np.array\n 4D numpy array with prepared pRF time course models, same dimension\n as input (`aryPrfTc[x-position, y-position, SD, volume]`).\n \"\"\"",
"# adjust the input, if necessary, such that input is 2D, with last",
"# dim time",
"tplInpShp",
"=",
"aryPrfTc",
".",
"shape",
"aryPrfTc",
"=",
"aryPrfTc",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"aryPrfTc",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"# For the filtering to perform well at the ends of the time series, we",
"# set the method to 'nearest' and place a volume with mean intensity",
"# (over time) at the beginning and at the end.",
"aryPrfTcMean",
"=",
"np",
".",
"mean",
"(",
"aryPrfTc",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"aryPrfTc",
"=",
"np",
".",
"concatenate",
"(",
"(",
"aryPrfTcMean",
",",
"aryPrfTc",
",",
"aryPrfTcMean",
")",
",",
"axis",
"=",
"-",
"1",
")",
"# In the input data, time goes from left to right. Therefore, we apply",
"# the filter along axis=1.",
"aryPrfTc",
"=",
"gaussian_filter1d",
"(",
"aryPrfTc",
".",
"astype",
"(",
"'float32'",
")",
",",
"varSdSmthTmp",
",",
"axis",
"=",
"-",
"1",
",",
"order",
"=",
"0",
",",
"mode",
"=",
"'nearest'",
",",
"truncate",
"=",
"4.0",
")",
"# Remove mean-intensity volumes at the beginning and at the end:",
"aryPrfTc",
"=",
"aryPrfTc",
"[",
"...",
",",
"1",
":",
"-",
"1",
"]",
"# Output array:",
"return",
"aryPrfTc",
".",
"reshape",
"(",
"tplInpShp",
")",
".",
"astype",
"(",
"'float16'",
")",
"# Perform temporal smoothing of pRF time course models",
"if",
"0.0",
"<",
"varSdSmthTmp",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'---------Temporal smoothing on pRF time course models'",
")",
"print",
"(",
"'------------SD tmp smooth is: '",
"+",
"str",
"(",
"varSdSmthTmp",
")",
")",
"aryPrfTc",
"=",
"funcSmthTmp",
"(",
"aryPrfTc",
",",
"varSdSmthTmp",
")",
"# Z-score the prf time course models",
"if",
"lgcPrint",
":",
"print",
"(",
"'---------Zscore the pRF time course models'",
")",
"# De-mean the prf time course models:",
"aryPrfTc",
"=",
"np",
".",
"subtract",
"(",
"aryPrfTc",
",",
"np",
".",
"mean",
"(",
"aryPrfTc",
",",
"axis",
"=",
"-",
"1",
")",
"[",
"...",
",",
"None",
"]",
")",
"# Standardize the prf time course models:",
"# In order to avoid devision by zero, only divide those voxels with a",
"# standard deviation greater than zero:",
"aryTmpStd",
"=",
"np",
".",
"std",
"(",
"aryPrfTc",
",",
"axis",
"=",
"-",
"1",
")",
"aryTmpLgc",
"=",
"np",
".",
"greater",
"(",
"aryTmpStd",
",",
"np",
".",
"array",
"(",
"[",
"0.0",
"]",
")",
")",
"aryPrfTc",
"[",
"aryTmpLgc",
",",
":",
"]",
"=",
"np",
".",
"divide",
"(",
"aryPrfTc",
"[",
"aryTmpLgc",
",",
":",
"]",
",",
"aryTmpStd",
"[",
"aryTmpLgc",
",",
"None",
"]",
")",
"return",
"aryPrfTc"
] | Prepare pRF model time courses.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`,
no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same
dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`). | [
"Prepare",
"pRF",
"model",
"time",
"courses",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/prepare.py#L25-L119 |
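The z-scoring step in prep_models only rescales model time courses whose standard deviation is greater than zero, so flat (constant) models stay at zero instead of producing division-by-zero NaNs. A toy demonstration of that guard on invented data:

import numpy as np

aryPrfTc = np.zeros((3, 5), dtype=np.float32)
aryPrfTc[0, :] = [1., 2., 3., 4., 5.]  # varying model time course
aryPrfTc[1, :] = 7.                    # flat model -> std of zero

# De-mean, then scale only rows with non-zero standard deviation:
aryPrfTc = aryPrfTc - np.mean(aryPrfTc, axis=-1)[..., None]
vecStd = np.std(aryPrfTc, axis=-1)
vecLgc = np.greater(vecStd, 0.0)
aryPrfTc[vecLgc, :] = aryPrfTc[vecLgc, :] / vecStd[vecLgc, None]

print(np.std(aryPrfTc, axis=-1))  # approx. [1. 0. 0.]; flat rows stay zero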
MSchnei/pyprf_feature | pyprf_feature/analysis/prepare.py | prep_func | def prep_func(strPathNiiMask, lstPathNiiFunc, varAvgThr=100.,
varVarThr=0.0001, strPrePro='demean'):
"""
Load & prepare functional data.
Parameters
----------
strPathNiiMask: str
Path to mask used to restrict pRF model finding. Only voxels with
a value greater than zero in the mask are considered.
lstPathNiiFunc : list
List of paths of functional data (nii files).
varAvgThr : float, positive, default = 100.
Float. Voxels that have at least one run with a mean lower than this
(before demeaning) will be excluded from model fitting.
varVarThr : float, positive, default = 0.0001
Float. Voxels that have at least one run with a variance lower than
this (after demeaning) will be excluded from model fitting.
strPrePro : string, default 'demean'
Preprocessing that will be applied to the data.
By default they are demeaned.
Returns
-------
aryLgcMsk : np.array
3D numpy array with logical values. Externally supplied mask (e.g. grey
matter mask). Voxels that are `False` in the mask are excluded.
vecLgcIncl : np.array
1D numpy array containing logical values. One value per voxel after
mask has been applied. If `True`, the variance and mean of the voxel's
time course are greater than the provided thresholds in all runs and
the voxel is included in the output array (`aryFunc`). If `False`, the
variance or mean of the voxel's time course is lower than threshold in
at least one run and the voxel has been excluded from the output
(`aryFunc`). This is to avoid problems in the subsequent model fitting.
This array is necessary to put results into original dimensions after
model fitting.
hdrMsk : nibabel-header-object
Nii header of mask.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of mask nii data.
aryFunc : np.array
2D numpy array containing prepared functional data, of the form
aryFunc[voxelCount, time].
tplNiiShp : tuple
Spatial dimensions of input nii data (number of voxels in x, y, z
direction). The data are reshaped during preparation, this
information is needed to fit final output into original spatial
dimensions.
Notes
-----
Functional data is loaded from disk. The functional data is reshaped into
the form aryFunc[voxel, time]. A mask is applied (externally supplied, e.g.
a grey matter mask). Subsequently, the functional data is de-meaned.
"""
print('------Load & prepare nii data')
# Load mask (to restrict model fitting):
aryMask, hdrMsk, aryAff = load_nii(strPathNiiMask)
# Mask is loaded as float32, but is better represented as integer:
aryMask = np.array(aryMask).astype(np.int16)
# Number of non-zero voxels in mask:
# varNumVoxMsk = int(np.count_nonzero(aryMask))
# Dimensions of nii data:
tplNiiShp = aryMask.shape
# Total number of voxels:
varNumVoxTlt = (tplNiiShp[0] * tplNiiShp[1] * tplNiiShp[2])
# Reshape mask:
aryMask = np.reshape(aryMask, varNumVoxTlt)
# List for arrays with functional data (possibly several runs):
lstFunc = []
# List for averages of the individual runs (before demeaning)
lstFuncAvg = []
# List for variances of the individual runs (after demeaning)
lstFuncVar = []
# Number of runs:
varNumRun = len(lstPathNiiFunc)
# Loop through runs and load data:
for idxRun in range(varNumRun):
print(('---------Prepare run ' + str(idxRun + 1)))
# Load 4D nii data:
aryTmpFunc, _, _ = load_nii(lstPathNiiFunc[idxRun])
# Dimensions of nii data (including temporal dimension; spatial
# dimensions need to be the same for mask & functional data):
tplNiiShp = aryTmpFunc.shape
# Reshape functional nii data, from now on of the form
# aryTmpFunc[voxelCount, time]:
aryTmpFunc = np.reshape(aryTmpFunc, [varNumVoxTlt, tplNiiShp[3]])
# Apply mask:
print('------------Mask')
aryLgcMsk = np.greater(aryMask.astype(np.int16),
np.array([0], dtype=np.int16)[0])
aryTmpFunc = aryTmpFunc[aryLgcMsk, :]
# save the mean of the run
lstFuncAvg.append(np.mean(aryTmpFunc, axis=1, dtype=np.float32))
# also save the variance of the run
lstFuncVar.append(np.var(aryTmpFunc, axis=1, dtype=np.float32))
# De-mean functional data:
if strPrePro == 'demean':
print('------------Demean')
aryTmpFunc = np.subtract(aryTmpFunc,
np.mean(aryTmpFunc,
axis=1,
dtype=np.float32)[:, None])
elif strPrePro == 'zscore':
print('------------Zscore')
aryTmpFunc = np.subtract(aryTmpFunc,
np.mean(aryTmpFunc,
axis=1,
dtype=np.float32)[:, None])
# Standardize the data time courses:
# In order to avoid division by zero, only divide
# those voxels with a standard deviation greater
# than zero:
aryTmpStd = np.std(aryTmpFunc, axis=-1)
aryTmpLgc = np.greater(aryTmpStd, np.array([0.0]))
aryTmpFunc[aryTmpLgc, :] = np.divide(aryTmpFunc[aryTmpLgc, :],
aryTmpStd[aryTmpLgc, None])
# Put prepared functional data of current run into list:
lstFunc.append(aryTmpFunc)
del(aryTmpFunc)
# Put functional data from separate runs into one array. 2D array of the
# form aryFunc[voxelCount, time]
aryFunc = np.concatenate(lstFunc, axis=1).astype(np.float32, copy=False)
del(lstFunc)
# Put the averages (before demeaning) from the separate runs into one
# array. 2D array of the form aryFuncVar[voxelCount, nr of runs]
aryFuncAvg = np.stack(lstFuncAvg, axis=1).astype(np.float32, copy=False)
del(lstFuncAvg)
# Put the variance (after demeaning) from the separate runs into one array.
# 2D array of the form aryFuncVar[voxelCount, nr of runs]
aryFuncVar = np.stack(lstFuncVar, axis=1).astype(np.float32, copy=False)
del(lstFuncVar)
# Especially if data were recorded in different sessions, there can
# sometimes be voxels that have close to zero signal in runs from one
# session but regular signal in the runs from another session. These voxels
# are very few, are located at the edge of the functional volume and can cause
# problems during model fitting. They are therefore excluded.
# Is the mean greater than threshold?
aryLgcAvg = np.greater(aryFuncAvg,
np.array([varAvgThr]).astype(np.float32)[0])
# Mean needs to be greater than threshold in every single run
vecLgcAvg = np.all(aryLgcAvg, axis=1)
# Voxels that are outside the brain and have no, or very little, signal
# should not be included in the pRF model finding. We take the variance
# over time and exclude voxels with a suspiciously low variance, if they
# have low variance in at least one run. Because the data given into the
# cython or GPU function has float32 precision, we calculate the variance
# on data with float32 precision.
# Is the variance greater than threshold?
aryLgcVar = np.greater(aryFuncVar,
np.array([varVarThr]).astype(np.float32)[0])
# Variance needs to be greater than threshold in every single run
vecLgcVar = np.all(aryLgcVar, axis=1)
# Are there any nan values in the functional time series?
vecLgcNan = np.invert(np.any(np.isnan(aryFunc), axis=1))
# combine the logical vectors for exclusion resulting from low variance and
# low mean signal time course
vecLgcIncl = np.logical_and(vecLgcAvg, vecLgcVar)
# combine logical vectors for mean/variance with vector for NaN exclusion
vecLgcIncl = np.logical_and(vecLgcIncl, vecLgcNan)
# Array with functional data for which conditions (mask inclusion and
# cutoff value) are fulfilled:
aryFunc = aryFunc[vecLgcIncl, :]
# print info about the exclusion of voxels
print('---------Minimum mean threshold for voxels applied at: ' +
str(varAvgThr))
print('---------Minimum variance threshold for voxels applied at: ' +
str(varVarThr))
print('---------Number of voxels excluded due to low mean or variance: ' +
str(np.sum(np.invert(vecLgcIncl))))
return aryLgcMsk, vecLgcIncl, hdrMsk, aryAff, aryFunc, tplNiiShp | python | def prep_func(strPathNiiMask, lstPathNiiFunc, varAvgThr=100.,
varVarThr=0.0001, strPrePro='demean'):
"""
Load & prepare functional data.
Parameters
----------
strPathNiiMask: str
Path to mask used to restrict pRF model finding. Only voxels with
a value greater than zero in the mask are considered.
lstPathNiiFunc : list
List of paths of functional data (nii files).
varAvgThr : float, positive, default = 100.
Float. Voxels that have at least one run with a mean lower than this
(before demeaning) will be excluded from model fitting.
varVarThr : float, positive, default = 0.0001
Float. Voxels that have at least one run with a variance lower than
this (after demeaning) will be excluded from model fitting.
strPrePro : string, default 'demean'
Preprocessing that will be applied to the data.
By default they are demeaned.
Returns
-------
aryLgcMsk : np.array
3D numpy array with logical values. Externally supplied mask (e.g. grey
matter mask). Voxels that are `False` in the mask are excluded.
vecLgcIncl : np.array
1D numpy array containing logical values. One value per voxel after
mask has been applied. If `True`, the variance and mean of the voxel's
time course are greater than the provided thresholds in all runs and
the voxel is included in the output array (`aryFunc`). If `False`, the
variance or mean of the voxel's time course is lower than threshold in
at least one run and the voxel has been excluded from the output
(`aryFunc`). This is to avoid problems in the subsequent model fitting.
This array is necessary to put results into original dimensions after
model fitting.
hdrMsk : nibabel-header-object
Nii header of mask.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of mask nii data.
aryFunc : np.array
2D numpy array containing prepared functional data, of the form
aryFunc[voxelCount, time].
tplNiiShp : tuple
Spatial dimensions of input nii data (number of voxels in x, y, z
direction). The data are reshaped during preparation, this
information is needed to fit final output into original spatial
dimensions.
Notes
-----
Functional data is loaded from disk. The functional data is reshaped into
the form aryFunc[voxel, time]. A mask is applied (externally supplied, e.g.
a grey matter mask). Subsequently, the functional data is de-meaned.
"""
print('------Load & prepare nii data')
# Load mask (to restrict model fitting):
aryMask, hdrMsk, aryAff = load_nii(strPathNiiMask)
# Mask is loaded as float32, but is better represented as integer:
aryMask = np.array(aryMask).astype(np.int16)
# Number of non-zero voxels in mask:
# varNumVoxMsk = int(np.count_nonzero(aryMask))
# Dimensions of nii data:
tplNiiShp = aryMask.shape
# Total number of voxels:
varNumVoxTlt = (tplNiiShp[0] * tplNiiShp[1] * tplNiiShp[2])
# Reshape mask:
aryMask = np.reshape(aryMask, varNumVoxTlt)
# List for arrays with functional data (possibly several runs):
lstFunc = []
# List for averages of the individual runs (before demeaning)
lstFuncAvg = []
# List for variances of the individual runs (after demeaning)
lstFuncVar = []
# Number of runs:
varNumRun = len(lstPathNiiFunc)
# Loop through runs and load data:
for idxRun in range(varNumRun):
print(('---------Prepare run ' + str(idxRun + 1)))
# Load 4D nii data:
aryTmpFunc, _, _ = load_nii(lstPathNiiFunc[idxRun])
# Dimensions of nii data (including temporal dimension; spatial
# dimensions need to be the same for mask & functional data):
tplNiiShp = aryTmpFunc.shape
# Reshape functional nii data, from now on of the form
# aryTmpFunc[voxelCount, time]:
aryTmpFunc = np.reshape(aryTmpFunc, [varNumVoxTlt, tplNiiShp[3]])
# Apply mask:
print('------------Mask')
aryLgcMsk = np.greater(aryMask.astype(np.int16),
np.array([0], dtype=np.int16)[0])
aryTmpFunc = aryTmpFunc[aryLgcMsk, :]
# save the mean of the run
lstFuncAvg.append(np.mean(aryTmpFunc, axis=1, dtype=np.float32))
# also save the variance of the run
lstFuncVar.append(np.var(aryTmpFunc, axis=1, dtype=np.float32))
# De-mean functional data:
if strPrePro == 'demean':
print('------------Demean')
aryTmpFunc = np.subtract(aryTmpFunc,
np.mean(aryTmpFunc,
axis=1,
dtype=np.float32)[:, None])
elif strPrePro == 'zscore':
print('------------Zscore')
aryTmpFunc = np.subtract(aryTmpFunc,
np.mean(aryTmpFunc,
axis=1,
dtype=np.float32)[:, None])
# Standardize the data time courses:
# In order to avoid division by zero, only divide
# those voxels with a standard deviation greater
# than zero:
aryTmpStd = np.std(aryTmpFunc, axis=-1)
aryTmpLgc = np.greater(aryTmpStd, np.array([0.0]))
aryTmpFunc[aryTmpLgc, :] = np.divide(aryTmpFunc[aryTmpLgc, :],
aryTmpStd[aryTmpLgc, None])
# Put prepared functional data of current run into list:
lstFunc.append(aryTmpFunc)
del(aryTmpFunc)
# Put functional data from separate runs into one array. 2D array of the
# form aryFunc[voxelCount, time]
aryFunc = np.concatenate(lstFunc, axis=1).astype(np.float32, copy=False)
del(lstFunc)
# Put the averages (before demeaning) from the separate runs into one
# array. 2D array of the form aryFuncVar[voxelCount, nr of runs]
aryFuncAvg = np.stack(lstFuncAvg, axis=1).astype(np.float32, copy=False)
del(lstFuncAvg)
# Put the variance (after demeaning) from the separate runs into one array.
# 2D array of the form aryFuncVar[voxelCount, nr of runs]
aryFuncVar = np.stack(lstFuncVar, axis=1).astype(np.float32, copy=False)
del(lstFuncVar)
# Especially if data were recorded in different sessions, there can
# sometimes be voxels that have close to zero signal in runs from one
# session but regular signal in the runs from another session. These voxels
# are very few, are located at the edge of the functional volume and can cause
# problems during model fitting. They are therefore excluded.
# Is the mean greater than threshold?
aryLgcAvg = np.greater(aryFuncAvg,
np.array([varAvgThr]).astype(np.float32)[0])
# Mean needs to be greater than threshold in every single run
vecLgcAvg = np.all(aryLgcAvg, axis=1)
# Voxels that are outside the brain and have no, or very little, signal
# should not be included in the pRF model finding. We take the variance
# over time and exclude voxels with a suspiciously low variance, if they
# have low variance in at least one run. Because the data given into the
# cython or GPU function has float32 precision, we calculate the variance
# on data with float32 precision.
# Is the variance greater than threshold?
aryLgcVar = np.greater(aryFuncVar,
np.array([varVarThr]).astype(np.float32)[0])
# Variance needs to be greater than threshold in every single run
vecLgcVar = np.all(aryLgcVar, axis=1)
# Are there any nan values in the functional time series?
vecLgcNan = np.invert(np.any(np.isnan(aryFunc), axis=1))
# combine the logical vectors for exclusion resulting from low variance and
# low mean signal time course
vecLgcIncl = np.logical_and(vecLgcAvg, vecLgcVar)
# combine logical vectors for mean/variance with vector for NaN exclusion
vecLgcIncl = np.logical_and(vecLgcIncl, vecLgcNan)
# Array with functional data for which conditions (mask inclusion and
# cutoff value) are fulfilled:
aryFunc = aryFunc[vecLgcIncl, :]
# print info about the exclusion of voxels
print('---------Minimum mean threshold for voxels applied at: ' +
str(varAvgThr))
print('---------Minimum variance threshold for voxels applied at: ' +
str(varVarThr))
print('---------Number of voxels excluded due to low mean or variance: ' +
str(np.sum(np.invert(vecLgcIncl))))
return aryLgcMsk, vecLgcIncl, hdrMsk, aryAff, aryFunc, tplNiiShp | [
"def",
"prep_func",
"(",
"strPathNiiMask",
",",
"lstPathNiiFunc",
",",
"varAvgThr",
"=",
"100.",
",",
"varVarThr",
"=",
"0.0001",
",",
"strPrePro",
"=",
"'demean'",
")",
":",
"print",
"(",
"'------Load & prepare nii data'",
")",
"# Load mask (to restrict model fitting):",
"aryMask",
",",
"hdrMsk",
",",
"aryAff",
"=",
"load_nii",
"(",
"strPathNiiMask",
")",
"# Mask is loaded as float32, but is better represented as integer:",
"aryMask",
"=",
"np",
".",
"array",
"(",
"aryMask",
")",
".",
"astype",
"(",
"np",
".",
"int16",
")",
"# Number of non-zero voxels in mask:",
"# varNumVoxMsk = int(np.count_nonzero(aryMask))",
"# Dimensions of nii data:",
"tplNiiShp",
"=",
"aryMask",
".",
"shape",
"# Total number of voxels:",
"varNumVoxTlt",
"=",
"(",
"tplNiiShp",
"[",
"0",
"]",
"*",
"tplNiiShp",
"[",
"1",
"]",
"*",
"tplNiiShp",
"[",
"2",
"]",
")",
"# Reshape mask:",
"aryMask",
"=",
"np",
".",
"reshape",
"(",
"aryMask",
",",
"varNumVoxTlt",
")",
"# List for arrays with functional data (possibly several runs):",
"lstFunc",
"=",
"[",
"]",
"# List for averages of the individual runs (before demeaning)",
"lstFuncAvg",
"=",
"[",
"]",
"# List for variances of the individual runs (after demeaning)",
"lstFuncVar",
"=",
"[",
"]",
"# Number of runs:",
"varNumRun",
"=",
"len",
"(",
"lstPathNiiFunc",
")",
"# Loop through runs and load data:",
"for",
"idxRun",
"in",
"range",
"(",
"varNumRun",
")",
":",
"print",
"(",
"(",
"'---------Prepare run '",
"+",
"str",
"(",
"idxRun",
"+",
"1",
")",
")",
")",
"# Load 4D nii data:",
"aryTmpFunc",
",",
"_",
",",
"_",
"=",
"load_nii",
"(",
"lstPathNiiFunc",
"[",
"idxRun",
"]",
")",
"# Dimensions of nii data (including temporal dimension; spatial",
"# dimensions need to be the same for mask & functional data):",
"tplNiiShp",
"=",
"aryTmpFunc",
".",
"shape",
"# Reshape functional nii data, from now on of the form",
"# aryTmpFunc[voxelCount, time]:",
"aryTmpFunc",
"=",
"np",
".",
"reshape",
"(",
"aryTmpFunc",
",",
"[",
"varNumVoxTlt",
",",
"tplNiiShp",
"[",
"3",
"]",
"]",
")",
"# Apply mask:",
"print",
"(",
"'------------Mask'",
")",
"aryLgcMsk",
"=",
"np",
".",
"greater",
"(",
"aryMask",
".",
"astype",
"(",
"np",
".",
"int16",
")",
",",
"np",
".",
"array",
"(",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int16",
")",
"[",
"0",
"]",
")",
"aryTmpFunc",
"=",
"aryTmpFunc",
"[",
"aryLgcMsk",
",",
":",
"]",
"# save the mean of the run",
"lstFuncAvg",
".",
"append",
"(",
"np",
".",
"mean",
"(",
"aryTmpFunc",
",",
"axis",
"=",
"1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
")",
"# also save the variance of the run",
"lstFuncVar",
".",
"append",
"(",
"np",
".",
"var",
"(",
"aryTmpFunc",
",",
"axis",
"=",
"1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
")",
"# De-mean functional data:",
"if",
"strPrePro",
"==",
"'demean'",
":",
"print",
"(",
"'------------Demean'",
")",
"aryTmpFunc",
"=",
"np",
".",
"subtract",
"(",
"aryTmpFunc",
",",
"np",
".",
"mean",
"(",
"aryTmpFunc",
",",
"axis",
"=",
"1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"[",
":",
",",
"None",
"]",
")",
"elif",
"strPrePro",
"==",
"'zscore'",
":",
"print",
"(",
"'------------Zscore'",
")",
"aryTmpFunc",
"=",
"np",
".",
"subtract",
"(",
"aryTmpFunc",
",",
"np",
".",
"mean",
"(",
"aryTmpFunc",
",",
"axis",
"=",
"1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"[",
":",
",",
"None",
"]",
")",
"# Standardize the data time courses:",
"# In order to avoid devision by zero, only divide",
"# those voxels with a standard deviation greater",
"# than zero:",
"aryTmpStd",
"=",
"np",
".",
"std",
"(",
"aryTmpFunc",
",",
"axis",
"=",
"-",
"1",
")",
"aryTmpLgc",
"=",
"np",
".",
"greater",
"(",
"aryTmpStd",
",",
"np",
".",
"array",
"(",
"[",
"0.0",
"]",
")",
")",
"aryTmpFunc",
"[",
"aryTmpLgc",
",",
":",
"]",
"=",
"np",
".",
"divide",
"(",
"aryTmpFunc",
"[",
"aryTmpLgc",
",",
":",
"]",
",",
"aryTmpStd",
"[",
"aryTmpLgc",
",",
"None",
"]",
")",
"# Put prepared functional data of current run into list:",
"lstFunc",
".",
"append",
"(",
"aryTmpFunc",
")",
"del",
"(",
"aryTmpFunc",
")",
"# Put functional data from separate runs into one array. 2D array of the",
"# form aryFunc[voxelCount, time]",
"aryFunc",
"=",
"np",
".",
"concatenate",
"(",
"lstFunc",
",",
"axis",
"=",
"1",
")",
".",
"astype",
"(",
"np",
".",
"float32",
",",
"copy",
"=",
"False",
")",
"del",
"(",
"lstFunc",
")",
"# Put the averages (before demeaning) from the separate runs into one",
"# array. 2D array of the form aryFuncVar[voxelCount, nr of runs]",
"aryFuncAvg",
"=",
"np",
".",
"stack",
"(",
"lstFuncAvg",
",",
"axis",
"=",
"1",
")",
".",
"astype",
"(",
"np",
".",
"float32",
",",
"copy",
"=",
"False",
")",
"del",
"(",
"lstFuncAvg",
")",
"# Put the variance (after demeaning) from the separate runs into one array.",
"# 2D array of the form aryFuncVar[voxelCount, nr of runs]",
"aryFuncVar",
"=",
"np",
".",
"stack",
"(",
"lstFuncVar",
",",
"axis",
"=",
"1",
")",
".",
"astype",
"(",
"np",
".",
"float32",
",",
"copy",
"=",
"False",
")",
"del",
"(",
"lstFuncVar",
")",
"# Especially if data were recorded in different sessions, there can",
"# sometimes be voxels that have close to zero signal in runs from one",
"# session but regular signal in the runs from another session. These voxels",
"# are very few, are located at the edge of the functional and can cause",
"# problems during model fitting. They are therefore excluded.",
"# Is the mean greater than threshold?",
"aryLgcAvg",
"=",
"np",
".",
"greater",
"(",
"aryFuncAvg",
",",
"np",
".",
"array",
"(",
"[",
"varAvgThr",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"[",
"0",
"]",
")",
"# Mean needs to be greater than threshold in every single run",
"vecLgcAvg",
"=",
"np",
".",
"all",
"(",
"aryLgcAvg",
",",
"axis",
"=",
"1",
")",
"# Voxels that are outside the brain and have no, or very little, signal",
"# should not be included in the pRF model finding. We take the variance",
"# over time and exclude voxels with a suspiciously low variance, if they",
"# have low variance in at least one run. Because the data given into the",
"# cython or GPU function has float32 precision, we calculate the variance",
"# on data with float32 precision.",
"# Is the variance greater than threshold?",
"aryLgcVar",
"=",
"np",
".",
"greater",
"(",
"aryFuncVar",
",",
"np",
".",
"array",
"(",
"[",
"varVarThr",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"[",
"0",
"]",
")",
"# Variance needs to be greater than threshold in every single run",
"vecLgcVar",
"=",
"np",
".",
"all",
"(",
"aryLgcVar",
",",
"axis",
"=",
"1",
")",
"# Are there any nan values in the functional time series?",
"vecLgcNan",
"=",
"np",
".",
"invert",
"(",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"aryFunc",
")",
",",
"axis",
"=",
"1",
")",
")",
"# combine the logical vectors for exclusion resulting from low variance and",
"# low mean signal time course",
"vecLgcIncl",
"=",
"np",
".",
"logical_and",
"(",
"vecLgcAvg",
",",
"vecLgcVar",
")",
"# combine logical vectors for mean/variance with vector for nan exclsion",
"vecLgcIncl",
"=",
"np",
".",
"logical_and",
"(",
"vecLgcIncl",
",",
"vecLgcNan",
")",
"# Array with functional data for which conditions (mask inclusion and",
"# cutoff value) are fullfilled:",
"aryFunc",
"=",
"aryFunc",
"[",
"vecLgcIncl",
",",
":",
"]",
"# print info about the exclusion of voxels",
"print",
"(",
"'---------Minimum mean threshold for voxels applied at: '",
"+",
"str",
"(",
"varAvgThr",
")",
")",
"print",
"(",
"'---------Minimum variance threshold for voxels applied at: '",
"+",
"str",
"(",
"varVarThr",
")",
")",
"print",
"(",
"'---------Number of voxels excluded due to low mean or variance: '",
"+",
"str",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"invert",
"(",
"vecLgcIncl",
")",
")",
")",
")",
"return",
"aryLgcMsk",
",",
"vecLgcIncl",
",",
"hdrMsk",
",",
"aryAff",
",",
"aryFunc",
",",
"tplNiiShp"
] | Load & prepare functional data.
Parameters
----------
strPathNiiMask: str
Path to mask used to restrict pRF model finding. Only voxels with
a value greater than zero in the mask are considered.
lstPathNiiFunc : list
List of paths of functional data (nii files).
varAvgThr : float, positive, default = 100.
Float. Voxels that have at least one run with a mean lower than this
(before demeaning) will be excluded from model fitting.
varVarThr : float, positive, default = 0.0001
Float. Voxels that have at least one run with a variance lower than
this (after demeaning) will be excluded from model fitting.
strPrePro : string, default 'demean'
Preprocessing that will be applied to the data.
By default they are demeaned.
Returns
-------
aryLgcMsk : np.array
3D numpy array with logical values. Externally supplied mask (e.g. grey
matter mask). Voxels that are `False` in the mask are excluded.
vecLgcIncl : np.array
1D numpy array containing logical values. One value per voxel after
mask has been applied. If `True`, the variance and mean of the voxel's
time course are greater than the provided thresholds in all runs and
the voxel is included in the output array (`aryFunc`). If `False`, the
variance or mean of the voxel's time course is lower than threshold in
at least one run and the voxel has been excluded from the output
(`aryFunc`). This is to avoid problems in the subsequent model fitting.
This array is necessary to put results into original dimensions after
model fitting.
hdrMsk : nibabel-header-object
Nii header of mask.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of mask nii data.
aryFunc : np.array
2D numpy array containing prepared functional data, of the form
aryFunc[voxelCount, time].
tplNiiShp : tuple
Spatial dimensions of input nii data (number of voxels in x, y, z
direction). The data are reshaped during preparation, this
information is needed to fit final output into original spatial
dimensions.
Notes
-----
Functional data is loaded from disk. The functional data is reshaped into
the form aryFunc[voxel, time]. A mask is applied (externally supplied, e.g.
a grey matter mask). Subsequently, the functional data is de-meaned. | [
"Load",
"&",
"prepare",
"functional",
"data",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/prepare.py#L122-L328 |
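prep_func folds three per-voxel checks into one inclusion vector: the mean must exceed the threshold in every run, the variance must exceed its threshold in every run, and the time series must be free of NaNs. A toy reproduction of that logic; the thresholds match the function's defaults, all data values are invented:

import numpy as np

# 4 voxels x 2 runs: per-run means and variances
aryFuncAvg = np.array([[120., 130.], [90., 140.],
                       [150., 160.], [200., 210.]], dtype=np.float32)
aryFuncVar = np.array([[1., 2.], [3., 4.],
                       [0., 5.], [6., 7.]], dtype=np.float32)
aryFunc = np.random.randn(4, 20).astype(np.float32)
aryFunc[3, 0] = np.nan  # voxel 3 carries a NaN

vecLgcAvg = np.all(np.greater(aryFuncAvg, 100.), axis=1)    # voxel 1 fails
vecLgcVar = np.all(np.greater(aryFuncVar, 0.0001), axis=1)  # voxel 2 fails
vecLgcNan = np.invert(np.any(np.isnan(aryFunc), axis=1))    # voxel 3 fails
vecLgcIncl = np.logical_and(np.logical_and(vecLgcAvg, vecLgcVar), vecLgcNan)

print(vecLgcIncl)                 # [ True False False False]
print(aryFunc[vecLgcIncl].shape)  # (1, 20): only voxel 0 survives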
fvdsn/py-xml-escpos | xmlescpos/escpos.py | StyleStack.get | def get(self,style):
""" what's the value of a style at the current stack level"""
level = len(self.stack) -1
while level >= 0:
if style in self.stack[level]:
return self.stack[level][style]
else:
level = level - 1
return None | python | def get(self,style):
""" what's the value of a style at the current stack level"""
level = len(self.stack) -1
while level >= 0:
if style in self.stack[level]:
return self.stack[level][style]
else:
level = level - 1
return None | [
"def",
"get",
"(",
"self",
",",
"style",
")",
":",
"level",
"=",
"len",
"(",
"self",
".",
"stack",
")",
"-",
"1",
"while",
"level",
">=",
"0",
":",
"if",
"style",
"in",
"self",
".",
"stack",
"[",
"level",
"]",
":",
"return",
"self",
".",
"stack",
"[",
"level",
"]",
"[",
"style",
"]",
"else",
":",
"level",
"=",
"level",
"-",
"1",
"return",
"None"
] | what's the value of a style at the current stack level | [
"what",
"s",
"the",
"value",
"of",
"a",
"style",
"at",
"the",
"current",
"stack",
"level"
] | train | https://github.com/fvdsn/py-xml-escpos/blob/7f77e039c960d5773fb919aed02ba392dccbc360/xmlescpos/escpos.py#L124-L132 |
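StyleStack.get walks the stack from the innermost level outwards, so styles pushed later shadow earlier ones while unset styles fall through to the outer levels. A stripped-down stand-in for the class -- only the pieces needed for get, with invented style names:

class MiniStack(object):
    def __init__(self):
        self.stack = [{'bold': 'off', 'size': 'normal'}]  # base defaults

    def get(self, style):
        level = len(self.stack) - 1
        while level >= 0:
            if style in self.stack[level]:
                return self.stack[level][style]
            level = level - 1
        return None

s = MiniStack()
s.stack.append({'bold': 'on'})  # inner level overrides 'bold' only
print(s.get('bold'))            # 'on': found at the inner level
print(s.get('size'))            # 'normal': inherited from the outer level
print(s.get('font'))            # None: defined nowhere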
fvdsn/py-xml-escpos | xmlescpos/escpos.py | StyleStack.enforce_type | def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if not attr in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val) | python | def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if not attr in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val) | [
"def",
"enforce_type",
"(",
"self",
",",
"attr",
",",
"val",
")",
":",
"if",
"not",
"attr",
"in",
"self",
".",
"types",
":",
"return",
"utfstr",
"(",
"val",
")",
"elif",
"self",
".",
"types",
"[",
"attr",
"]",
"==",
"'int'",
":",
"return",
"int",
"(",
"float",
"(",
"val",
")",
")",
"elif",
"self",
".",
"types",
"[",
"attr",
"]",
"==",
"'float'",
":",
"return",
"float",
"(",
"val",
")",
"else",
":",
"return",
"utfstr",
"(",
"val",
")"
] | converts a value to the attribute's type | [
"converts",
"a",
"value",
"to",
"the",
"attribute",
"s",
"type"
] | train | https://github.com/fvdsn/py-xml-escpos/blob/7f77e039c960d5773fb919aed02ba392dccbc360/xmlescpos/escpos.py#L134-L143 |
fvdsn/py-xml-escpos | xmlescpos/escpos.py | StyleStack.push | def push(self, style={}):
"""push a new level on the stack with a style dictionnary containing style:value pairs"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
_style[attr] = self.enforce_type(attr, style[attr])
self.stack.append(_style) | python | def push(self, style={}):
"""push a new level on the stack with a style dictionnary containing style:value pairs"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
_style[attr] = self.enforce_type(attr, style[attr])
self.stack.append(_style) | [
"def",
"push",
"(",
"self",
",",
"style",
"=",
"{",
"}",
")",
":",
"_style",
"=",
"{",
"}",
"for",
"attr",
"in",
"style",
":",
"if",
"attr",
"in",
"self",
".",
"cmds",
"and",
"not",
"style",
"[",
"attr",
"]",
"in",
"self",
".",
"cmds",
"[",
"attr",
"]",
":",
"print",
"'WARNING: ESC/POS PRINTING: ignoring invalid value: '",
"+",
"utfstr",
"(",
"style",
"[",
"attr",
"]",
")",
"+",
"' for style: '",
"+",
"utfstr",
"(",
"attr",
")",
"else",
":",
"_style",
"[",
"attr",
"]",
"=",
"self",
".",
"enforce_type",
"(",
"attr",
",",
"style",
"[",
"attr",
"]",
")",
"self",
".",
"stack",
".",
"append",
"(",
"_style",
")"
] | push a new level on the stack with a style dictionary containing style:value pairs | [
"push",
"a",
"new",
"level",
"on",
"the",
"stack",
"with",
"a",
"style",
"dictionnary",
"containing",
"style",
":",
"value",
"pairs"
] | train | https://github.com/fvdsn/py-xml-escpos/blob/7f77e039c960d5773fb919aed02ba392dccbc360/xmlescpos/escpos.py#L145-L153 |
fvdsn/py-xml-escpos | xmlescpos/escpos.py | StyleStack.set | def set(self, style={}):
"""overrides style values at the current stack level"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr]) | python | def set(self, style={}):
"""overrides style values at the current stack level"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr]) | [
"def",
"set",
"(",
"self",
",",
"style",
"=",
"{",
"}",
")",
":",
"_style",
"=",
"{",
"}",
"for",
"attr",
"in",
"style",
":",
"if",
"attr",
"in",
"self",
".",
"cmds",
"and",
"not",
"style",
"[",
"attr",
"]",
"in",
"self",
".",
"cmds",
"[",
"attr",
"]",
":",
"print",
"'WARNING: ESC/POS PRINTING: ignoring invalid value: '",
"+",
"utfstr",
"(",
"style",
"[",
"attr",
"]",
")",
"+",
"' for style: '",
"+",
"utfstr",
"(",
"attr",
")",
"else",
":",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"[",
"attr",
"]",
"=",
"self",
".",
"enforce_type",
"(",
"attr",
",",
"style",
"[",
"attr",
"]",
")"
] | overrides style values at the current stack level | [
"overrides",
"style",
"values",
"at",
"the",
"current",
"stack",
"level"
] | train | https://github.com/fvdsn/py-xml-escpos/blob/7f77e039c960d5773fb919aed02ba392dccbc360/xmlescpos/escpos.py#L155-L162 |
fvdsn/py-xml-escpos | xmlescpos/escpos.py | StyleStack.to_escpos | def to_escpos(self):
""" converts the current style to an escpos command string """
cmd = ''
ordered_cmds = self.cmds.keys()
ordered_cmds.sort(lambda x,y: cmp(self.cmds[x]['_order'], self.cmds[y]['_order']))
for style in ordered_cmds:
cmd += self.cmds[style][self.get(style)]
return cmd | python | def to_escpos(self):
""" converts the current style to an escpos command string """
cmd = ''
ordered_cmds = self.cmds.keys()
ordered_cmds.sort(lambda x,y: cmp(self.cmds[x]['_order'], self.cmds[y]['_order']))
for style in ordered_cmds:
cmd += self.cmds[style][self.get(style)]
return cmd | [
"def",
"to_escpos",
"(",
"self",
")",
":",
"cmd",
"=",
"''",
"ordered_cmds",
"=",
"self",
".",
"cmds",
".",
"keys",
"(",
")",
"ordered_cmds",
".",
"sort",
"(",
"lambda",
"x",
",",
"y",
":",
"cmp",
"(",
"self",
".",
"cmds",
"[",
"x",
"]",
"[",
"'_order'",
"]",
",",
"self",
".",
"cmds",
"[",
"y",
"]",
"[",
"'_order'",
"]",
")",
")",
"for",
"style",
"in",
"ordered_cmds",
":",
"cmd",
"+=",
"self",
".",
"cmds",
"[",
"style",
"]",
"[",
"self",
".",
"get",
"(",
"style",
")",
"]",
"return",
"cmd"
] | converts the current style to an escpos command string | [
"converts",
"the",
"current",
"style",
"to",
"an",
"escpos",
"command",
"string"
] | train | https://github.com/fvdsn/py-xml-escpos/blob/7f77e039c960d5773fb919aed02ba392dccbc360/xmlescpos/escpos.py#L169-L176 |
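Taken together, the StyleStack methods compile inherited styles into printer command bytes, ordered by each command's '_order' key. The constructor and the real cmds table are not shown in these rows, so the sketch below fakes a two-entry cmds dict with invented escape sequences; it is written in Python 2, matching the source above:

# Hypothetical cmds table: style -> {value: ESC/POS bytes}, plus '_order'
cmds = {
    'bold':      {'_order': 2, 'on': '\x1b\x45\x01', 'off': '\x1b\x45\x00'},
    'underline': {'_order': 1, 'on': '\x1b\x2d\x01', 'off': '\x1b\x2d\x00'},
}
current = {'bold': 'on', 'underline': 'off'}  # as resolved by get()

ordered_cmds = cmds.keys()
ordered_cmds.sort(lambda x, y: cmp(cmds[x]['_order'], cmds[y]['_order']))
cmd = ''
for style in ordered_cmds:
    cmd += cmds[style][current[style]]
# underline bytes (_order 1) precede bold bytes (_order 2)
print(repr(cmd))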