| Column | Type | Values |
|---|---|---|
| Unnamed: 0 | int64 | 0 to 10k |
| repository_name | string | lengths 7 to 54 |
| func_path_in_repository | string | lengths 5 to 223 |
| func_name | string | lengths 1 to 134 |
| whole_func_string | string | lengths 100 to 30.3k |
| language | string | 1 distinct value |
| func_code_string | string | lengths 100 to 30.3k |
| func_code_tokens | string | lengths 138 to 33.2k |
| func_documentation_string | string | lengths 1 to 15k |
| func_documentation_tokens | string | lengths 5 to 5.14k |
| split_name | string | 1 distinct value |
| func_code_url | string | lengths 91 to 315 |
8,700 | bcbnz/pylabels | labels/sheet.py | Sheet.preview | def preview(self, page, filelike, format='png', dpi=72, background_colour=0xFFFFFF):
"""Render a preview image of a page.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
filelike: path or file-like object
Can be a filename as a string, a Python file object, or something
which behaves like a Python file object. For example, if you were
using the Django web framework, an HttpResponse object could be
passed to render the preview to the browser (as long as you remember
to set the mimetype of the response). If you pass a filename, the
existing contents will be overwritten.
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
"""
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
renderPM.drawToFile(self._pages[page-1], filelike, format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh | python | def preview(self, page, filelike, format='png', dpi=72, background_colour=0xFFFFFF):
"""Render a preview image of a page.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
filelike: path or file-like object
Can be a filename as a string, a Python file object, or something
which behaves like a Python file object. For example, if you were
using the Django web framework, an HttpResponse object could be
passed to render the preview to the browser (as long as you remember
to set the mimetype of the response). If you pass a filename, the
existing contents will be overwritten.
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
"""
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
renderPM.drawToFile(self._pages[page-1], filelike, format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh | ['def', 'preview', '(', 'self', ',', 'page', ',', 'filelike', ',', 'format', '=', "'png'", ',', 'dpi', '=', '72', ',', 'background_colour', '=', '0xFFFFFF', ')', ':', '# Check the page number.', 'if', 'page', '<', '1', 'or', 'page', '>', 'self', '.', 'page_count', ':', 'raise', 'ValueError', '(', '"Invalid page number; should be between 1 and {0:d}."', '.', 'format', '(', 'self', '.', 'page_count', ')', ')', '# Shade any remaining missing labels if desired.', 'self', '.', '_shade_remaining_missing', '(', ')', '# Rendering to an image (as opposed to a PDF) requires any background', '# to have an integer width and height if it is a ReportLab Image', '# object. Drawing objects are exempt from this.', 'oldw', ',', 'oldh', '=', 'None', ',', 'None', 'if', 'isinstance', '(', 'self', '.', '_bgimage', ',', 'Image', ')', ':', 'oldw', ',', 'oldh', '=', 'self', '.', '_bgimage', '.', 'width', ',', 'self', '.', '_bgimage', '.', 'height', 'self', '.', '_bgimage', '.', 'width', '=', 'int', '(', 'oldw', ')', '+', '1', 'self', '.', '_bgimage', '.', 'height', '=', 'int', '(', 'oldh', ')', '+', '1', '# Let ReportLab do the heavy lifting.', 'renderPM', '.', 'drawToFile', '(', 'self', '.', '_pages', '[', 'page', '-', '1', ']', ',', 'filelike', ',', 'format', ',', 'dpi', ',', 'background_colour', ')', '# Restore the size of the background image if we changed it.', 'if', 'oldw', ':', 'self', '.', '_bgimage', '.', 'width', '=', 'oldw', 'self', '.', '_bgimage', '.', 'height', '=', 'oldh'] | Render a preview image of a page.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
filelike: path or file-like object
Can be a filename as a string, a Python file object, or something
which behaves like a Python file object. For example, if you were
using the Django web framework, an HttpResponse object could be
passed to render the preview to the browser (as long as you remember
to set the mimetype of the response). If you pass a filename, the
existing contents will be overwritten.
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid. | ['Render', 'a', 'preview', 'image', 'of', 'a', 'page', '.'] | train | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L494-L553 |
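A minimal usage sketch for the `Sheet.preview` method in the row above; the sheet specification, drawing callback, and file names are illustrative assumptions, not taken from the dataset row.

```python
import labels                            # pylabels
from reportlab.graphics import shapes    # pylabels draws with ReportLab shapes

# Assumed A4 sheet (210 x 297 mm) with 2 x 8 labels of 90 x 25 mm; values are illustrative.
specs = labels.Specification(210, 297, 2, 8, 90, 25)

def draw_label(label, width, height, obj):
    # Place the payload string near the lower-left corner of each label.
    label.add(shapes.String(5, 5, str(obj), fontName="Helvetica", fontSize=10))

sheet = labels.Sheet(specs, draw_label)
sheet.add_labels(["Alpha", "Beta", "Gamma"])
# Render page 1 as a PNG preview at 72 dpi, as described in the docstring above.
sheet.preview(1, "page1-preview.png", format="png", dpi=72)
```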
8,701 | hardbyte/python-can | can/interfaces/systec/ucan.py | UcanServer.get_hardware_info | def get_hardware_info(self):
"""
Returns the extended hardware information of a device. With multi-channel USB-CANmoduls the information for
both CAN channels are returned separately.
:return:
Tuple with extended hardware information structure (see structure :class:`HardwareInfoEx`) and
structures with information of CAN channel 0 and 1 (see structure :class:`ChannelInfo`).
:rtype: tuple(HardwareInfoEx, ChannelInfo, ChannelInfo)
"""
hw_info_ex = HardwareInfoEx()
can_info_ch0, can_info_ch1 = ChannelInfo(), ChannelInfo()
UcanGetHardwareInfoEx2(self._handle, byref(hw_info_ex), byref(can_info_ch0), byref(can_info_ch1))
return hw_info_ex, can_info_ch0, can_info_ch1 | python | def get_hardware_info(self):
"""
Returns the extended hardware information of a device. With multi-channel USB-CANmoduls the information for
both CAN channels are returned separately.
:return:
Tuple with extended hardware information structure (see structure :class:`HardwareInfoEx`) and
structures with information of CAN channel 0 and 1 (see structure :class:`ChannelInfo`).
:rtype: tuple(HardwareInfoEx, ChannelInfo, ChannelInfo)
"""
hw_info_ex = HardwareInfoEx()
can_info_ch0, can_info_ch1 = ChannelInfo(), ChannelInfo()
UcanGetHardwareInfoEx2(self._handle, byref(hw_info_ex), byref(can_info_ch0), byref(can_info_ch1))
return hw_info_ex, can_info_ch0, can_info_ch1 | ['def', 'get_hardware_info', '(', 'self', ')', ':', 'hw_info_ex', '=', 'HardwareInfoEx', '(', ')', 'can_info_ch0', ',', 'can_info_ch1', '=', 'ChannelInfo', '(', ')', ',', 'ChannelInfo', '(', ')', 'UcanGetHardwareInfoEx2', '(', 'self', '.', '_handle', ',', 'byref', '(', 'hw_info_ex', ')', ',', 'byref', '(', 'can_info_ch0', ')', ',', 'byref', '(', 'can_info_ch1', ')', ')', 'return', 'hw_info_ex', ',', 'can_info_ch0', ',', 'can_info_ch1'] | Returns the extended hardware information of a device. With multi-channel USB-CANmoduls the information for
both CAN channels are returned separately.
:return:
Tuple with extended hardware information structure (see structure :class:`HardwareInfoEx`) and
structures with information of CAN channel 0 and 1 (see structure :class:`ChannelInfo`).
:rtype: tuple(HardwareInfoEx, ChannelInfo, ChannelInfo) | ['Returns', 'the', 'extended', 'hardware', 'information', 'of', 'a', 'device', '.', 'With', 'multi', '-', 'channel', 'USB', '-', 'CANmoduls', 'the', 'information', 'for', 'both', 'CAN', 'channels', 'are', 'returned', 'separately', '.'] | train | https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/systec/ucan.py#L484-L497 |
8,702 | af/turrentine | turrentine/views.py | PageView._mark_html_fields_as_safe | def _mark_html_fields_as_safe(self, page):
"""
Mark the html content as safe so we don't have to use the safe
template tag in all cms templates:
"""
page.title = mark_safe(page.title)
page.content = mark_safe(page.content)
return page | python | def _mark_html_fields_as_safe(self, page):
"""
Mark the html content as safe so we don't have to use the safe
template tag in all cms templates:
"""
page.title = mark_safe(page.title)
page.content = mark_safe(page.content)
return page | ['def', '_mark_html_fields_as_safe', '(', 'self', ',', 'page', ')', ':', 'page', '.', 'title', '=', 'mark_safe', '(', 'page', '.', 'title', ')', 'page', '.', 'content', '=', 'mark_safe', '(', 'page', '.', 'content', ')', 'return', 'page'] | Mark the html content as safe so we don't have to use the safe
template tag in all cms templates: | ['Mark', 'the', 'html', 'content', 'as', 'safe', 'so', 'we', 'don', 't', 'have', 'to', 'use', 'the', 'safe', 'template', 'tag', 'in', 'all', 'cms', 'templates', ':'] | train | https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/views.py#L91-L98 |
8,703 | hydraplatform/hydra-base | hydra_base/lib/users.py | get_perm_by_code | def get_perm_by_code(perm_code,**kwargs):
"""
Get a permission by its code
"""
try:
perm = db.DBSession.query(Perm).filter(Perm.code==perm_code).one()
return perm
except NoResultFound:
raise ResourceNotFoundError("Permission not found (perm_code={})".format(perm_code)) | python | def get_perm_by_code(perm_code,**kwargs):
"""
Get a permission by its code
"""
try:
perm = db.DBSession.query(Perm).filter(Perm.code==perm_code).one()
return perm
except NoResultFound:
raise ResourceNotFoundError("Permission not found (perm_code={})".format(perm_code)) | ['def', 'get_perm_by_code', '(', 'perm_code', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'perm', '=', 'db', '.', 'DBSession', '.', 'query', '(', 'Perm', ')', '.', 'filter', '(', 'Perm', '.', 'code', '==', 'perm_code', ')', '.', 'one', '(', ')', 'return', 'perm', 'except', 'NoResultFound', ':', 'raise', 'ResourceNotFoundError', '(', '"Permission not found (perm_code={})"', '.', 'format', '(', 'perm_code', ')', ')'] | Get a permission by its code | ['Get', 'a', 'permission', 'by', 'its', 'code'] | train | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/users.py#L447-L456 |
8,704 | ninuxorg/nodeshot | nodeshot/community/profiles/serializers.py | ChangePasswordSerializer.validate_current_password | def validate_current_password(self, value):
""" current password check """
if self.instance and self.instance.has_usable_password() and not self.instance.check_password(value):
raise serializers.ValidationError(_('Current password is not correct'))
return value | python | def validate_current_password(self, value):
""" current password check """
if self.instance and self.instance.has_usable_password() and not self.instance.check_password(value):
raise serializers.ValidationError(_('Current password is not correct'))
return value | ['def', 'validate_current_password', '(', 'self', ',', 'value', ')', ':', 'if', 'self', '.', 'instance', 'and', 'self', '.', 'instance', '.', 'has_usable_password', '(', ')', 'and', 'not', 'self', '.', 'instance', '.', 'check_password', '(', 'value', ')', ':', 'raise', 'serializers', '.', 'ValidationError', '(', '_', '(', "'Current password is not correct'", ')', ')', 'return', 'value'] | current password check | ['current', 'password', 'check'] | train | https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/profiles/serializers.py#L264-L268 |
8,705 | josuebrunel/myql | myql/contrib/finance/stockscraper/stockretriever.py | StockRetriever.get_historical_info | def get_historical_info(self, symbol,items=None, startDate=None, endDate=None, limit=None):
"""get_historical_info() uses the csv datatable to retrieve all available historical data on a typical historical prices page
"""
startDate, endDate = self.__get_time_range(startDate, endDate)
response = self.select('yahoo.finance.historicaldata',items,limit).where(['symbol','=',symbol],['startDate','=',startDate],['endDate','=',endDate])
return response | python | def get_historical_info(self, symbol,items=None, startDate=None, endDate=None, limit=None):
"""get_historical_info() uses the csv datatable to retrieve all available historical data on a typical historical prices page
"""
startDate, endDate = self.__get_time_range(startDate, endDate)
response = self.select('yahoo.finance.historicaldata',items,limit).where(['symbol','=',symbol],['startDate','=',startDate],['endDate','=',endDate])
return response | ['def', 'get_historical_info', '(', 'self', ',', 'symbol', ',', 'items', '=', 'None', ',', 'startDate', '=', 'None', ',', 'endDate', '=', 'None', ',', 'limit', '=', 'None', ')', ':', 'startDate', ',', 'endDate', '=', 'self', '.', '__get_time_range', '(', 'startDate', ',', 'endDate', ')', 'response', '=', 'self', '.', 'select', '(', "'yahoo.finance.historicaldata'", ',', 'items', ',', 'limit', ')', '.', 'where', '(', '[', "'symbol'", ',', "'='", ',', 'symbol', ']', ',', '[', "'startDate'", ',', "'='", ',', 'startDate', ']', ',', '[', "'endDate'", ',', "'='", ',', 'endDate', ']', ')', 'return', 'response'] | get_historical_info() uses the csv datatable to retrieve all available historical data on a typical historical prices page | ['get_historical_info', '()', 'uses', 'the', 'csv', 'datatable', 'to', 'retrieve', 'all', 'available', 'historical', 'data', 'on', 'a', 'typical', 'historical', 'prices', 'page'] | train | https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L49-L54 |
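A hedged usage sketch for `get_historical_info`; the import path is assumed from the repository layout, the symbol and date range are illustrative, and the exact response object depends on the underlying YQL call.

```python
# Assumed import path based on the file location in the repository.
from myql.contrib.finance.stockscraper import StockRetriever

stocks = StockRetriever(format='json')   # format kwarg assumed from myql defaults
response = stocks.get_historical_info(
    'YHOO', startDate='2014-01-01', endDate='2014-03-01', limit=20)
print(response.json())                    # assumes a requests.Response-like return value
```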
8,706 | linkedin/Zopkio | zopkio/utils.py | load_module | def load_module(filename):
"""
Loads a module by filename
"""
basename = os.path.basename(filename)
path = os.path.dirname(filename)
sys.path.append(path)
# TODO(tlan) need to figure out how to handle errors thrown here
return __import__(os.path.splitext(basename)[0]) | python | def load_module(filename):
"""
Loads a module by filename
"""
basename = os.path.basename(filename)
path = os.path.dirname(filename)
sys.path.append(path)
# TODO(tlan) need to figure out how to handle errors thrown here
return __import__(os.path.splitext(basename)[0]) | ['def', 'load_module', '(', 'filename', ')', ':', 'basename', '=', 'os', '.', 'path', '.', 'basename', '(', 'filename', ')', 'path', '=', 'os', '.', 'path', '.', 'dirname', '(', 'filename', ')', 'sys', '.', 'path', '.', 'append', '(', 'path', ')', '# TODO(tlan) need to figure out how to handle errors thrown here', 'return', '__import__', '(', 'os', '.', 'path', '.', 'splitext', '(', 'basename', ')', '[', '0', ']', ')'] | Loads a module by filename | ['Loads', 'a', 'module', 'by', 'filename'] | train | https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/utils.py#L58-L66 |
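A short sketch of how this helper could be used to load a test file by path; the path is a made-up example. Note that the function appends the file's directory to `sys.path` as a side effect.

```python
# Illustrative only: '/opt/tests/perf_test.py' is a hypothetical path.
module = load_module('/opt/tests/perf_test.py')

# The return value is a regular module object, so its attributes are reachable directly.
if hasattr(module, 'test'):
    module.test()
```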
8,707 | jeffrimko/Auxly | lib/auxly/shell.py | silent | def silent(cmd, **kwargs):
"""Calls the given shell command. Output will not be displayed. Returns the
status code.
**Examples**:
::
auxly.shell.silent("ls")
"""
return call(cmd, shell=True, stdout=NULL, stderr=NULL, **kwargs) | python | def silent(cmd, **kwargs):
"""Calls the given shell command. Output will not be displayed. Returns the
status code.
**Examples**:
::
auxly.shell.silent("ls")
"""
return call(cmd, shell=True, stdout=NULL, stderr=NULL, **kwargs) | ['def', 'silent', '(', 'cmd', ',', '*', '*', 'kwargs', ')', ':', 'return', 'call', '(', 'cmd', ',', 'shell', '=', 'True', ',', 'stdout', '=', 'NULL', ',', 'stderr', '=', 'NULL', ',', '*', '*', 'kwargs', ')'] | Calls the given shell command. Output will not be displayed. Returns the
status code.
**Examples**:
::
auxly.shell.silent("ls") | ['Calls', 'the', 'given', 'shell', 'command', '.', 'Output', 'will', 'not', 'be', 'displayed', '.', 'Returns', 'the', 'status', 'code', '.'] | train | https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/shell.py#L32-L40 |
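Because `silent()` suppresses output and only returns the status code, it pairs naturally with a simple comparison; the command below is illustrative.

```python
import auxly.shell

# A status code of 0 means the command succeeded; anything else means it failed.
if auxly.shell.silent("ping -c 1 localhost") == 0:
    print("host reachable")
else:
    print("host unreachable")
```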
8,708 | CSchoel/nolds | nolds/datasets.py | load_qrandom | def load_qrandom():
"""
Loads a set of 10000 random numbers generated by qrandom.
This dataset can be used when you want to do some limited tests with "true"
random data without an internet connection.
Returns:
int array
the dataset
"""
fname = "datasets/qrandom.npy"
with pkg_resources.resource_stream(__name__, fname) as f:
return np.load(f) | python | def load_qrandom():
"""
Loads a set of 10000 random numbers generated by qrandom.
This dataset can be used when you want to do some limited tests with "true"
random data without an internet connection.
Returns:
int array
the dataset
"""
fname = "datasets/qrandom.npy"
with pkg_resources.resource_stream(__name__, fname) as f:
return np.load(f) | ['def', 'load_qrandom', '(', ')', ':', 'fname', '=', '"datasets/qrandom.npy"', 'with', 'pkg_resources', '.', 'resource_stream', '(', '__name__', ',', 'fname', ')', 'as', 'f', ':', 'return', 'np', '.', 'load', '(', 'f', ')'] | Loads a set of 10000 random numbers generated by qrandom.
This dataset can be used when you want to do some limited tests with "true"
random data without an internet connection.
Returns:
int array
the dataset | ['Loads', 'a', 'set', 'of', '10000', 'random', 'numbers', 'generated', 'by', 'qrandom', '.'] | train | https://github.com/CSchoel/nolds/blob/8a5ecc472d67ac08b571bd68967287668ca9058e/nolds/datasets.py#L87-L100 |
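A small sketch of the offline-testing use case the docstring describes, feeding the bundled random numbers into another nolds measure; `sampen` is assumed to be exported at the package level.

```python
import nolds
from nolds.datasets import load_qrandom

data = load_qrandom()            # 10000 "true" random numbers, no network needed
print(len(data))                 # expected: 10000
# Sanity-check an entropy measure on random input (a slice keeps it fast).
print(nolds.sampen(data[:1000]))
```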
8,709 | baverman/supplement | supplement/remote.py | Environment.get_location | def get_location(self, project_path, source, position, filename):
"""Return line number and file path where name under cursor is defined
If line is None location wasn't finded. If file path is None, defenition is located in
the same source.
:param project_path: absolute project path
:param source: unicode or byte string code source
:param position: character or byte cursor position
:param filename: absolute path of file with source code
:returns: tuple (lineno, file path)
"""
return self._call('get_location', project_path, source, position, filename) | python | def get_location(self, project_path, source, position, filename):
"""Return line number and file path where name under cursor is defined
If line is None location wasn't finded. If file path is None, defenition is located in
the same source.
:param project_path: absolute project path
:param source: unicode or byte string code source
:param position: character or byte cursor position
:param filename: absolute path of file with source code
:returns: tuple (lineno, file path)
"""
return self._call('get_location', project_path, source, position, filename) | ['def', 'get_location', '(', 'self', ',', 'project_path', ',', 'source', ',', 'position', ',', 'filename', ')', ':', 'return', 'self', '.', '_call', '(', "'get_location'", ',', 'project_path', ',', 'source', ',', 'position', ',', 'filename', ')'] | Return line number and file path where name under cursor is defined
If line is None location wasn't finded. If file path is None, defenition is located in
the same source.
:param project_path: absolute project path
:param source: unicode or byte string code source
:param position: character or byte cursor position
:param filename: absolute path of file with source code
:returns: tuple (lineno, file path) | ['Return', 'line', 'number', 'and', 'file', 'path', 'where', 'name', 'under', 'cursor', 'is', 'defined'] | train | https://github.com/baverman/supplement/blob/955002fe5a5749c9f0d89002f0006ec4fcd35bc9/supplement/remote.py#L125-L137 |
8,710 | noahbenson/neuropythy | neuropythy/freesurfer/core.py | to_subject_paths | def to_subject_paths(paths):
'''
to_subject_paths(paths) accepts either a string that is a :-separated list of directories or a
list of directories and yields a list of all the existing directories.
'''
if paths is None: return []
if pimms.is_str(paths): paths = paths.split(':')
paths = [os.path.expanduser(p) for p in paths]
return [p for p in paths if os.path.isdir(p)] | python | def to_subject_paths(paths):
'''
to_subject_paths(paths) accepts either a string that is a :-separated list of directories or a
list of directories and yields a list of all the existing directories.
'''
if paths is None: return []
if pimms.is_str(paths): paths = paths.split(':')
paths = [os.path.expanduser(p) for p in paths]
return [p for p in paths if os.path.isdir(p)] | ['def', 'to_subject_paths', '(', 'paths', ')', ':', 'if', 'paths', 'is', 'None', ':', 'return', '[', ']', 'if', 'pimms', '.', 'is_str', '(', 'paths', ')', ':', 'paths', '=', 'paths', '.', 'split', '(', "':'", ')', 'paths', '=', '[', 'os', '.', 'path', '.', 'expanduser', '(', 'p', ')', 'for', 'p', 'in', 'paths', ']', 'return', '[', 'p', 'for', 'p', 'in', 'paths', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'p', ')', ']'] | to_subject_paths(paths) accepts either a string that is a :-separated list of directories or a
list of directories and yields a list of all the existing directories. | ['to_subject_paths', '(', 'paths', ')', 'accepts', 'either', 'a', 'string', 'that', 'is', 'a', ':', '-', 'separated', 'list', 'of', 'directories', 'or', 'a', 'list', 'of', 'directories', 'and', 'yields', 'a', 'list', 'of', 'all', 'the', 'existing', 'directories', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/freesurfer/core.py#L22-L30 |
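A quick illustration of both accepted input forms; the directories are made up, so only the ones that exist on the running machine would survive the filter.

```python
# Colon-separated string form (as in a SUBJECTS_DIR-style environment variable):
paths = to_subject_paths("~/freesurfer/subjects:/data/subjects:/missing/dir")

# List form; nonexistent entries are silently dropped, so the result may be shorter.
paths = to_subject_paths(["~/freesurfer/subjects", "/missing/dir"])

# None maps to an empty list.
assert to_subject_paths(None) == []
```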
8,711 | lazygunner/xunleipy | xunleipy/rsa_lib.py | extendedEuclid | def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y | python | def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y | ['def', 'extendedEuclid', '(', 'a', ',', 'b', ')', ':', 'if', 'a', '==', '0', ':', 'return', 'b', ',', '0', ',', '1', 'else', ':', 'g', ',', 'y', ',', 'x', '=', 'extendedEuclid', '(', 'b', '%', 'a', ',', 'a', ')', 'return', 'g', ',', 'x', '-', '(', 'b', '//', 'a', ')', '*', 'y', ',', 'y'] | return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b | ['return', 'a', 'tuple', 'of', 'three', 'values', ':', 'x', 'y', 'and', 'z', 'such', 'that', 'x', 'is', 'the', 'GCD', 'of', 'a', 'and', 'b', 'and', 'x', '=', 'y', '*', 'a', '+', 'z', '*', 'b'] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L33-L40 |
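A worked check of the documented identity x = y*a + z*b, using a standard textbook pair of inputs.

```python
g, y, z = extendedEuclid(240, 46)
assert (g, y, z) == (2, -9, 47)   # gcd(240, 46) == 2
assert g == y * 240 + z * 46       # Bezout identity: -9*240 + 47*46 == 2
```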
8,712 | toumorokoshi/sprinter | sprinter/core/globals.py | load_global_config | def load_global_config(config_path):
""" Load a global configuration object, and query for any required variables along the way """
config = configparser.RawConfigParser()
if os.path.exists(config_path):
logger.debug("Checking and setting global parameters...")
config.read(config_path)
else:
_initial_run()
logger.info("Unable to find a global sprinter configuration!")
logger.info("Creating one now. Please answer some questions" +
" about what you would like sprinter to do.")
logger.info("")
# checks and sets sections
if not config.has_section('global'):
config.add_section('global')
configure_config(config)
write_config(config, config_path)
return config | python | def load_global_config(config_path):
""" Load a global configuration object, and query for any required variables along the way """
config = configparser.RawConfigParser()
if os.path.exists(config_path):
logger.debug("Checking and setting global parameters...")
config.read(config_path)
else:
_initial_run()
logger.info("Unable to find a global sprinter configuration!")
logger.info("Creating one now. Please answer some questions" +
" about what you would like sprinter to do.")
logger.info("")
# checks and sets sections
if not config.has_section('global'):
config.add_section('global')
configure_config(config)
write_config(config, config_path)
return config | ['def', 'load_global_config', '(', 'config_path', ')', ':', 'config', '=', 'configparser', '.', 'RawConfigParser', '(', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'config_path', ')', ':', 'logger', '.', 'debug', '(', '"Checking and setting global parameters..."', ')', 'config', '.', 'read', '(', 'config_path', ')', 'else', ':', '_initial_run', '(', ')', 'logger', '.', 'info', '(', '"Unable to find a global sprinter configuration!"', ')', 'logger', '.', 'info', '(', '"Creating one now. Please answer some questions"', '+', '" about what you would like sprinter to do."', ')', 'logger', '.', 'info', '(', '""', ')', '# checks and sets sections', 'if', 'not', 'config', '.', 'has_section', '(', "'global'", ')', ':', 'config', '.', 'add_section', '(', "'global'", ')', 'configure_config', '(', 'config', ')', 'write_config', '(', 'config', ',', 'config_path', ')', 'return', 'config'] | Load a global configuration object, and query for any required variables along the way | ['Load', 'a', 'global', 'configuration', 'object', 'and', 'query', 'for', 'any', 'required', 'variables', 'along', 'the', 'way'] | train | https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/core/globals.py#L34-L53 |
8,713 | PythonCharmers/python-future | src/future/utils/__init__.py | _get_caller_globals_and_locals | def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals | python | def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals | ['def', '_get_caller_globals_and_locals', '(', ')', ':', 'caller_frame', '=', 'inspect', '.', 'stack', '(', ')', '[', '2', ']', 'myglobals', '=', 'caller_frame', '[', '0', ']', '.', 'f_globals', 'mylocals', '=', 'caller_frame', '[', '0', ']', '.', 'f_locals', 'return', 'myglobals', ',', 'mylocals'] | Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here? | ['Returns', 'the', 'globals', 'and', 'locals', 'of', 'the', 'calling', 'frame', '.'] | train | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/utils/__init__.py#L360-L369 |
8,714 | sourceperl/pyModbusTCP | pyModbusTCP/utils.py | word_list_to_long | def word_list_to_long(val_list, big_endian=True):
"""Word list (16 bits int) to long list (32 bits int)
By default word_list_to_long() use big endian order. For use little endian, set
big_endian param to False.
:param val_list: list of 16 bits int value
:type val_list: list
:param big_endian: True for big endian/False for little (optional)
:type big_endian: bool
:returns: list of 32 bits int value
:rtype: list
"""
# allocate list for long int
long_list = [None] * int(len(val_list) / 2)
# fill registers list with register items
for i, item in enumerate(long_list):
if big_endian:
long_list[i] = (val_list[i * 2] << 16) + val_list[(i * 2) + 1]
else:
long_list[i] = (val_list[(i * 2) + 1] << 16) + val_list[i * 2]
# return long list
return long_list | python | def word_list_to_long(val_list, big_endian=True):
"""Word list (16 bits int) to long list (32 bits int)
By default word_list_to_long() use big endian order. For use little endian, set
big_endian param to False.
:param val_list: list of 16 bits int value
:type val_list: list
:param big_endian: True for big endian/False for little (optional)
:type big_endian: bool
:returns: list of 32 bits int value
:rtype: list
"""
# allocate list for long int
long_list = [None] * int(len(val_list) / 2)
# fill registers list with register items
for i, item in enumerate(long_list):
if big_endian:
long_list[i] = (val_list[i * 2] << 16) + val_list[(i * 2) + 1]
else:
long_list[i] = (val_list[(i * 2) + 1] << 16) + val_list[i * 2]
# return long list
return long_list | ['def', 'word_list_to_long', '(', 'val_list', ',', 'big_endian', '=', 'True', ')', ':', '# allocate list for long int', 'long_list', '=', '[', 'None', ']', '*', 'int', '(', 'len', '(', 'val_list', ')', '/', '2', ')', '# fill registers list with register items', 'for', 'i', ',', 'item', 'in', 'enumerate', '(', 'long_list', ')', ':', 'if', 'big_endian', ':', 'long_list', '[', 'i', ']', '=', '(', 'val_list', '[', 'i', '*', '2', ']', '<<', '16', ')', '+', 'val_list', '[', '(', 'i', '*', '2', ')', '+', '1', ']', 'else', ':', 'long_list', '[', 'i', ']', '=', '(', 'val_list', '[', '(', 'i', '*', '2', ')', '+', '1', ']', '<<', '16', ')', '+', 'val_list', '[', 'i', '*', '2', ']', '# return long list', 'return', 'long_list'] | Word list (16 bits int) to long list (32 bits int)
By default word_list_to_long() use big endian order. For use little endian, set
big_endian param to False.
:param val_list: list of 16 bits int value
:type val_list: list
:param big_endian: True for big endian/False for little (optional)
:type big_endian: bool
:returns: list of 32 bits int value
:rtype: list | ['Word', 'list', '(', '16', 'bits', 'int', ')', 'to', 'long', 'list', '(', '32', 'bits', 'int', ')'] | train | https://github.com/sourceperl/pyModbusTCP/blob/993f6e2f5ab52eba164be049e42cea560c3751a5/pyModbusTCP/utils.py#L65-L87 |
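A worked example of both byte orders for `word_list_to_long`; the register values are arbitrary.

```python
# Big endian (default): the first word becomes the high 16 bits.
assert word_list_to_long([0x1234, 0x5678]) == [0x12345678]

# Little endian: the second word becomes the high 16 bits.
assert word_list_to_long([0x1234, 0x5678], big_endian=False) == [0x56781234]

# Typical use: combine a pair of 16-bit Modbus registers into one 32-bit counter.
regs = [0x0001, 0x86A0]                       # arbitrary register pair
assert word_list_to_long(regs) == [100000]    # 0x000186A0 == 100000
```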
8,715 | ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_map/mp_elevation.py | ElevationModel.GetElevation | def GetElevation(self, latitude, longitude, timeout=0):
'''Returns the altitude (m ASL) of a given lat/long pair, or None if unknown'''
if latitude is None or longitude is None:
return None
if self.database == 'srtm':
TileID = (numpy.floor(latitude), numpy.floor(longitude))
if TileID in self.tileDict:
alt = self.tileDict[TileID].getAltitudeFromLatLon(latitude, longitude)
else:
tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
if tile == 0:
if timeout > 0:
t0 = time.time()
while time.time() < t0+timeout and tile == 0:
tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
if tile == 0:
time.sleep(0.1)
if tile == 0:
return None
self.tileDict[TileID] = tile
alt = tile.getAltitudeFromLatLon(latitude, longitude)
if self.database == 'geoscience':
alt = self.mappy.getAltitudeAtPoint(latitude, longitude)
return alt | python | def GetElevation(self, latitude, longitude, timeout=0):
'''Returns the altitude (m ASL) of a given lat/long pair, or None if unknown'''
if latitude is None or longitude is None:
return None
if self.database == 'srtm':
TileID = (numpy.floor(latitude), numpy.floor(longitude))
if TileID in self.tileDict:
alt = self.tileDict[TileID].getAltitudeFromLatLon(latitude, longitude)
else:
tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
if tile == 0:
if timeout > 0:
t0 = time.time()
while time.time() < t0+timeout and tile == 0:
tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
if tile == 0:
time.sleep(0.1)
if tile == 0:
return None
self.tileDict[TileID] = tile
alt = tile.getAltitudeFromLatLon(latitude, longitude)
if self.database == 'geoscience':
alt = self.mappy.getAltitudeAtPoint(latitude, longitude)
return alt | ['def', 'GetElevation', '(', 'self', ',', 'latitude', ',', 'longitude', ',', 'timeout', '=', '0', ')', ':', 'if', 'latitude', 'is', 'None', 'or', 'longitude', 'is', 'None', ':', 'return', 'None', 'if', 'self', '.', 'database', '==', "'srtm'", ':', 'TileID', '=', '(', 'numpy', '.', 'floor', '(', 'latitude', ')', ',', 'numpy', '.', 'floor', '(', 'longitude', ')', ')', 'if', 'TileID', 'in', 'self', '.', 'tileDict', ':', 'alt', '=', 'self', '.', 'tileDict', '[', 'TileID', ']', '.', 'getAltitudeFromLatLon', '(', 'latitude', ',', 'longitude', ')', 'else', ':', 'tile', '=', 'self', '.', 'downloader', '.', 'getTile', '(', 'numpy', '.', 'floor', '(', 'latitude', ')', ',', 'numpy', '.', 'floor', '(', 'longitude', ')', ')', 'if', 'tile', '==', '0', ':', 'if', 'timeout', '>', '0', ':', 't0', '=', 'time', '.', 'time', '(', ')', 'while', 'time', '.', 'time', '(', ')', '<', 't0', '+', 'timeout', 'and', 'tile', '==', '0', ':', 'tile', '=', 'self', '.', 'downloader', '.', 'getTile', '(', 'numpy', '.', 'floor', '(', 'latitude', ')', ',', 'numpy', '.', 'floor', '(', 'longitude', ')', ')', 'if', 'tile', '==', '0', ':', 'time', '.', 'sleep', '(', '0.1', ')', 'if', 'tile', '==', '0', ':', 'return', 'None', 'self', '.', 'tileDict', '[', 'TileID', ']', '=', 'tile', 'alt', '=', 'tile', '.', 'getAltitudeFromLatLon', '(', 'latitude', ',', 'longitude', ')', 'if', 'self', '.', 'database', '==', "'geoscience'", ':', 'alt', '=', 'self', '.', 'mappy', '.', 'getAltitudeAtPoint', '(', 'latitude', ',', 'longitude', ')', 'return', 'alt'] | Returns the altitude (m ASL) of a given lat/long pair, or None if unknown | ['Returns', 'the', 'altitude', '(', 'm', 'ASL', ')', 'of', 'a', 'given', 'lat', '/', 'long', 'pair', 'or', 'None', 'if', 'unknown'] | train | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_map/mp_elevation.py#L34-L57 |
8,716 | twilio/twilio-python | twilio/rest/authy/v1/service/entity/__init__.py | EntityContext.factors | def factors(self):
"""
Access the factors
:returns: twilio.rest.authy.v1.service.entity.factor.FactorList
:rtype: twilio.rest.authy.v1.service.entity.factor.FactorList
"""
if self._factors is None:
self._factors = FactorList(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
return self._factors | python | def factors(self):
"""
Access the factors
:returns: twilio.rest.authy.v1.service.entity.factor.FactorList
:rtype: twilio.rest.authy.v1.service.entity.factor.FactorList
"""
if self._factors is None:
self._factors = FactorList(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
return self._factors | ['def', 'factors', '(', 'self', ')', ':', 'if', 'self', '.', '_factors', 'is', 'None', ':', 'self', '.', '_factors', '=', 'FactorList', '(', 'self', '.', '_version', ',', 'service_sid', '=', 'self', '.', '_solution', '[', "'service_sid'", ']', ',', 'identity', '=', 'self', '.', '_solution', '[', "'identity'", ']', ',', ')', 'return', 'self', '.', '_factors'] | Access the factors
:returns: twilio.rest.authy.v1.service.entity.factor.FactorList
:rtype: twilio.rest.authy.v1.service.entity.factor.FactorList | ['Access', 'the', 'factors'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/authy/v1/service/entity/__init__.py#L270-L283 |
8,717 | ihmeuw/vivarium | src/vivarium/interface/utilities.py | log_progress | def log_progress(sequence, every=None, size=None, name='Items'):
"""Taken from https://github.com/alexanderkuk/log-progress"""
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = int(size / 200) # every 0.5%
else:
assert every is not None, 'sequence is iterator, set every'
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, record in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = '{name}: {index} / ?'.format(
name=name,
index=index
)
else:
progress.value = index
label.value = u'{name}: {index} / {size}'.format(
name=name,
index=index,
size=size
)
yield record
except Exception as e:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = index
label.value = "{name}: {index}".format(
name=name,
index=str(index or '?')
) | python | def log_progress(sequence, every=None, size=None, name='Items'):
"""Taken from https://github.com/alexanderkuk/log-progress"""
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = int(size / 200) # every 0.5%
else:
assert every is not None, 'sequence is iterator, set every'
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, record in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = '{name}: {index} / ?'.format(
name=name,
index=index
)
else:
progress.value = index
label.value = u'{name}: {index} / {size}'.format(
name=name,
index=index,
size=size
)
yield record
except Exception as e:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = index
label.value = "{name}: {index}".format(
name=name,
index=str(index or '?')
) | ['def', 'log_progress', '(', 'sequence', ',', 'every', '=', 'None', ',', 'size', '=', 'None', ',', 'name', '=', "'Items'", ')', ':', 'from', 'ipywidgets', 'import', 'IntProgress', ',', 'HTML', ',', 'VBox', 'from', 'IPython', '.', 'display', 'import', 'display', 'is_iterator', '=', 'False', 'if', 'size', 'is', 'None', ':', 'try', ':', 'size', '=', 'len', '(', 'sequence', ')', 'except', 'TypeError', ':', 'is_iterator', '=', 'True', 'if', 'size', 'is', 'not', 'None', ':', 'if', 'every', 'is', 'None', ':', 'if', 'size', '<=', '200', ':', 'every', '=', '1', 'else', ':', 'every', '=', 'int', '(', 'size', '/', '200', ')', '# every 0.5%', 'else', ':', 'assert', 'every', 'is', 'not', 'None', ',', "'sequence is iterator, set every'", 'if', 'is_iterator', ':', 'progress', '=', 'IntProgress', '(', 'min', '=', '0', ',', 'max', '=', '1', ',', 'value', '=', '1', ')', 'progress', '.', 'bar_style', '=', "'info'", 'else', ':', 'progress', '=', 'IntProgress', '(', 'min', '=', '0', ',', 'max', '=', 'size', ',', 'value', '=', '0', ')', 'label', '=', 'HTML', '(', ')', 'box', '=', 'VBox', '(', 'children', '=', '[', 'label', ',', 'progress', ']', ')', 'display', '(', 'box', ')', 'index', '=', '0', 'try', ':', 'for', 'index', ',', 'record', 'in', 'enumerate', '(', 'sequence', ',', '1', ')', ':', 'if', 'index', '==', '1', 'or', 'index', '%', 'every', '==', '0', ':', 'if', 'is_iterator', ':', 'label', '.', 'value', '=', "'{name}: {index} / ?'", '.', 'format', '(', 'name', '=', 'name', ',', 'index', '=', 'index', ')', 'else', ':', 'progress', '.', 'value', '=', 'index', 'label', '.', 'value', '=', "u'{name}: {index} / {size}'", '.', 'format', '(', 'name', '=', 'name', ',', 'index', '=', 'index', ',', 'size', '=', 'size', ')', 'yield', 'record', 'except', 'Exception', 'as', 'e', ':', 'progress', '.', 'bar_style', '=', "'danger'", 'raise', 'else', ':', 'progress', '.', 'bar_style', '=', "'success'", 'progress', '.', 'value', '=', 'index', 'label', '.', 'value', '=', '"{name}: {index}"', '.', 'format', '(', 'name', '=', 'name', ',', 'index', '=', 'str', '(', 'index', 'or', "'?'", ')', ')'] | Taken from https://github.com/alexanderkuk/log-progress | ['Taken', 'from', 'https', ':', '//', 'github', '.', 'com', '/', 'alexanderkuk', '/', 'log', '-', 'progress'] | train | https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/interface/utilities.py#L21-L76 |
8,718 | pydata/xarray | xarray/core/variable.py | Variable.quantile | def quantile(self, q, dim=None, interpolation='linear'):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
if isinstance(self.data, dask_array_type):
raise TypeError("quantile does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method.")
q = np.asarray(q, dtype=np.float64)
new_dims = list(self.dims)
if dim is not None:
axis = self.get_axis_num(dim)
if utils.is_scalar(dim):
new_dims.remove(dim)
else:
for d in dim:
new_dims.remove(d)
else:
axis = None
new_dims = []
# only add the quantile dimension if q is array like
if q.ndim != 0:
new_dims = ['quantile'] + new_dims
qs = np.nanpercentile(self.data, q * 100., axis=axis,
interpolation=interpolation)
return Variable(new_dims, qs) | python | def quantile(self, q, dim=None, interpolation='linear'):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
if isinstance(self.data, dask_array_type):
raise TypeError("quantile does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method.")
q = np.asarray(q, dtype=np.float64)
new_dims = list(self.dims)
if dim is not None:
axis = self.get_axis_num(dim)
if utils.is_scalar(dim):
new_dims.remove(dim)
else:
for d in dim:
new_dims.remove(d)
else:
axis = None
new_dims = []
# only add the quantile dimension if q is array like
if q.ndim != 0:
new_dims = ['quantile'] + new_dims
qs = np.nanpercentile(self.data, q * 100., axis=axis,
interpolation=interpolation)
return Variable(new_dims, qs) | ['def', 'quantile', '(', 'self', ',', 'q', ',', 'dim', '=', 'None', ',', 'interpolation', '=', "'linear'", ')', ':', 'if', 'isinstance', '(', 'self', '.', 'data', ',', 'dask_array_type', ')', ':', 'raise', 'TypeError', '(', '"quantile does not work for arrays stored as dask "', '"arrays. Load the data via .compute() or .load() "', '"prior to calling this method."', ')', 'q', '=', 'np', '.', 'asarray', '(', 'q', ',', 'dtype', '=', 'np', '.', 'float64', ')', 'new_dims', '=', 'list', '(', 'self', '.', 'dims', ')', 'if', 'dim', 'is', 'not', 'None', ':', 'axis', '=', 'self', '.', 'get_axis_num', '(', 'dim', ')', 'if', 'utils', '.', 'is_scalar', '(', 'dim', ')', ':', 'new_dims', '.', 'remove', '(', 'dim', ')', 'else', ':', 'for', 'd', 'in', 'dim', ':', 'new_dims', '.', 'remove', '(', 'd', ')', 'else', ':', 'axis', '=', 'None', 'new_dims', '=', '[', ']', '# only add the quantile dimension if q is array like', 'if', 'q', '.', 'ndim', '!=', '0', ':', 'new_dims', '=', '[', "'quantile'", ']', '+', 'new_dims', 'qs', '=', 'np', '.', 'nanpercentile', '(', 'self', '.', 'data', ',', 'q', '*', '100.', ',', 'axis', '=', 'axis', ',', 'interpolation', '=', 'interpolation', ')', 'return', 'Variable', '(', 'new_dims', ',', 'qs', ')'] | Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile | ['Compute', 'the', 'qth', 'quantile', 'of', 'the', 'data', 'along', 'the', 'specified', 'dimension', '.'] | train | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/variable.py#L1510-L1573 |
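A small sketch using the public `DataArray.quantile` counterpart of the method above; the data are illustrative.

```python
import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(12.0).reshape(3, 4), dims=("x", "y"))

# Scalar q gives a result without a quantile dimension; reduction is over dim="y".
median_over_y = da.quantile(0.5, dim="y")

# Array-like q adds a leading "quantile" dimension, as described in the docstring.
q_multi = da.quantile([0.25, 0.75], dim="y")
print(q_multi.dims)     # ('quantile', 'x')
```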
8,719 | arista-eosplus/pyeapi | pyeapi/eapilib.py | EapiConnection.authentication | def authentication(self, username, password):
"""Configures the user authentication for eAPI
This method configures the username and password combination to use
for authenticating to eAPI.
Args:
username (str): The username to use to authenticate the eAPI
connection with
password (str): The password in clear text to use to authenticate
the eAPI connection with
"""
_auth_text = '{}:{}'.format(username, password)
# Work around for Python 2.7/3.x compatibility
if int(sys.version[0]) > 2:
# For Python 3.x
_auth_bin = base64.encodebytes(_auth_text.encode())
_auth = _auth_bin.decode()
_auth = _auth.replace('\n', '')
self._auth = _auth
else:
# For Python 2.7
_auth = base64.encodestring(_auth_text)
self._auth = str(_auth).replace('\n', '')
_LOGGER.debug('Autentication string is: {}:***'.format(username)) | python | def authentication(self, username, password):
"""Configures the user authentication for eAPI
This method configures the username and password combination to use
for authenticating to eAPI.
Args:
username (str): The username to use to authenticate the eAPI
connection with
password (str): The password in clear text to use to authenticate
the eAPI connection with
"""
_auth_text = '{}:{}'.format(username, password)
# Work around for Python 2.7/3.x compatibility
if int(sys.version[0]) > 2:
# For Python 3.x
_auth_bin = base64.encodebytes(_auth_text.encode())
_auth = _auth_bin.decode()
_auth = _auth.replace('\n', '')
self._auth = _auth
else:
# For Python 2.7
_auth = base64.encodestring(_auth_text)
self._auth = str(_auth).replace('\n', '')
_LOGGER.debug('Autentication string is: {}:***'.format(username)) | ['def', 'authentication', '(', 'self', ',', 'username', ',', 'password', ')', ':', '_auth_text', '=', "'{}:{}'", '.', 'format', '(', 'username', ',', 'password', ')', '# Work around for Python 2.7/3.x compatibility', 'if', 'int', '(', 'sys', '.', 'version', '[', '0', ']', ')', '>', '2', ':', '# For Python 3.x', '_auth_bin', '=', 'base64', '.', 'encodebytes', '(', '_auth_text', '.', 'encode', '(', ')', ')', '_auth', '=', '_auth_bin', '.', 'decode', '(', ')', '_auth', '=', '_auth', '.', 'replace', '(', "'\\n'", ',', "''", ')', 'self', '.', '_auth', '=', '_auth', 'else', ':', '# For Python 2.7', '_auth', '=', 'base64', '.', 'encodestring', '(', '_auth_text', ')', 'self', '.', '_auth', '=', 'str', '(', '_auth', ')', '.', 'replace', '(', "'\\n'", ',', "''", ')', '_LOGGER', '.', 'debug', '(', "'Autentication string is: {}:***'", '.', 'format', '(', 'username', ')', ')'] | Configures the user authentication for eAPI
This method configures the username and password combination to use
for authenticating to eAPI.
Args:
username (str): The username to use to authenticate the eAPI
connection with
password (str): The password in clear text to use to authenticate
the eAPI connection with | ['Configures', 'the', 'user', 'authentication', 'for', 'eAPI'] | train | https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/eapilib.py#L230-L257 |
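In normal use this credential handling is driven through `pyeapi.connect`, which passes the username/password pair down to the connection; the host and credentials below are placeholders.

```python
import pyeapi

# Placeholder host and credentials; transport and port depend on the switch config.
conn = pyeapi.connect(transport='https', host='veos01',
                      username='admin', password='secret')
print(conn.execute(['show version']))
```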
8,720 | msiemens/tinydb | tinydb/database.py | Table.upsert | def upsert(self, document, cond):
"""
Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
"""
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)] | python | def upsert(self, document, cond):
"""
Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
"""
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)] | ['def', 'upsert', '(', 'self', ',', 'document', ',', 'cond', ')', ':', 'updated_docs', '=', 'self', '.', 'update', '(', 'document', ',', 'cond', ')', 'if', 'updated_docs', ':', 'return', 'updated_docs', 'else', ':', 'return', '[', 'self', '.', 'insert', '(', 'document', ')', ']'] | Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID | ['Update', 'a', 'document', 'if', 'it', 'exist', '-', 'insert', 'it', 'otherwise', '.'] | train | https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L566-L581 |
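A short usage sketch of `upsert` with the standard TinyDB query API; the file name is illustrative.

```python
from tinydb import TinyDB, Query

db = TinyDB('db.json')        # illustrative file name
User = Query()

# Updates every document with name == 'John', or inserts one if none match.
doc_ids = db.upsert({'name': 'John', 'logged_in': True}, User.name == 'John')
print(doc_ids)                # list with the affected document id(s)
```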
8,721 | prezi/django-zipkin | django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py | Client.getServiceNamesToTraceIds | def getServiceNamesToTraceIds(self, time_stamp, service_name, rpc_name):
"""
Given a time stamp, server service name, and rpc name, fetch all of the client services calling in paired
with the lists of every trace Ids (list<i64>) from the server to client.
The three arguments specify epoch time in microseconds, server side service name and rpc name. The return maps
contains the key - client_service_name and value - list<trace_id>.
Parameters:
- time_stamp
- service_name
- rpc_name
"""
self.send_getServiceNamesToTraceIds(time_stamp, service_name, rpc_name)
return self.recv_getServiceNamesToTraceIds() | python | def getServiceNamesToTraceIds(self, time_stamp, service_name, rpc_name):
"""
Given a time stamp, server service name, and rpc name, fetch all of the client services calling in paired
with the lists of every trace Ids (list<i64>) from the server to client.
The three arguments specify epoch time in microseconds, server side service name and rpc name. The return maps
contains the key - client_service_name and value - list<trace_id>.
Parameters:
- time_stamp
- service_name
- rpc_name
"""
self.send_getServiceNamesToTraceIds(time_stamp, service_name, rpc_name)
return self.recv_getServiceNamesToTraceIds() | ['def', 'getServiceNamesToTraceIds', '(', 'self', ',', 'time_stamp', ',', 'service_name', ',', 'rpc_name', ')', ':', 'self', '.', 'send_getServiceNamesToTraceIds', '(', 'time_stamp', ',', 'service_name', ',', 'rpc_name', ')', 'return', 'self', '.', 'recv_getServiceNamesToTraceIds', '(', ')'] | Given a time stamp, server service name, and rpc name, fetch all of the client services calling in paired
with the lists of every trace Ids (list<i64>) from the server to client.
The three arguments specify epoch time in microseconds, server side service name and rpc name. The return maps
contains the key - client_service_name and value - list<trace_id>.
Parameters:
- time_stamp
- service_name
- rpc_name | ['Given', 'a', 'time', 'stamp', 'server', 'service', 'name', 'and', 'rpc', 'name', 'fetch', 'all', 'of', 'the', 'client', 'services', 'calling', 'in', 'paired', 'with', 'the', 'lists', 'of', 'every', 'trace', 'Ids', '(', 'list<i64', '>', ')', 'from', 'the', 'server', 'to', 'client', '.'] | train | https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L928-L942 |
8,722 | pokerregion/poker | poker/room/pokerstars.py | Notes.add_label | def add_label(self, name, color):
"""Add a new label. It's id will automatically be calculated."""
color_upper = color.upper()
if not self._color_re.match(color_upper):
raise ValueError('Invalid color: {}'.format(color))
labels_tag = self.root[0]
last_id = int(labels_tag[-1].get('id'))
new_id = str(last_id + 1)
new_label = etree.Element('label', id=new_id, color=color_upper)
new_label.text = name
labels_tag.append(new_label) | python | def add_label(self, name, color):
"""Add a new label. It's id will automatically be calculated."""
color_upper = color.upper()
if not self._color_re.match(color_upper):
raise ValueError('Invalid color: {}'.format(color))
labels_tag = self.root[0]
last_id = int(labels_tag[-1].get('id'))
new_id = str(last_id + 1)
new_label = etree.Element('label', id=new_id, color=color_upper)
new_label.text = name
labels_tag.append(new_label) | ['def', 'add_label', '(', 'self', ',', 'name', ',', 'color', ')', ':', 'color_upper', '=', 'color', '.', 'upper', '(', ')', 'if', 'not', 'self', '.', '_color_re', '.', 'match', '(', 'color_upper', ')', ':', 'raise', 'ValueError', '(', "'Invalid color: {}'", '.', 'format', '(', 'color', ')', ')', 'labels_tag', '=', 'self', '.', 'root', '[', '0', ']', 'last_id', '=', 'int', '(', 'labels_tag', '[', '-', '1', ']', '.', 'get', '(', "'id'", ')', ')', 'new_id', '=', 'str', '(', 'last_id', '+', '1', ')', 'new_label', '=', 'etree', '.', 'Element', '(', "'label'", ',', 'id', '=', 'new_id', ',', 'color', '=', 'color_upper', ')', 'new_label', '.', 'text', '=', 'name', 'labels_tag', '.', 'append', '(', 'new_label', ')'] | Add a new label. It's id will automatically be calculated. | ['Add', 'a', 'new', 'label', '.', 'It', 's', 'id', 'will', 'automatically', 'be', 'calculated', '.'] | train | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L417-L430 |
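A brief usage sketch for the add_label method shown above; `notes` is assumed to be an already-constructed Notes instance (construction omitted), and the label name and color are invented:
notes.add_label('Fish', 'FF0000')    # color is upper-cased, validated against _color_re, then appended to the labels tag
# notes.add_label('Shark', 'zzz')    # a color that fails the regex raises ValueError('Invalid color: zzz')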
8,723 | 20c/xbahn | xbahn/connection/link.py | Link.cut | def cut(self, name, disconnect=False):
"""
Cut a wire (undo a wire() call)
Arguments:
- name (str): name of the wire
Keyword Arguments:
- disconnect (bool): if True also disconnect all connections on the
specified wire
"""
wire = getattr(self, name, None)
if wire and isinstance(wire, Wire):
if name != "main":
delattr(self, name)
if disconnect:
wire.disconnect()
wire.off("receive", self.on_receive)
if self.main == wire:
self.main = None
self.set_main_wire() | python | def cut(self, name, disconnect=False):
"""
Cut a wire (undo a wire() call)
Arguments:
- name (str): name of the wire
Keyword Arguments:
- disconnect (bool): if True also disconnect all connections on the
specified wire
"""
wire = getattr(self, name, None)
if wire and isinstance(wire, Wire):
if name != "main":
delattr(self, name)
if disconnect:
wire.disconnect()
wire.off("receive", self.on_receive)
if self.main == wire:
self.main = None
self.set_main_wire() | ['def', 'cut', '(', 'self', ',', 'name', ',', 'disconnect', '=', 'False', ')', ':', 'wire', '=', 'getattr', '(', 'self', ',', 'name', ',', 'None', ')', 'if', 'wire', 'and', 'isinstance', '(', 'wire', ',', 'Wire', ')', ':', 'if', 'name', '!=', '"main"', ':', 'delattr', '(', 'self', ',', 'name', ')', 'if', 'disconnect', ':', 'wire', '.', 'disconnect', '(', ')', 'wire', '.', 'off', '(', '"receive"', ',', 'self', '.', 'on_receive', ')', 'if', 'self', '.', 'main', '==', 'wire', ':', 'self', '.', 'main', '=', 'None', 'self', '.', 'set_main_wire', '(', ')'] | Cut a wire (undo a wire() call)
Arguments:
- name (str): name of the wire
Keyword Arguments:
- disconnect (bool): if True also disconnect all connections on the
specified wire | ['Cut', 'a', 'wire', '(', 'undo', 'a', 'wire', '()', 'call', ')'] | train | https://github.com/20c/xbahn/blob/afb27b0576841338a366d7cac0200a782bd84be6/xbahn/connection/link.py#L201-L225 |
8,724 | spyder-ide/conda-manager | conda_manager/api/download_api.py | NetworkProxyFactory.proxy_servers | def proxy_servers(self):
"""
Return the proxy servers available.
First env variables will be searched and updated with values from
condarc config file.
"""
proxy_servers = {}
if self._load_rc_func is None:
return proxy_servers
else:
HTTP_PROXY = os.environ.get('HTTP_PROXY')
HTTPS_PROXY = os.environ.get('HTTPS_PROXY')
if HTTP_PROXY:
proxy_servers['http'] = HTTP_PROXY
if HTTPS_PROXY:
proxy_servers['https'] = HTTPS_PROXY
proxy_servers_conf = self._load_rc_func().get('proxy_servers', {})
proxy_servers.update(proxy_servers_conf)
return proxy_servers | python | def proxy_servers(self):
"""
Return the proxy servers available.
First env variables will be searched and updated with values from
condarc config file.
"""
proxy_servers = {}
if self._load_rc_func is None:
return proxy_servers
else:
HTTP_PROXY = os.environ.get('HTTP_PROXY')
HTTPS_PROXY = os.environ.get('HTTPS_PROXY')
if HTTP_PROXY:
proxy_servers['http'] = HTTP_PROXY
if HTTPS_PROXY:
proxy_servers['https'] = HTTPS_PROXY
proxy_servers_conf = self._load_rc_func().get('proxy_servers', {})
proxy_servers.update(proxy_servers_conf)
return proxy_servers | ['def', 'proxy_servers', '(', 'self', ')', ':', 'proxy_servers', '=', '{', '}', 'if', 'self', '.', '_load_rc_func', 'is', 'None', ':', 'return', 'proxy_servers', 'else', ':', 'HTTP_PROXY', '=', 'os', '.', 'environ', '.', 'get', '(', "'HTTP_PROXY'", ')', 'HTTPS_PROXY', '=', 'os', '.', 'environ', '.', 'get', '(', "'HTTPS_PROXY'", ')', 'if', 'HTTP_PROXY', ':', 'proxy_servers', '[', "'http'", ']', '=', 'HTTP_PROXY', 'if', 'HTTPS_PROXY', ':', 'proxy_servers', '[', "'https'", ']', '=', 'HTTPS_PROXY', 'proxy_servers_conf', '=', 'self', '.', '_load_rc_func', '(', ')', '.', 'get', '(', "'proxy_servers'", ',', '{', '}', ')', 'proxy_servers', '.', 'update', '(', 'proxy_servers_conf', ')', 'return', 'proxy_servers'] | Return the proxy servers available.
First env variables will be searched and updated with values from
condarc config file. | ['Return', 'the', 'proxy', 'servers', 'available', '.'] | train | https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/download_api.py#L72-L95 |
8,725 | Opentrons/opentrons | api/src/opentrons/hardware_control/types.py | Axis.gantry_axes | def gantry_axes(cls) -> Tuple['Axis', 'Axis', 'Axis', 'Axis']:
""" The axes which are tied to the gantry and require the deck
calibration transform
"""
return (cls.X, cls.Y, cls.Z, cls.A) | python | def gantry_axes(cls) -> Tuple['Axis', 'Axis', 'Axis', 'Axis']:
""" The axes which are tied to the gantry and require the deck
calibration transform
"""
return (cls.X, cls.Y, cls.Z, cls.A) | ['def', 'gantry_axes', '(', 'cls', ')', '->', 'Tuple', '[', "'Axis'", ',', "'Axis'", ',', "'Axis'", ',', "'Axis'", ']', ':', 'return', '(', 'cls', '.', 'X', ',', 'cls', '.', 'Y', ',', 'cls', '.', 'Z', ',', 'cls', '.', 'A', ')'] | The axes which are tied to the gantry and require the deck
calibration transform | ['The', 'axes', 'which', 'are', 'tied', 'to', 'the', 'gantry', 'and', 'require', 'the', 'deck', 'calibration', 'transform'] | train | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/hardware_control/types.py#L22-L26 |
8,726 | zsimic/runez | src/runez/config.py | to_dict | def to_dict(value, prefix=None, separators="=,"):
"""
Args:
value: Value to turn into a dict
prefix (str | unicode | None): Optional prefix for keys (if provided, `prefix.` is added to all keys)
separators (str | unicode): 2 chars: 1st is assignment separator, 2nd is key-value pair separator
Returns:
(dict): Parse key/values
"""
if not value or isinstance(value, dict):
return value or {}
result = {}
for val in flattened(value, split=(separators[1], SANITIZED)):
if not val:
continue
if hasattr(val, "partition"):
k, _, v = val.partition(separators[0])
k = k.strip()
if k:
v = v.strip()
if prefix and not k.startswith(prefix):
k = "%s.%s" % (prefix, k)
result[k] = v
return result | python | def to_dict(value, prefix=None, separators="=,"):
"""
Args:
value: Value to turn into a dict
prefix (str | unicode | None): Optional prefix for keys (if provided, `prefix.` is added to all keys)
separators (str | unicode): 2 chars: 1st is assignment separator, 2nd is key-value pair separator
Returns:
(dict): Parse key/values
"""
if not value or isinstance(value, dict):
return value or {}
result = {}
for val in flattened(value, split=(separators[1], SANITIZED)):
if not val:
continue
if hasattr(val, "partition"):
k, _, v = val.partition(separators[0])
k = k.strip()
if k:
v = v.strip()
if prefix and not k.startswith(prefix):
k = "%s.%s" % (prefix, k)
result[k] = v
return result | ['def', 'to_dict', '(', 'value', ',', 'prefix', '=', 'None', ',', 'separators', '=', '"=,"', ')', ':', 'if', 'not', 'value', 'or', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'return', 'value', 'or', '{', '}', 'result', '=', '{', '}', 'for', 'val', 'in', 'flattened', '(', 'value', ',', 'split', '=', '(', 'separators', '[', '1', ']', ',', 'SANITIZED', ')', ')', ':', 'if', 'not', 'val', ':', 'continue', 'if', 'hasattr', '(', 'val', ',', '"partition"', ')', ':', 'k', ',', '_', ',', 'v', '=', 'val', '.', 'partition', '(', 'separators', '[', '0', ']', ')', 'k', '=', 'k', '.', 'strip', '(', ')', 'if', 'k', ':', 'v', '=', 'v', '.', 'strip', '(', ')', 'if', 'prefix', 'and', 'not', 'k', '.', 'startswith', '(', 'prefix', ')', ':', 'k', '=', '"%s.%s"', '%', '(', 'prefix', ',', 'k', ')', 'result', '[', 'k', ']', '=', 'v', 'return', 'result'] | Args:
value: Value to turn into a dict
prefix (str | unicode | None): Optional prefix for keys (if provided, `prefix.` is added to all keys)
separators (str | unicode): 2 chars: 1st is assignment separator, 2nd is key-value pair separator
Returns:
(dict): Parse key/values | ['Args', ':', 'value', ':', 'Value', 'to', 'turn', 'into', 'a', 'dict', 'prefix', '(', 'str', '|', 'unicode', '|', 'None', ')', ':', 'Optional', 'prefix', 'for', 'keys', '(', 'if', 'provided', 'prefix', '.', 'is', 'added', 'to', 'all', 'keys', ')', 'separators', '(', 'str', '|', 'unicode', ')', ':', '2', 'chars', ':', '1st', 'is', 'assignment', 'separator', '2nd', 'is', 'key', '-', 'value', 'pair', 'separator'] | train | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/config.py#L407-L434 |
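A short illustration of the to_dict helper above; the import path is inferred from the source file src/runez/config.py and the key/value strings are invented:
from runez.config import to_dict        # module path assumed from the record above
to_dict('a=1, b=2')                      # -> {'a': '1', 'b': '2'}
to_dict('a=1, b=2', prefix='cfg')        # -> {'cfg.a': '1', 'cfg.b': '2'}
to_dict({'a': '1'})                      # dicts are returned unchanged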
8,727 | saltstack/salt | salt/modules/ldap3.py | add | def add(connect_spec, dn, attributes):
'''Add an entry to an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:param attributes:
Non-empty dict mapping each of the new entry's attributes to a
non-empty iterable of values.
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.add "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret',
},
}" "dn='dc=example,dc=com'" "attributes={'example': 'values'}"
'''
l = connect(connect_spec)
# convert the "iterable of values" to lists in case that's what
# addModlist() expects (also to ensure that the caller's objects
# are not modified)
attributes = dict(((attr, salt.utils.data.encode(list(vals)))
for attr, vals in six.iteritems(attributes)))
log.info('adding entry: dn: %s attributes: %s', repr(dn), repr(attributes))
if 'unicodePwd' in attributes:
attributes['unicodePwd'] = [_format_unicode_password(x) for x in attributes['unicodePwd']]
modlist = ldap.modlist.addModlist(attributes),
try:
l.c.add_s(dn, modlist)
except ldap.LDAPError as e:
_convert_exception(e)
return True | python | def add(connect_spec, dn, attributes):
'''Add an entry to an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:param attributes:
Non-empty dict mapping each of the new entry's attributes to a
non-empty iterable of values.
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.add "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret',
},
}" "dn='dc=example,dc=com'" "attributes={'example': 'values'}"
'''
l = connect(connect_spec)
# convert the "iterable of values" to lists in case that's what
# addModlist() expects (also to ensure that the caller's objects
# are not modified)
attributes = dict(((attr, salt.utils.data.encode(list(vals)))
for attr, vals in six.iteritems(attributes)))
log.info('adding entry: dn: %s attributes: %s', repr(dn), repr(attributes))
if 'unicodePwd' in attributes:
attributes['unicodePwd'] = [_format_unicode_password(x) for x in attributes['unicodePwd']]
modlist = ldap.modlist.addModlist(attributes),
try:
l.c.add_s(dn, modlist)
except ldap.LDAPError as e:
_convert_exception(e)
return True | ['def', 'add', '(', 'connect_spec', ',', 'dn', ',', 'attributes', ')', ':', 'l', '=', 'connect', '(', 'connect_spec', ')', '# convert the "iterable of values" to lists in case that\'s what', "# addModlist() expects (also to ensure that the caller's objects", '# are not modified)', 'attributes', '=', 'dict', '(', '(', '(', 'attr', ',', 'salt', '.', 'utils', '.', 'data', '.', 'encode', '(', 'list', '(', 'vals', ')', ')', ')', 'for', 'attr', ',', 'vals', 'in', 'six', '.', 'iteritems', '(', 'attributes', ')', ')', ')', 'log', '.', 'info', '(', "'adding entry: dn: %s attributes: %s'", ',', 'repr', '(', 'dn', ')', ',', 'repr', '(', 'attributes', ')', ')', 'if', "'unicodePwd'", 'in', 'attributes', ':', 'attributes', '[', "'unicodePwd'", ']', '=', '[', '_format_unicode_password', '(', 'x', ')', 'for', 'x', 'in', 'attributes', '[', "'unicodePwd'", ']', ']', 'modlist', '=', 'ldap', '.', 'modlist', '.', 'addModlist', '(', 'attributes', ')', ',', 'try', ':', 'l', '.', 'c', '.', 'add_s', '(', 'dn', ',', 'modlist', ')', 'except', 'ldap', '.', 'LDAPError', 'as', 'e', ':', '_convert_exception', '(', 'e', ')', 'return', 'True'] | Add an entry to an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:param attributes:
Non-empty dict mapping each of the new entry's attributes to a
non-empty iterable of values.
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.add "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret',
},
}" "dn='dc=example,dc=com'" "attributes={'example': 'values'}" | ['Add', 'an', 'entry', 'to', 'an', 'LDAP', 'database', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ldap3.py#L372-L417 |
8,728 | nitmir/django-cas-server | cas_server/utils.py | regexpr_validator | def regexpr_validator(value):
"""
Test that ``value`` is a valid regular expression
:param unicode value: A regular expression to test
:raises ValidationError: if ``value`` is not a valid regular expression
"""
try:
re.compile(value)
except re.error:
raise ValidationError(
_('"%(value)s" is not a valid regular expression'),
params={'value': value}
) | python | def regexpr_validator(value):
"""
Test that ``value`` is a valid regular expression
:param unicode value: A regular expression to test
:raises ValidationError: if ``value`` is not a valid regular expression
"""
try:
re.compile(value)
except re.error:
raise ValidationError(
_('"%(value)s" is not a valid regular expression'),
params={'value': value}
) | ['def', 'regexpr_validator', '(', 'value', ')', ':', 'try', ':', 're', '.', 'compile', '(', 'value', ')', 'except', 're', '.', 'error', ':', 'raise', 'ValidationError', '(', '_', '(', '\'"%(value)s" is not a valid regular expression\'', ')', ',', 'params', '=', '{', "'value'", ':', 'value', '}', ')'] | Test that ``value`` is a valid regular expression
:param unicode value: A regular expression to test
:raises ValidationError: if ``value`` is not a valid regular expression | ['Test', 'that', 'value', 'is', 'a', 'valid', 'regular', 'expression'] | train | https://github.com/nitmir/django-cas-server/blob/d106181b94c444f1946269da5c20f6c904840ad3/cas_server/utils.py#L736-L749 |
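A small usage sketch of regexpr_validator above; it assumes a configured Django environment, since the validation message goes through Django's translation machinery:
from django.core.exceptions import ValidationError
from cas_server.utils import regexpr_validator   # module path taken from the record above
regexpr_validator(r'^[a-z]+$')                   # a valid pattern passes silently
try:
    regexpr_validator('[unclosed')               # an invalid pattern
except ValidationError:
    print('rejected: not a valid regular expression')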
8,729 | pantsbuild/pex | pex/util.py | CacheHelper.hash | def hash(cls, path, digest=None, hasher=sha1):
"""Return the digest of a single file in a memory-efficient manner."""
if digest is None:
digest = hasher()
with open(path, 'rb') as fh:
cls.update_hash(fh, digest)
return digest.hexdigest() | python | def hash(cls, path, digest=None, hasher=sha1):
"""Return the digest of a single file in a memory-efficient manner."""
if digest is None:
digest = hasher()
with open(path, 'rb') as fh:
cls.update_hash(fh, digest)
return digest.hexdigest() | ['def', 'hash', '(', 'cls', ',', 'path', ',', 'digest', '=', 'None', ',', 'hasher', '=', 'sha1', ')', ':', 'if', 'digest', 'is', 'None', ':', 'digest', '=', 'hasher', '(', ')', 'with', 'open', '(', 'path', ',', "'rb'", ')', 'as', 'fh', ':', 'cls', '.', 'update_hash', '(', 'fh', ',', 'digest', ')', 'return', 'digest', '.', 'hexdigest', '(', ')'] | Return the digest of a single file in a memory-efficient manner. | ['Return', 'the', 'digest', 'of', 'a', 'single', 'file', 'in', 'a', 'memory', '-', 'efficient', 'manner', '.'] | train | https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/util.py#L115-L121 |
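A minimal usage sketch of CacheHelper.hash above; the file path is a placeholder:
from hashlib import sha256
from pex.util import CacheHelper                            # module path taken from the record above
print(CacheHelper.hash('/tmp/example.whl'))                 # sha1 hex digest by default
print(CacheHelper.hash('/tmp/example.whl', hasher=sha256))  # any hashlib-style factory can be passed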
8,730 | python-gitlab/python-gitlab | gitlab/v4/objects.py | ProjectExport.download | def download(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Download the archive of a project export.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
str: The blob content if streamed is False, None otherwise
"""
path = '/projects/%s/export/download' % self.project_id
result = self.manager.gitlab.http_get(path, streamed=streamed,
raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size) | python | def download(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Download the archive of a project export.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
str: The blob content if streamed is False, None otherwise
"""
path = '/projects/%s/export/download' % self.project_id
result = self.manager.gitlab.http_get(path, streamed=streamed,
raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size) | ['def', 'download', '(', 'self', ',', 'streamed', '=', 'False', ',', 'action', '=', 'None', ',', 'chunk_size', '=', '1024', ',', '*', '*', 'kwargs', ')', ':', 'path', '=', "'/projects/%s/export/download'", '%', 'self', '.', 'project_id', 'result', '=', 'self', '.', 'manager', '.', 'gitlab', '.', 'http_get', '(', 'path', ',', 'streamed', '=', 'streamed', ',', 'raw', '=', 'True', ',', '*', '*', 'kwargs', ')', 'return', 'utils', '.', 'response_content', '(', 'result', ',', 'streamed', ',', 'action', ',', 'chunk_size', ')'] | Download the archive of a project export.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
str: The blob content if streamed is False, None otherwise | ['Download', 'the', 'archive', 'of', 'a', 'project', 'export', '.'] | train | https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L3212-L3234 |
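A hedged usage sketch for ProjectExport.download above; the server URL, token and project id are placeholders, the exports manager call is an assumption about the surrounding python-gitlab API, and the export is assumed to have already finished on the server:
import gitlab
gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
export = gl.projects.get(123).exports.create({})      # assumed manager API; normally you poll until the export is ready
with open('/tmp/project-export.tgz', 'wb') as fh:
    export.download(streamed=True, action=fh.write)   # stream chunks straight to disk instead of holding them in memory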
8,731 | Robpol86/libnl | libnl/handlers.py | nl_msg_in_handler_debug | def nl_msg_in_handler_debug(msg, arg):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L114."""
ofd = arg or _LOGGER.debug
ofd('-- Debug: Received Message:')
nl_msg_dump(msg, ofd)
return NL_OK | python | def nl_msg_in_handler_debug(msg, arg):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L114."""
ofd = arg or _LOGGER.debug
ofd('-- Debug: Received Message:')
nl_msg_dump(msg, ofd)
return NL_OK | ['def', 'nl_msg_in_handler_debug', '(', 'msg', ',', 'arg', ')', ':', 'ofd', '=', 'arg', 'or', '_LOGGER', '.', 'debug', 'ofd', '(', "'-- Debug: Received Message:'", ')', 'nl_msg_dump', '(', 'msg', ',', 'ofd', ')', 'return', 'NL_OK'] | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L114. | ['https', ':', '//', 'github', '.', 'com', '/', 'thom311', '/', 'libnl', '/', 'blob', '/', 'libnl3_2_25', '/', 'lib', '/', 'handlers', '.', 'c#L114', '.'] | train | https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/handlers.py#L108-L113 |
8,732 | qubole/qds-sdk-py | qds_sdk/commands.py | Command.cancel_id | def cancel_id(cls, id):
"""
Cancels command denoted by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
data = {"status": "kill"}
return conn.put(cls.element_path(id), data) | python | def cancel_id(cls, id):
"""
Cancels command denoted by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
data = {"status": "kill"}
return conn.put(cls.element_path(id), data) | ['def', 'cancel_id', '(', 'cls', ',', 'id', ')', ':', 'conn', '=', 'Qubole', '.', 'agent', '(', ')', 'data', '=', '{', '"status"', ':', '"kill"', '}', 'return', 'conn', '.', 'put', '(', 'cls', '.', 'element_path', '(', 'id', ')', ',', 'data', ')'] | Cancels command denoted by this id
Args:
`id`: command id | ['Cancels', 'command', 'denoted', 'by', 'this', 'id'] | train | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L181-L190 |
8,733 | JdeRobot/base | src/libs/comm_py/comm/ice/sonarIceClient.py | Sonar.getSonarData | def getSonarData(self):
'''
Returns last SonarData.
@return last JdeRobotTypes SonarData saved
'''
if self.hasproxy():
self.lock.acquire()
sonar = self.sonar
self.lock.release()
return sonar
return None | python | def getSonarData(self):
'''
Returns last SonarData.
@return last JdeRobotTypes SonarData saved
'''
if self.hasproxy():
self.lock.acquire()
sonar = self.sonar
self.lock.release()
return sonar
return None | ['def', 'getSonarData', '(', 'self', ')', ':', 'if', 'self', '.', 'hasproxy', '(', ')', ':', 'self', '.', 'lock', '.', 'acquire', '(', ')', 'sonar', '=', 'self', '.', 'sonar', 'self', '.', 'lock', '.', 'release', '(', ')', 'return', 'sonar', 'return', 'None'] | Returns last LaserData.
@return last JdeRobotTypes LaserData saved | ['Returns', 'last', 'LaserData', '.'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/libs/comm_py/comm/ice/sonarIceClient.py#L97-L110 |
8,734 | portfors-lab/sparkle | sparkle/run/protocol_runner.py | ProtocolRunner.set_comment | def set_comment(self, cellid, comment):
"""Saves the provided comment to the current dataset.
:param cellid: number of the current cell
:type cellid: int
:param comment: a message to add documentation to data
:type comment: str
"""
info = {'cellid': cellid, 'comment': comment}
self.datafile.set_metadata(self.current_dataset_name, info) | python | def set_comment(self, cellid, comment):
"""Saves the provided comment to the current dataset.
:param cellid: number of the current cell
:type cellid: int
:param comment: a message to add documentation to data
:type comment: str
"""
info = {'cellid': cellid, 'comment': comment}
self.datafile.set_metadata(self.current_dataset_name, info) | ['def', 'set_comment', '(', 'self', ',', 'cellid', ',', 'comment', ')', ':', 'info', '=', '{', "'cellid'", ':', 'cellid', ',', "'comment'", ':', 'comment', '}', 'self', '.', 'datafile', '.', 'set_metadata', '(', 'self', '.', 'current_dataset_name', ',', 'info', ')'] | Saves the provided comment to the current dataset.
:param cellid: number of the current cell
:type cellid: int
:param comment: a message to add documentation to data
:type comment: str | ['Saves', 'the', 'provided', 'comment', 'to', 'the', 'current', 'dataset', '.'] | train | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/protocol_runner.py#L82-L91 |
8,735 | pyviz/geoviews | geoviews/util.py | geom_dict_to_array_dict | def geom_dict_to_array_dict(geom_dict, coord_names=['Longitude', 'Latitude']):
"""
Converts a dictionary containing a geometry key to a dictionary
of x- and y-coordinate arrays and if present a list-of-lists of
hole array.
"""
x, y = coord_names
geom = geom_dict['geometry']
new_dict = {k: v for k, v in geom_dict.items() if k != 'geometry'}
array = geom_to_array(geom)
new_dict[x] = array[:, 0]
new_dict[y] = array[:, 1]
if geom.geom_type == 'Polygon':
holes = []
for interior in geom.interiors:
holes.append(geom_to_array(interior))
if holes:
new_dict['holes'] = [holes]
elif geom.geom_type == 'MultiPolygon':
outer_holes = []
for g in geom:
holes = []
for interior in g.interiors:
holes.append(geom_to_array(interior))
outer_holes.append(holes)
if any(hs for hs in outer_holes):
new_dict['holes'] = outer_holes
return new_dict | python | def geom_dict_to_array_dict(geom_dict, coord_names=['Longitude', 'Latitude']):
"""
Converts a dictionary containing a geometry key to a dictionary
of x- and y-coordinate arrays and if present a list-of-lists of
hole array.
"""
x, y = coord_names
geom = geom_dict['geometry']
new_dict = {k: v for k, v in geom_dict.items() if k != 'geometry'}
array = geom_to_array(geom)
new_dict[x] = array[:, 0]
new_dict[y] = array[:, 1]
if geom.geom_type == 'Polygon':
holes = []
for interior in geom.interiors:
holes.append(geom_to_array(interior))
if holes:
new_dict['holes'] = [holes]
elif geom.geom_type == 'MultiPolygon':
outer_holes = []
for g in geom:
holes = []
for interior in g.interiors:
holes.append(geom_to_array(interior))
outer_holes.append(holes)
if any(hs for hs in outer_holes):
new_dict['holes'] = outer_holes
return new_dict | ['def', 'geom_dict_to_array_dict', '(', 'geom_dict', ',', 'coord_names', '=', '[', "'Longitude'", ',', "'Latitude'", ']', ')', ':', 'x', ',', 'y', '=', 'coord_names', 'geom', '=', 'geom_dict', '[', "'geometry'", ']', 'new_dict', '=', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'geom_dict', '.', 'items', '(', ')', 'if', 'k', '!=', "'geometry'", '}', 'array', '=', 'geom_to_array', '(', 'geom', ')', 'new_dict', '[', 'x', ']', '=', 'array', '[', ':', ',', '0', ']', 'new_dict', '[', 'y', ']', '=', 'array', '[', ':', ',', '1', ']', 'if', 'geom', '.', 'geom_type', '==', "'Polygon'", ':', 'holes', '=', '[', ']', 'for', 'interior', 'in', 'geom', '.', 'interiors', ':', 'holes', '.', 'append', '(', 'geom_to_array', '(', 'interior', ')', ')', 'if', 'holes', ':', 'new_dict', '[', "'holes'", ']', '=', '[', 'holes', ']', 'elif', 'geom', '.', 'geom_type', '==', "'MultiPolygon'", ':', 'outer_holes', '=', '[', ']', 'for', 'g', 'in', 'geom', ':', 'holes', '=', '[', ']', 'for', 'interior', 'in', 'g', '.', 'interiors', ':', 'holes', '.', 'append', '(', 'geom_to_array', '(', 'interior', ')', ')', 'outer_holes', '.', 'append', '(', 'holes', ')', 'if', 'any', '(', 'hs', 'for', 'hs', 'in', 'outer_holes', ')', ':', 'new_dict', '[', "'holes'", ']', '=', 'outer_holes', 'return', 'new_dict'] | Converts a dictionary containing an geometry key to a dictionary
of x- and y-coordinate arrays and if present a list-of-lists of
hole array. | ['Converts', 'a', 'dictionary', 'containing', 'an', 'geometry', 'key', 'to', 'a', 'dictionary', 'of', 'x', '-', 'and', 'y', '-', 'coordinate', 'arrays', 'and', 'if', 'present', 'a', 'list', '-', 'of', '-', 'lists', 'of', 'hole', 'array', '.'] | train | https://github.com/pyviz/geoviews/blob/cc70ac2d5a96307769bc6192eaef8576c3d24b30/geoviews/util.py#L92-L119 |
8,736 | jeremymcrae/denovonear | denovonear/ensembl_requester.py | EnsemblRequest.get_chrom_for_transcript | def get_chrom_for_transcript(self, transcript_id, hgnc_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/overlap/id/{}?feature=gene".format(transcript_id)
r = self.ensembl_request(ext, headers)
for gene in json.loads(r):
if gene["external_name"] == hgnc_id:
return gene["seq_region_name"]
return None | python | def get_chrom_for_transcript(self, transcript_id, hgnc_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/overlap/id/{}?feature=gene".format(transcript_id)
r = self.ensembl_request(ext, headers)
for gene in json.loads(r):
if gene["external_name"] == hgnc_id:
return gene["seq_region_name"]
return None | ['def', 'get_chrom_for_transcript', '(', 'self', ',', 'transcript_id', ',', 'hgnc_id', ')', ':', 'headers', '=', '{', '"content-type"', ':', '"application/json"', '}', 'self', '.', 'attempt', '=', '0', 'ext', '=', '"/overlap/id/{}?feature=gene"', '.', 'format', '(', 'transcript_id', ')', 'r', '=', 'self', '.', 'ensembl_request', '(', 'ext', ',', 'headers', ')', 'for', 'gene', 'in', 'json', '.', 'loads', '(', 'r', ')', ':', 'if', 'gene', '[', '"external_name"', ']', '==', 'hgnc_id', ':', 'return', 'gene', '[', '"seq_region_name"', ']', 'return', 'None'] | obtain the sequence for a transcript from ensembl | ['obtain', 'the', 'sequence', 'for', 'a', 'transcript', 'from', 'ensembl'] | train | https://github.com/jeremymcrae/denovonear/blob/feaab0fc77e89d70b31e8092899e4f0e68bac9fe/denovonear/ensembl_requester.py#L308-L322 |
8,737 | jtwhite79/pyemu | pyemu/en.py | ParameterEnsemble.from_parfiles | def from_parfiles(cls,pst,parfile_names,real_names=None):
""" create a parameter ensemble from parfiles. Accepts parfiles with less than the
parameters in the control (get NaNs in the ensemble) or extra parameters in the
parfiles (get dropped)
Parameters:
pst : pyemu.Pst
parfile_names : list of str
par file names
real_names : str
optional list of realization names. If None, a single integer counter is used
Returns:
pyemu.ParameterEnsemble
"""
if isinstance(pst,str):
pst = pyemu.Pst(pst)
dfs = {}
if real_names is not None:
assert len(real_names) == len(parfile_names)
else:
real_names = np.arange(len(parfile_names))
for rname,pfile in zip(real_names,parfile_names):
assert os.path.exists(pfile), "ParameterEnsemble.read_parfiles() error: " + \
"file: {0} not found".format(pfile)
df = read_parfile(pfile)
#check for scale differences - I don't know who is dumb enough
#to change scale between par files and pst...
diff = df.scale - pst.parameter_data.scale
if diff.apply(np.abs).sum() > 0.0:
warnings.warn("differences in scale detected, applying scale in par file",
PyemuWarning)
#df.loc[:,"parval1"] *= df.scale
dfs[rname] = df.parval1.values
df_all = pd.DataFrame(data=dfs).T
df_all.columns = df.index
if len(pst.par_names) != df_all.shape[1]:
#if len(pst.par_names) < df_all.shape[1]:
# raise Exception("pst is not compatible with par files")
pset = set(pst.par_names)
dset = set(df_all.columns)
diff = pset.difference(dset)
if len(diff) > 0:
warnings.warn("the following parameters are not in the par files (getting NaNs) :{0}".
format(','.join(diff)),PyemuWarning)
blank_df = pd.DataFrame(index=df_all.index,columns=diff)
df_all = pd.concat([df_all,blank_df],axis=1)
diff = dset.difference(pset)
if len(diff) > 0:
warnings.warn("the following par file parameters are not in the control (being dropped):{0}".
format(','.join(diff)),PyemuWarning)
df_all = df_all.loc[:, pst.par_names]
return ParameterEnsemble.from_dataframe(df=df_all,pst=pst) | python | def from_parfiles(cls,pst,parfile_names,real_names=None):
""" create a parameter ensemble from parfiles. Accepts parfiles with less than the
parameters in the control (get NaNs in the ensemble) or extra parameters in the
parfiles (get dropped)
Parameters:
pst : pyemu.Pst
parfile_names : list of str
par file names
real_names : str
optional list of realization names. If None, a single integer counter is used
Returns:
pyemu.ParameterEnsemble
"""
if isinstance(pst,str):
pst = pyemu.Pst(pst)
dfs = {}
if real_names is not None:
assert len(real_names) == len(parfile_names)
else:
real_names = np.arange(len(parfile_names))
for rname,pfile in zip(real_names,parfile_names):
assert os.path.exists(pfile), "ParameterEnsemble.read_parfiles() error: " + \
"file: {0} not found".format(pfile)
df = read_parfile(pfile)
#check for scale differences - I don't know who is dumb enough
#to change scale between par files and pst...
diff = df.scale - pst.parameter_data.scale
if diff.apply(np.abs).sum() > 0.0:
warnings.warn("differences in scale detected, applying scale in par file",
PyemuWarning)
#df.loc[:,"parval1"] *= df.scale
dfs[rname] = df.parval1.values
df_all = pd.DataFrame(data=dfs).T
df_all.columns = df.index
if len(pst.par_names) != df_all.shape[1]:
#if len(pst.par_names) < df_all.shape[1]:
# raise Exception("pst is not compatible with par files")
pset = set(pst.par_names)
dset = set(df_all.columns)
diff = pset.difference(dset)
if len(diff) > 0:
warnings.warn("the following parameters are not in the par files (getting NaNs) :{0}".
format(','.join(diff)),PyemuWarning)
blank_df = pd.DataFrame(index=df_all.index,columns=diff)
df_all = pd.concat([df_all,blank_df],axis=1)
diff = dset.difference(pset)
if len(diff) > 0:
warnings.warn("the following par file parameters are not in the control (being dropped):{0}".
format(','.join(diff)),PyemuWarning)
df_all = df_all.loc[:, pst.par_names]
return ParameterEnsemble.from_dataframe(df=df_all,pst=pst) | ['def', 'from_parfiles', '(', 'cls', ',', 'pst', ',', 'parfile_names', ',', 'real_names', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'pst', ',', 'str', ')', ':', 'pst', '=', 'pyemu', '.', 'Pst', '(', 'pst', ')', 'dfs', '=', '{', '}', 'if', 'real_names', 'is', 'not', 'None', ':', 'assert', 'len', '(', 'real_names', ')', '==', 'len', '(', 'parfile_names', ')', 'else', ':', 'real_names', '=', 'np', '.', 'arange', '(', 'len', '(', 'parfile_names', ')', ')', 'for', 'rname', ',', 'pfile', 'in', 'zip', '(', 'real_names', ',', 'parfile_names', ')', ':', 'assert', 'os', '.', 'path', '.', 'exists', '(', 'pfile', ')', ',', '"ParameterEnsemble.read_parfiles() error: "', '+', '"file: {0} not found"', '.', 'format', '(', 'pfile', ')', 'df', '=', 'read_parfile', '(', 'pfile', ')', "#check for scale differences - I don't who is dumb enough", '#to change scale between par files and pst...', 'diff', '=', 'df', '.', 'scale', '-', 'pst', '.', 'parameter_data', '.', 'scale', 'if', 'diff', '.', 'apply', '(', 'np', '.', 'abs', ')', '.', 'sum', '(', ')', '>', '0.0', ':', 'warnings', '.', 'warn', '(', '"differences in scale detected, applying scale in par file"', ',', 'PyemuWarning', ')', '#df.loc[:,"parval1"] *= df.scale', 'dfs', '[', 'rname', ']', '=', 'df', '.', 'parval1', '.', 'values', 'df_all', '=', 'pd', '.', 'DataFrame', '(', 'data', '=', 'dfs', ')', '.', 'T', 'df_all', '.', 'columns', '=', 'df', '.', 'index', 'if', 'len', '(', 'pst', '.', 'par_names', ')', '!=', 'df_all', '.', 'shape', '[', '1', ']', ':', '#if len(pst.par_names) < df_all.shape[1]:', '# raise Exception("pst is not compatible with par files")', 'pset', '=', 'set', '(', 'pst', '.', 'par_names', ')', 'dset', '=', 'set', '(', 'df_all', '.', 'columns', ')', 'diff', '=', 'pset', '.', 'difference', '(', 'dset', ')', 'if', 'len', '(', 'diff', ')', '>', '0', ':', 'warnings', '.', 'warn', '(', '"the following parameters are not in the par files (getting NaNs) :{0}"', '.', 'format', '(', "','", '.', 'join', '(', 'diff', ')', ')', ',', 'PyemuWarning', ')', 'blank_df', '=', 'pd', '.', 'DataFrame', '(', 'index', '=', 'df_all', '.', 'index', ',', 'columns', '=', 'diff', ')', 'df_all', '=', 'pd', '.', 'concat', '(', '[', 'df_all', ',', 'blank_df', ']', ',', 'axis', '=', '1', ')', 'diff', '=', 'dset', '.', 'difference', '(', 'pset', ')', 'if', 'len', '(', 'diff', ')', '>', '0', ':', 'warnings', '.', 'warn', '(', '"the following par file parameters are not in the control (being dropped):{0}"', '.', 'format', '(', "','", '.', 'join', '(', 'diff', ')', ')', ',', 'PyemuWarning', ')', 'df_all', '=', 'df_all', '.', 'loc', '[', ':', ',', 'pst', '.', 'par_names', ']', 'return', 'ParameterEnsemble', '.', 'from_dataframe', '(', 'df', '=', 'df_all', ',', 'pst', '=', 'pst', ')'] | create a parameter ensemble from parfiles. Accepts parfiles with less than the
parameters in the control (get NaNs in the ensemble) or extra parameters in the
parfiles (get dropped)
Parameters:
pst : pyemu.Pst
parfile_names : list of str
par file names
real_names : str
optional list of realization names. If None, a single integer counter is used
Returns:
pyemu.ParameterEnsemble | ['create', 'a', 'parameter', 'ensemble', 'from', 'parfiles', '.', 'Accepts', 'parfiles', 'with', 'less', 'than', 'the', 'parameters', 'in', 'the', 'control', '(', 'get', 'NaNs', 'in', 'the', 'ensemble', ')', 'or', 'extra', 'parameters', 'in', 'the', 'parfiles', '(', 'get', 'dropped', ')'] | train | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/en.py#L1673-L1738 |
8,738 | sternoru/goscalecms | goscale/models.py | GoscaleCMSPlugin._format | def _format(self, posts):
""" This method is called by get_content() method"""
if posts.__class__ == Post:
# format a single post
return posts.dict()
formated_posts = []
for post in posts:
formated_posts.append(post.dict())
return formated_posts | python | def _format(self, posts):
""" This method is called by get_content() method"""
if posts.__class__ == Post:
# format a single post
return posts.dict()
formated_posts = []
for post in posts:
formated_posts.append(post.dict())
return formated_posts | ['def', '_format', '(', 'self', ',', 'posts', ')', ':', 'if', 'posts', '.', '__class__', '==', 'Post', ':', '# format a single post', 'return', 'posts', '.', 'dict', '(', ')', 'formated_posts', '=', '[', ']', 'for', 'post', 'in', 'posts', ':', 'formated_posts', '.', 'append', '(', 'post', '.', 'dict', '(', ')', ')', 'return', 'formated_posts'] | This method is called by get_content() method | ['This', 'method', 'is', 'called', 'by', 'get_content', '()', 'method'] | train | https://github.com/sternoru/goscalecms/blob/7eee50357c47ebdfe3e573a8b4be3b67892d229e/goscale/models.py#L282-L290 |
8,739 | LonamiWebs/Telethon | telethon/tl/custom/message.py | Message._set_buttons | def _set_buttons(self, chat, bot):
"""
Helper method to set the buttons given the input sender and chat.
"""
if isinstance(self.reply_markup, (
types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)):
self._buttons = [[
MessageButton(self._client, button, chat, bot, self.id)
for button in row.buttons
] for row in self.reply_markup.rows]
self._buttons_flat = [x for row in self._buttons for x in row] | python | def _set_buttons(self, chat, bot):
"""
Helper method to set the buttons given the input sender and chat.
"""
if isinstance(self.reply_markup, (
types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)):
self._buttons = [[
MessageButton(self._client, button, chat, bot, self.id)
for button in row.buttons
] for row in self.reply_markup.rows]
self._buttons_flat = [x for row in self._buttons for x in row] | ['def', '_set_buttons', '(', 'self', ',', 'chat', ',', 'bot', ')', ':', 'if', 'isinstance', '(', 'self', '.', 'reply_markup', ',', '(', 'types', '.', 'ReplyInlineMarkup', ',', 'types', '.', 'ReplyKeyboardMarkup', ')', ')', ':', 'self', '.', '_buttons', '=', '[', '[', 'MessageButton', '(', 'self', '.', '_client', ',', 'button', ',', 'chat', ',', 'bot', ',', 'self', '.', 'id', ')', 'for', 'button', 'in', 'row', '.', 'buttons', ']', 'for', 'row', 'in', 'self', '.', 'reply_markup', '.', 'rows', ']', 'self', '.', '_buttons_flat', '=', '[', 'x', 'for', 'row', 'in', 'self', '.', '_buttons', 'for', 'x', 'in', 'row', ']'] | Helper methods to set the buttons given the input sender and chat. | ['Helper', 'methods', 'to', 'set', 'the', 'buttons', 'given', 'the', 'input', 'sender', 'and', 'chat', '.'] | train | https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/message.py#L834-L844 |
8,740 | crytic/slither | slither/core/declarations/contract.py | Contract.get_functions_writing_to_variable | def get_functions_writing_to_variable(self, variable):
'''
Return the functions writing the variable
'''
return [f for f in self.functions if f.is_writing(variable)] | python | def get_functions_writing_to_variable(self, variable):
'''
Return the functions writing the variable
'''
return [f for f in self.functions if f.is_writing(variable)] | ['def', 'get_functions_writing_to_variable', '(', 'self', ',', 'variable', ')', ':', 'return', '[', 'f', 'for', 'f', 'in', 'self', '.', 'functions', 'if', 'f', '.', 'is_writing', '(', 'variable', ')', ']'] | Return the functions writting the variable | ['Return', 'the', 'functions', 'writting', 'the', 'variable'] | train | https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/declarations/contract.py#L345-L349 |
8,741 | AtteqCom/zsl | src/zsl/resource/model_resource.py | ModelResource._save_one | def _save_one(self, model, ctx):
"""
Saves the created instance.
"""
assert isinstance(ctx, ResourceQueryContext)
self._orm.add(model)
self._orm.flush() | python | def _save_one(self, model, ctx):
"""
Saves the created instance.
"""
assert isinstance(ctx, ResourceQueryContext)
self._orm.add(model)
self._orm.flush() | ['def', '_save_one', '(', 'self', ',', 'model', ',', 'ctx', ')', ':', 'assert', 'isinstance', '(', 'ctx', ',', 'ResourceQueryContext', ')', 'self', '.', '_orm', '.', 'add', '(', 'model', ')', 'self', '.', '_orm', '.', 'flush', '(', ')'] | Saves the created instance. | ['Saves', 'the', 'created', 'instance', '.'] | train | https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/resource/model_resource.py#L297-L304 |
8,742 | hadrianl/huobi | huobitrade/service.py | HBRestAPI.get_last_ticker | def get_last_ticker(self, symbol, _async=False):
"""
Get trade detail
:param symbol
:return:
"""
params = {'symbol': symbol}
url = u.MARKET_URL + '/market/trade'
return http_get_request(url, params, _async=_async) | python | def get_last_ticker(self, symbol, _async=False):
"""
Get trade detail
:param symbol
:return:
"""
params = {'symbol': symbol}
url = u.MARKET_URL + '/market/trade'
return http_get_request(url, params, _async=_async) | ['def', 'get_last_ticker', '(', 'self', ',', 'symbol', ',', '_async', '=', 'False', ')', ':', 'params', '=', '{', "'symbol'", ':', 'symbol', '}', 'url', '=', 'u', '.', 'MARKET_URL', '+', "'/market/trade'", 'return', 'http_get_request', '(', 'url', ',', 'params', ',', '_async', '=', '_async', ')'] | 获取tradedetail
:param symbol
:return: | ['获取tradedetail', ':', 'param', 'symbol', ':', 'return', ':'] | train | https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/service.py#L88-L97 |
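A hypothetical call of get_last_ticker above; the constructor may need API keys configured elsewhere and the symbol is only an example:
from huobitrade.service import HBRestAPI   # module path taken from the record above
api = HBRestAPI()                           # assumed to work without extra arguments for public market data
print(api.get_last_ticker('btcusdt'))       # latest trade details for the symbol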
8,743 | kata198/AdvancedHTMLParser | AdvancedHTMLParser/Tags.py | AdvancedTag.getElementById | def getElementById(self, _id):
'''
getElementById - Search children of this tag for a tag containing an id
@param _id - String of id
@return - AdvancedTag or None
'''
for child in self.children:
if child.getAttribute('id') == _id:
return child
found = child.getElementById(_id)
if found is not None:
return found
return None | python | def getElementById(self, _id):
'''
getElementById - Search children of this tag for a tag containing an id
@param _id - String of id
@return - AdvancedTag or None
'''
for child in self.children:
if child.getAttribute('id') == _id:
return child
found = child.getElementById(_id)
if found is not None:
return found
return None | ['def', 'getElementById', '(', 'self', ',', '_id', ')', ':', 'for', 'child', 'in', 'self', '.', 'children', ':', 'if', 'child', '.', 'getAttribute', '(', "'id'", ')', '==', '_id', ':', 'return', 'child', 'found', '=', 'child', '.', 'getElementById', '(', '_id', ')', 'if', 'found', 'is', 'not', 'None', ':', 'return', 'found', 'return', 'None'] | getElementById - Search children of this tag for a tag containing an id
@param _id - String of id
@return - AdvancedTag or None | ['getElementById', '-', 'Search', 'children', 'of', 'this', 'tag', 'for', 'a', 'tag', 'containing', 'an', 'id'] | train | https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L1781-L1795 |
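A small usage sketch of the tag-level getElementById above; the HTML snippet is invented and the parser-level parseStr/getElementById calls are assumed from the library's documented API:
import AdvancedHTMLParser
parser = AdvancedHTMLParser.AdvancedHTMLParser()
parser.parseStr('<div id="outer"><span id="msg">hello</span></div>')
outer = parser.getElementById('outer')      # parser-level lookup
msg = outer.getElementById('msg')           # recursive search of children, as implemented above
print(msg is not None)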
8,744 | matthewdeanmartin/jiggle_version | jiggle_version/main.py | process_docopts | def process_docopts(test=None): # type: (Optional[Dict[str,Any]])->None
"""
Just process the command line options and commands
:return:
"""
if test:
arguments = test
else:
arguments = docopt(__doc__, version="Jiggle Version {0}".format(__version__))
logger.debug(arguments)
file_opener = FileOpener()
central_module_finder = CentralModuleFinder(file_opener)
if arguments["--module"]:
central_module = arguments["--module"]
elif arguments["--project"]:
# soon to be deprecated in favor of module/package
central_module = arguments["--project"]
else:
# infer it the best we can.
central_module = central_module_finder.find_central_module()
if arguments["--init"]:
force_init = arguments["--init"]
if force_init == "False":
force_init = False
if force_init == "True":
force_init = True
else:
force_init = False
if arguments["here"]:
# TODO: find better way to turn debugging on & off
# console_trace(logging.DEBUG)
module_finder = ModuleFinder(file_opener)
guess_src_dir = module_finder.extract_package_dir()
if not guess_src_dir:
guess_src_dir = ""
if not central_module:
# check if exists first?
central_module = "setup.py"
bump_version(
project=central_module, source=guess_src_dir, force_init=force_init
)
elif arguments["find"]:
# Only show errors. Rest of extraneous console output messes up this:
# jiggle_version find>version.txt
if arguments["--project"]:
central_module = arguments["--project"]
find_version(project=central_module, source="", force_init=force_init)
else:
if arguments["--project"]:
central_module = arguments["--project"]
bump_version(
project=arguments["--project"],
source=arguments["--source"],
force_init=force_init,
) | python | def process_docopts(test=None): # type: (Optional[Dict[str,Any]])->None
"""
Just process the command line options and commands
:return:
"""
if test:
arguments = test
else:
arguments = docopt(__doc__, version="Jiggle Version {0}".format(__version__))
logger.debug(arguments)
file_opener = FileOpener()
central_module_finder = CentralModuleFinder(file_opener)
if arguments["--module"]:
central_module = arguments["--module"]
elif arguments["--project"]:
# soon to be deprecated in favor of module/package
central_module = arguments["--project"]
else:
# infer it the best we can.
central_module = central_module_finder.find_central_module()
if arguments["--init"]:
force_init = arguments["--init"]
if force_init == "False":
force_init = False
if force_init == "True":
force_init = True
else:
force_init = False
if arguments["here"]:
# TODO: find better way to turn debugging on & off
# console_trace(logging.DEBUG)
module_finder = ModuleFinder(file_opener)
guess_src_dir = module_finder.extract_package_dir()
if not guess_src_dir:
guess_src_dir = ""
if not central_module:
# check if exists first?
central_module = "setup.py"
bump_version(
project=central_module, source=guess_src_dir, force_init=force_init
)
elif arguments["find"]:
# Only show errors. Rest of extraneous console output messes up this:
# jiggle_version find>version.txt
if arguments["--project"]:
central_module = arguments["--project"]
find_version(project=central_module, source="", force_init=force_init)
else:
if arguments["--project"]:
central_module = arguments["--project"]
bump_version(
project=arguments["--project"],
source=arguments["--source"],
force_init=force_init,
) | ['def', 'process_docopts', '(', 'test', '=', 'None', ')', ':', '# type: (Optional[Dict[str,Any]])->None', 'if', 'test', ':', 'arguments', '=', 'test', 'else', ':', 'arguments', '=', 'docopt', '(', '__doc__', ',', 'version', '=', '"Jiggle Version {0}"', '.', 'format', '(', '__version__', ')', ')', 'logger', '.', 'debug', '(', 'arguments', ')', 'file_opener', '=', 'FileOpener', '(', ')', 'central_module_finder', '=', 'CentralModuleFinder', '(', 'file_opener', ')', 'if', 'arguments', '[', '"--module"', ']', ':', 'central_module', '=', 'arguments', '[', '"--module"', ']', 'elif', 'arguments', '[', '"--project"', ']', ':', '# soon to be deprecated in favor of module/package', 'central_module', '=', 'arguments', '[', '"--project"', ']', 'else', ':', '# infer it the best we can.', 'central_module', '=', 'central_module_finder', '.', 'find_central_module', '(', ')', 'if', 'arguments', '[', '"--init"', ']', ':', 'force_init', '=', 'arguments', '[', '"--init"', ']', 'if', 'force_init', '==', '"False"', ':', 'force_init', '=', 'False', 'if', 'force_init', '==', '"True"', ':', 'force_init', '=', 'True', 'else', ':', 'force_init', '=', 'False', 'if', 'arguments', '[', '"here"', ']', ':', '# TODO: find better way to turn debugging on & off', '# console_trace(logging.DEBUG)', 'module_finder', '=', 'ModuleFinder', '(', 'file_opener', ')', 'guess_src_dir', '=', 'module_finder', '.', 'extract_package_dir', '(', ')', 'if', 'not', 'guess_src_dir', ':', 'guess_src_dir', '=', '""', 'if', 'not', 'central_module', ':', '# check if exists first?', 'central_module', '=', '"setup.py"', 'bump_version', '(', 'project', '=', 'central_module', ',', 'source', '=', 'guess_src_dir', ',', 'force_init', '=', 'force_init', ')', 'elif', 'arguments', '[', '"find"', ']', ':', '# Only show errors. Rest of extraneous console output messes up this:', '# jiggle_version find>version.txt', 'if', 'arguments', '[', '"--project"', ']', ':', 'central_module', '=', 'arguments', '[', '"--project"', ']', 'find_version', '(', 'project', '=', 'central_module', ',', 'source', '=', '""', ',', 'force_init', '=', 'force_init', ')', 'else', ':', 'if', 'arguments', '[', '"--project"', ']', ':', 'central_module', '=', 'arguments', '[', '"--project"', ']', 'bump_version', '(', 'project', '=', 'arguments', '[', '"--project"', ']', ',', 'source', '=', 'arguments', '[', '"--source"', ']', ',', 'force_init', '=', 'force_init', ',', ')'] | Just process the command line options and commands
:return: | ['Just', 'process', 'the', 'command', 'line', 'options', 'and', 'commands', ':', 'return', ':'] | train | https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/main.py#L86-L150 |
8,745 | ttinies/sc2players | sc2players/playerRecord.py | PlayerRecord.apmAggregate | def apmAggregate(self, **criteria):
"""collect all match history's apm data to report player's calculated MMR"""
apms = [m.apm(self) for m in self.matchSubset(**criteria)]
if not apms: return 0 # no apm information without match history
return sum(apms) / len(apms) | python | def apmAggregate(self, **criteria):
"""collect all match history's apm data to report player's calculated MMR"""
apms = [m.apm(self) for m in self.matchSubset(**criteria)]
if not apms: return 0 # no apm information without match history
return sum(apms) / len(apms) | ['def', 'apmAggregate', '(', 'self', ',', '*', '*', 'criteria', ')', ':', 'apms', '=', '[', 'm', '.', 'apm', '(', 'self', ')', 'for', 'm', 'in', 'self', '.', 'matchSubset', '(', '*', '*', 'criteria', ')', ']', 'if', 'not', 'apms', ':', 'return', '0', '# no apm information without match history', 'return', 'sum', '(', 'apms', ')', '/', 'len', '(', 'apms', ')'] | collect all match history's apm data to report player's calculated MMR | ['collect', 'all', 'match', 'history', 's', 'apm', 'data', 'to', 'report', 'player', 's', 'calculated', 'MMR'] | train | https://github.com/ttinies/sc2players/blob/fd9b37c268bf1005d9ef73a25e65ed97c8b7895f/sc2players/playerRecord.py#L229-L233 |
8,746 | PrefPy/prefpy | prefpy/preference.py | Preference.getOrderVectorEGMM | def getOrderVectorEGMM(self):
"""
Returns a list of lists. Each list represents tiers of candidates. Candidates in earlier
tiers are preferred to candidates appearing in later tiers. Candidates in the same tier
are preferred equally.
"""
# We sort the candidates based on the number of incoming edges they have in the graph. If
# two candidates have the same number, we assume that they are tied.
incEdgesMap = self.getIncEdgesMap()
sortedKeys = sorted(incEdgesMap.keys())
orderVector = []
# print("sortedKeys",sortedKeys)
# print("incEdgesMap", incEdgesMap)
m = 0
for key in sortedKeys:
m += len(incEdgesMap[key])
result = [0] * m
for k in range(0, len(sortedKeys)):
key = sortedKeys[k]
cands = incEdgesMap[key]
# print("qq",cands)
for cand in cands:
result[cand] = len(sortedKeys) - (k + 1)
return result | python | def getOrderVectorEGMM(self):
"""
Returns a list of lists. Each list represents tiers of candidates. Candidates in earlier
tiers are preferred to candidates appearing in later tiers. Candidates in the same tier
are preferred equally.
"""
# We sort the candidates based on the number of incoming edges they have in the graph. If
# two candidates have the same number, we assume that they are tied.
incEdgesMap = self.getIncEdgesMap()
sortedKeys = sorted(incEdgesMap.keys())
orderVector = []
# print("sortedKeys",sortedKeys)
# print("incEdgesMap", incEdgesMap)
m = 0
for key in sortedKeys:
m += len(incEdgesMap[key])
result = [0] * m
for k in range(0, len(sortedKeys)):
key = sortedKeys[k]
cands = incEdgesMap[key]
# print("qq",cands)
for cand in cands:
result[cand] = len(sortedKeys) - (k + 1)
return result | ['def', 'getOrderVectorEGMM', '(', 'self', ')', ':', '# We sort the candidates based on the number of incoming edges they have in the graph. If ', '# two candidates have the same number, we assume that they are tied.', 'incEdgesMap', '=', 'self', '.', 'getIncEdgesMap', '(', ')', 'sortedKeys', '=', 'sorted', '(', 'incEdgesMap', '.', 'keys', '(', ')', ')', 'orderVector', '=', '[', ']', '# print("sortedKeys",sortedKeys)', '# print("incEdgesMap", incEdgesMap)', 'm', '=', '0', 'for', 'key', 'in', 'sortedKeys', ':', 'm', '+=', 'len', '(', 'incEdgesMap', '[', 'key', ']', ')', 'result', '=', '[', '0', ']', '*', 'm', 'for', 'k', 'in', 'range', '(', '0', ',', 'len', '(', 'sortedKeys', ')', ')', ':', 'key', '=', 'sortedKeys', '[', 'k', ']', 'cands', '=', 'incEdgesMap', '[', 'key', ']', '# print("qq",cands)', 'for', 'cand', 'in', 'cands', ':', 'result', '[', 'cand', ']', '=', 'len', '(', 'sortedKeys', ')', '-', '(', 'k', '+', '1', ')', 'return', 'result'] | Returns a list of lists. Each list represents tiers of candidates. candidates in earlier
tiers are preferred to candidates appearing in later tiers. Candidates in the same tier
are preferred equally. | ['Returns', 'a', 'list', 'of', 'lists', '.', 'Each', 'list', 'represents', 'tiers', 'of', 'candidates', '.', 'candidates', 'in', 'earlier', 'tiers', 'are', 'preferred', 'to', 'candidates', 'appearing', 'in', 'later', 'tiers', '.', 'Candidates', 'in', 'the', 'same', 'tier', 'are', 'preferred', 'equally', '.'] | train | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/preference.py#L139-L163 |
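The tier logic in getOrderVectorEGMM above (candidates grouped by incoming-edge count, equal counts treated as ties) can be sketched without the Preference class; the edge-count map below is invented:
incEdgesMap = {0: [2], 1: [0, 3], 2: [1]}       # hypothetical mapping: incoming-edge count -> candidates
sortedKeys = sorted(incEdgesMap)
m = sum(len(cands) for cands in incEdgesMap.values())
result = [0] * m
for k, key in enumerate(sortedKeys):
    for cand in incEdgesMap[key]:
        result[cand] = len(sortedKeys) - (k + 1)
print(result)    # [1, 0, 2, 1]: fewer incoming edges -> higher score; equal counts share a score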
8,747 | tomplus/kubernetes_asyncio | kubernetes_asyncio/client/api/extensions_v1beta1_api.py | ExtensionsV1beta1Api.read_namespaced_replica_set | def read_namespaced_replica_set(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_replica_set # noqa: E501
read the specified ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | python | def read_namespaced_replica_set(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_replica_set # noqa: E501
read the specified ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1ReplicaSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | ['def', 'read_namespaced_replica_set', '(', 'self', ',', 'name', ',', 'namespace', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async_req'", ')', ':', 'return', 'self', '.', 'read_namespaced_replica_set_with_http_info', '(', 'name', ',', 'namespace', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'read_namespaced_replica_set_with_http_info', '(', 'name', ',', 'namespace', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data'] | read_namespaced_replica_set # noqa: E501
read the specified ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1ReplicaSet
If the method is called asynchronously,
returns the request thread. | ['read_namespaced_replica_set', '#', 'noqa', ':', 'E501'] | train | https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/extensions_v1beta1_api.py#L6383-L6407 |
8,748 | fermiPy/fermipy | fermipy/gtanalysis.py | GTAnalysis.free_source | def free_source(self, name, free=True, pars=None, **kwargs):
"""Free/Fix parameters of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for this source. If
none then all source parameters will be freed/fixed with the
exception of those defined in the skip_pars list.
"""
free_pars = self.get_free_param_vector()
loglevel = kwargs.pop('loglevel', self.loglevel)
# Find the source
src = self.roi.get_source_by_name(name)
name = src.name
if pars is None or (isinstance(pars, list) and not pars):
pars = []
pars += norm_parameters.get(src['SpectrumType'], [])
pars += shape_parameters.get(src['SpectrumType'], [])
elif pars == 'norm':
pars = []
pars += norm_parameters.get(src['SpectrumType'], [])
elif pars == 'shape':
pars = []
pars += shape_parameters.get(src['SpectrumType'], [])
elif isinstance(pars, list):
pass
else:
raise Exception('Invalid parameter list.')
# Remove locked parameters
lck_params = self._lck_params.get(name, [])
pars = [p for p in pars if p not in lck_params]
# Deduce here the names of all parameters from the spectral type
src_par_names = pyLike.StringVector()
self.like[name].src.spectrum().getParamNames(src_par_names)
par_indices = []
par_names = []
for p in src_par_names:
if pars is not None and p not in pars:
continue
idx = self.like.par_index(name, p)
if free == free_pars[idx]:
continue
par_indices.append(idx)
par_names.append(p)
if len(par_names) == 0:
return
if free:
self.logger.log(loglevel, 'Freeing parameters for %-22s: %s',
name, par_names)
else:
self.logger.log(loglevel, 'Fixing parameters for %-22s: %s',
name, par_names)
for (idx, par_name) in zip(par_indices, par_names):
self.like[idx].setFree(free)
self._sync_params_state(name) | python | def free_source(self, name, free=True, pars=None, **kwargs):
"""Free/Fix parameters of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for this source. If
none then all source parameters will be freed/fixed with the
exception of those defined in the skip_pars list.
"""
free_pars = self.get_free_param_vector()
loglevel = kwargs.pop('loglevel', self.loglevel)
# Find the source
src = self.roi.get_source_by_name(name)
name = src.name
if pars is None or (isinstance(pars, list) and not pars):
pars = []
pars += norm_parameters.get(src['SpectrumType'], [])
pars += shape_parameters.get(src['SpectrumType'], [])
elif pars == 'norm':
pars = []
pars += norm_parameters.get(src['SpectrumType'], [])
elif pars == 'shape':
pars = []
pars += shape_parameters.get(src['SpectrumType'], [])
elif isinstance(pars, list):
pass
else:
raise Exception('Invalid parameter list.')
# Remove locked parameters
lck_params = self._lck_params.get(name, [])
pars = [p for p in pars if p not in lck_params]
# Deduce here the names of all parameters from the spectral type
src_par_names = pyLike.StringVector()
self.like[name].src.spectrum().getParamNames(src_par_names)
par_indices = []
par_names = []
for p in src_par_names:
if pars is not None and p not in pars:
continue
idx = self.like.par_index(name, p)
if free == free_pars[idx]:
continue
par_indices.append(idx)
par_names.append(p)
if len(par_names) == 0:
return
if free:
self.logger.log(loglevel, 'Freeing parameters for %-22s: %s',
name, par_names)
else:
self.logger.log(loglevel, 'Fixing parameters for %-22s: %s',
name, par_names)
for (idx, par_name) in zip(par_indices, par_names):
self.like[idx].setFree(free)
self._sync_params_state(name) | ['def', 'free_source', '(', 'self', ',', 'name', ',', 'free', '=', 'True', ',', 'pars', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'free_pars', '=', 'self', '.', 'get_free_param_vector', '(', ')', 'loglevel', '=', 'kwargs', '.', 'pop', '(', "'loglevel'", ',', 'self', '.', 'loglevel', ')', '# Find the source', 'src', '=', 'self', '.', 'roi', '.', 'get_source_by_name', '(', 'name', ')', 'name', '=', 'src', '.', 'name', 'if', 'pars', 'is', 'None', 'or', '(', 'isinstance', '(', 'pars', ',', 'list', ')', 'and', 'not', 'pars', ')', ':', 'pars', '=', '[', ']', 'pars', '+=', 'norm_parameters', '.', 'get', '(', 'src', '[', "'SpectrumType'", ']', ',', '[', ']', ')', 'pars', '+=', 'shape_parameters', '.', 'get', '(', 'src', '[', "'SpectrumType'", ']', ',', '[', ']', ')', 'elif', 'pars', '==', "'norm'", ':', 'pars', '=', '[', ']', 'pars', '+=', 'norm_parameters', '.', 'get', '(', 'src', '[', "'SpectrumType'", ']', ',', '[', ']', ')', 'elif', 'pars', '==', "'shape'", ':', 'pars', '=', '[', ']', 'pars', '+=', 'shape_parameters', '.', 'get', '(', 'src', '[', "'SpectrumType'", ']', ',', '[', ']', ')', 'elif', 'isinstance', '(', 'pars', ',', 'list', ')', ':', 'pass', 'else', ':', 'raise', 'Exception', '(', "'Invalid parameter list.'", ')', '# Remove locked parameters', 'lck_params', '=', 'self', '.', '_lck_params', '.', 'get', '(', 'name', ',', '[', ']', ')', 'pars', '=', '[', 'p', 'for', 'p', 'in', 'pars', 'if', 'p', 'not', 'in', 'lck_params', ']', '# Deduce here the names of all parameters from the spectral type', 'src_par_names', '=', 'pyLike', '.', 'StringVector', '(', ')', 'self', '.', 'like', '[', 'name', ']', '.', 'src', '.', 'spectrum', '(', ')', '.', 'getParamNames', '(', 'src_par_names', ')', 'par_indices', '=', '[', ']', 'par_names', '=', '[', ']', 'for', 'p', 'in', 'src_par_names', ':', 'if', 'pars', 'is', 'not', 'None', 'and', 'p', 'not', 'in', 'pars', ':', 'continue', 'idx', '=', 'self', '.', 'like', '.', 'par_index', '(', 'name', ',', 'p', ')', 'if', 'free', '==', 'free_pars', '[', 'idx', ']', ':', 'continue', 'par_indices', '.', 'append', '(', 'idx', ')', 'par_names', '.', 'append', '(', 'p', ')', 'if', 'len', '(', 'par_names', ')', '==', '0', ':', 'return', 'if', 'free', ':', 'self', '.', 'logger', '.', 'log', '(', 'loglevel', ',', "'Freeing parameters for %-22s: %s'", ',', 'name', ',', 'par_names', ')', 'else', ':', 'self', '.', 'logger', '.', 'log', '(', 'loglevel', ',', "'Fixing parameters for %-22s: %s'", ',', 'name', ',', 'par_names', ')', 'for', '(', 'idx', ',', 'par_name', ')', 'in', 'zip', '(', 'par_indices', ',', 'par_names', ')', ':', 'self', '.', 'like', '[', 'idx', ']', '.', 'setFree', '(', 'free', ')', 'self', '.', '_sync_params_state', '(', 'name', ')'] | Free/Fix parameters of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for this source. If
none then all source parameters will be freed/fixed with the
exception of those defined in the skip_pars list. | ['Free', '/', 'Fix', 'parameters', 'of', 'a', 'source', '.'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L1906-L1982 |
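A hedged sketch of how free_source is typically driven from an analysis script; the configuration file and source names are placeholders, and a prepared fermipy configuration is assumed.

```python
from fermipy.gtanalysis import GTAnalysis

# "config.yaml" and the source names below are placeholders for this sketch.
gta = GTAnalysis("config.yaml", logging={"verbosity": 3})
gta.setup()

# Free only the normalizations of the diffuse components.
gta.free_source("galdiff", pars="norm")
gta.free_source("isodiff", pars="norm")

# Free all parameters of one point source, fit, then fix them again.
gta.free_source("3FGL J0534.5+2201")
gta.fit()
gta.free_source("3FGL J0534.5+2201", free=False)
```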
8,749 | tasdikrahman/vocabulary | vocabulary/vocabulary.py | Vocabulary.antonym | def antonym(phrase, format="json"):
"""
queries the bighugelabs API for the antonym. The results include
- "syn" (synonym)
- "ant" (antonym)
- "rel" (related terms)
- "sim" (similar terms)
- "usr" (user suggestions)
But currently parsing only the antonym as I have already done
- synonym (using glosbe API)
:param phrase: word for which antonym is to be found
:param format: response structure type. Defaults to: "json"
:returns: returns a json object
:raises KeyError: returns False when no antonyms are found
"""
base_url = Vocabulary.__get_api_link("bighugelabs")
url = base_url.format(word=phrase)
json_obj = Vocabulary.__return_json(url)
if not json_obj:
return False
result = []
visited = {}
idx = 0
for key in json_obj.keys():
antonyms = json_obj[key].get('ant', False)
if not antonyms:
continue
for antonym in antonyms:
if visited.get(antonym, False):
continue
result.append({'seq': idx, 'text': antonym})
idx += 1
visited[antonym] = True
if not result:
return False
return Response().respond(result, format) | python | def antonym(phrase, format="json"):
"""
queries the bighugelabs API for the antonym. The results include
- "syn" (synonym)
- "ant" (antonym)
- "rel" (related terms)
- "sim" (similar terms)
- "usr" (user suggestions)
But currently parsing only the antonym as I have already done
- synonym (using glosbe API)
:param phrase: word for which antonym is to be found
:param format: response structure type. Defaults to: "json"
:returns: returns a json object
:raises KeyError: returns False when no antonyms are found
"""
base_url = Vocabulary.__get_api_link("bighugelabs")
url = base_url.format(word=phrase)
json_obj = Vocabulary.__return_json(url)
if not json_obj:
return False
result = []
visited = {}
idx = 0
for key in json_obj.keys():
antonyms = json_obj[key].get('ant', False)
if not antonyms:
continue
for antonym in antonyms:
if visited.get(antonym, False):
continue
result.append({'seq': idx, 'text': antonym})
idx += 1
visited[antonym] = True
if not result:
return False
return Response().respond(result, format) | ['def', 'antonym', '(', 'phrase', ',', 'format', '=', '"json"', ')', ':', 'base_url', '=', 'Vocabulary', '.', '__get_api_link', '(', '"bighugelabs"', ')', 'url', '=', 'base_url', '.', 'format', '(', 'word', '=', 'phrase', ')', 'json_obj', '=', 'Vocabulary', '.', '__return_json', '(', 'url', ')', 'if', 'not', 'json_obj', ':', 'return', 'False', 'result', '=', '[', ']', 'visited', '=', '{', '}', 'idx', '=', '0', 'for', 'key', 'in', 'json_obj', '.', 'keys', '(', ')', ':', 'antonyms', '=', 'json_obj', '[', 'key', ']', '.', 'get', '(', "'ant'", ',', 'False', ')', 'if', 'not', 'antonyms', ':', 'continue', 'for', 'antonym', 'in', 'antonyms', ':', 'if', 'visited', '.', 'get', '(', 'antonym', ',', 'False', ')', ':', 'continue', 'result', '.', 'append', '(', '{', "'seq'", ':', 'idx', ',', "'text'", ':', 'antonym', '}', ')', 'idx', '+=', '1', 'visited', '[', 'antonym', ']', '=', 'True', 'if', 'not', 'result', ':', 'return', 'False', 'return', 'Response', '(', ')', '.', 'respond', '(', 'result', ',', 'format', ')'] | queries the bighugelabs API for the antonym. The results include
- "syn" (synonym)
- "ant" (antonym)
- "rel" (related terms)
- "sim" (similar terms)
- "usr" (user suggestions)
But currently parsing only the antonym as I have already done
- synonym (using glosbe API)
:param phrase: word for which antonym is to be found
:param format: response structure type. Defaults to: "json"
:returns: returns a json object
:raises KeyError: returns False when no antonyms are found | ['queries', 'the', 'bighugelabs', 'API', 'for', 'the', 'antonym', '.', 'The', 'results', 'include', '-', 'syn', '(', 'synonym', ')', '-', 'ant', '(', 'antonym', ')', '-', 'rel', '(', 'related', 'terms', ')', '-', 'sim', '(', 'similar', 'terms', ')', '-', 'usr', '(', 'user', 'suggestions', ')'] | train | https://github.com/tasdikrahman/vocabulary/blob/54403c5981af25dc3457796b57048ae27f09e9be/vocabulary/vocabulary.py#L258-L301 |
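A hedged usage sketch; it needs network access to the bighugelabs endpoint, and the word is arbitrary.

```python
from vocabulary.vocabulary import Vocabulary

# Needs network access; returns a JSON string such as
# '[{"seq": 0, "text": "hate"}]' on success, or False when nothing is found.
result = Vocabulary.antonym("love")
if result:
    print(result)
else:
    print("no antonyms found")
```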
8,750 | koszullab/instaGRAAL | instagraal/simu_single.py | simulation.modify_sub_vect_frags | def modify_sub_vect_frags(self):
"include repeated frags"
modified_vect_frags = dict()
init_vect_frags = self.sub_level.S_o_A_frags
# init_max_id_d = init_vect_frags["id"].max()
max_id_F = len(init_vect_frags["id"])
max_id_C = init_vect_frags["id_c"].max() + 1
# HSV_tuples = [(x*1.0/(max_id_C - 1), 0.5, 0.5) for x in range(0,
# (max_id_C-1))]
# cmap = plt.cm.gist_ncar
cmap = plt.cm.prism
# extract all colors from the .jet map
cmaplist = [cmap(i) for i in range(cmap.N)]
id_smple = np.linspace(0, cmap.N, num=max_id_C)
RGB_tuples = []
for i in range(0, max_id_C - 1):
RGB_tuples.append(cmaplist[int(id_smple[i])])
# RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
self.init_n_sub_frags = len(init_vect_frags["id"])
modified_vect_frags["pos"] = list(init_vect_frags["pos"])
modified_vect_frags["sub_pos"] = list(init_vect_frags["sub_pos"])
modified_vect_frags["id_c"] = list(init_vect_frags["id_c"])
modified_vect_frags["start_bp"] = list(init_vect_frags["start_bp"])
modified_vect_frags["len_bp"] = list(init_vect_frags["len_bp"])
modified_vect_frags["sub_len"] = list(init_vect_frags["sub_len"])
modified_vect_frags["circ"] = list(init_vect_frags["circ"])
modified_vect_frags["id"] = list(init_vect_frags["id"])
modified_vect_frags["prev"] = list(init_vect_frags["prev"])
modified_vect_frags["next"] = list(init_vect_frags["next"])
modified_vect_frags["l_cont"] = list(init_vect_frags["l_cont"])
modified_vect_frags["sub_l_cont"] = list(init_vect_frags["sub_l_cont"])
modified_vect_frags["l_cont_bp"] = list(init_vect_frags["l_cont_bp"])
modified_vect_frags["n_accu"] = list(init_vect_frags["n_accu"])
modified_vect_frags["rep"] = list(np.zeros(max_id_F, dtype=np.int32))
modified_vect_frags["activ"] = list(np.ones(max_id_F, dtype=np.int32))
modified_vect_frags["id_d"] = list(init_vect_frags["id"])
# WARNING IMPLICIT BREAKING OF THE CONTIGS
for data_dup in self.sub_candidates_output_data:
n_dup = int(data_dup[1])
id_f = data_dup[0]
for k in range(0, n_dup):
modified_vect_frags["pos"].append(0)
modified_vect_frags["sub_pos"].append(0)
modified_vect_frags["id_c"].append(max_id_C)
modified_vect_frags["start_bp"].append(0)
modified_vect_frags["len_bp"].append(
init_vect_frags["len_bp"][id_f]
)
modified_vect_frags["sub_len"].append(
init_vect_frags["sub_len"][id_f]
)
modified_vect_frags["circ"].append(
init_vect_frags["circ"][id_f]
)
modified_vect_frags["id"].append(max_id_F)
modified_vect_frags["prev"].append(-1)
modified_vect_frags["next"].append(-1)
modified_vect_frags["l_cont"].append(1)
modified_vect_frags["sub_l_cont"].append(
init_vect_frags["sub_len"][id_f]
)
modified_vect_frags["l_cont_bp"].append(
init_vect_frags["len_bp"][id_f]
)
modified_vect_frags["n_accu"].append(
init_vect_frags["n_accu"][id_f]
)
modified_vect_frags["rep"].append(1)
modified_vect_frags["activ"].append(1)
modified_vect_frags["id_d"].append(init_vect_frags["id"][id_f])
max_id_F += 1
max_id_C += 1
logger.info("MAX ID CONTIG = {}".format(max_id_C))
modified_vect_frags["pos"] = np.array(
modified_vect_frags["pos"], dtype=np.int32
)
modified_vect_frags["sub_pos"] = np.array(
modified_vect_frags["sub_pos"], dtype=np.int32
)
modified_vect_frags["id_c"] = np.array(
modified_vect_frags["id_c"], dtype=np.int32
)
modified_vect_frags["start_bp"] = np.array(
modified_vect_frags["start_bp"], dtype=np.int32
)
modified_vect_frags["len_bp"] = np.array(
modified_vect_frags["len_bp"], dtype=np.int32
)
modified_vect_frags["sub_len"] = np.array(
modified_vect_frags["sub_len"], dtype=np.int32
)
modified_vect_frags["circ"] = np.array(
modified_vect_frags["circ"], dtype=np.int32
)
modified_vect_frags["id"] = np.array(
modified_vect_frags["id"], dtype=np.int32
)
modified_vect_frags["prev"] = np.array(
modified_vect_frags["prev"], dtype=np.int32
)
modified_vect_frags["next"] = np.array(
modified_vect_frags["next"], dtype=np.int32
)
modified_vect_frags["l_cont"] = np.array(
modified_vect_frags["l_cont"], dtype=np.int32
)
modified_vect_frags["sub_l_cont"] = np.array(
modified_vect_frags["sub_l_cont"], dtype=np.int32
)
modified_vect_frags["l_cont_bp"] = np.array(
modified_vect_frags["l_cont_bp"], dtype=np.int32
)
modified_vect_frags["n_accu"] = np.array(
modified_vect_frags["n_accu"], dtype=np.int32
)
modified_vect_frags["rep"] = np.array(
modified_vect_frags["rep"], dtype=np.int32
)
modified_vect_frags["activ"] = np.array(
modified_vect_frags["activ"], dtype=np.int32
)
modified_vect_frags["id_d"] = np.array(
modified_vect_frags["id_d"], dtype=np.int32
)
id_x = 0
collector_id_repeats = []
frag_dispatcher = []
for id_f in range(0, self.init_n_sub_frags):
if id_f in self.sub_candidates_dup:
id_start = id_x
id_dup = np.nonzero(modified_vect_frags["id_d"] == id_f)[0]
collector_id_repeats.extend(list(id_dup))
n_rep = len(id_dup)
frag_dispatcher.append(
(np.int32(id_start), np.int32(id_start + n_rep))
)
id_x += n_rep
else:
id_start = id_x
n_rep = 1
frag_dispatcher.append(
(np.int32(id_start), np.int32(id_start + n_rep))
)
collector_id_repeats.append(id_f)
id_x += 1
self.sub_collector_id_repeats = np.array(
collector_id_repeats, dtype=np.int32
)
self.sub_frag_dispatcher = np.array(frag_dispatcher, dtype=self.int2)
self.sub_n_frags = len(modified_vect_frags["id"])
# pos_vect_frags_4_GL = np.ndarray((self.n_frags, 4), dtype=np.float32)
# col_vect_frags_4_GL = np.ndarray((self.n_frags, 4), dtype=np.float32)
#
# for id_f_curr in xrange(0 , self.sub_n_frags):
# id_d = modified_vect_frags['id_d'][id_f_curr]
# id_c = init_vect_frags['id_c'][id_d]
# pos_vect_frags_4_GL[id_f_curr, 0] =
# modified_vect_frags['pos'][id_f_curr]
# pos_vect_frags_4_GL[id_f_curr, 1] =
# modified_vect_frags['id_c'][id_f_curr]
# pos_vect_frags_4_GL[id_f_curr, 2] = 0.
# pos_vect_frags_4_GL[id_f_curr, 3] = np.float32(1.0)
#
# col_vect_frags_4_GL[id_f_curr, 0] =
# np.float32(RGB_tuples[id_c - 1][0])
# col_vect_frags_4_GL[id_f_curr, 1] =
# np.float32(RGB_tuples[id_c - 1][1])
# col_vect_frags_4_GL[id_f_curr, 2] =
# np.float32(RGB_tuples[id_c - 1][2])
# col_vect_frags_4_GL[id_f_curr, 3] =
# np.float32(1.0)
#
# self.sub_col_vect_frags_4_GL = col_vect_frags_4_GL
# self.sub_pos_vect_frags_4_GL = pos_vect_frags_4_GL
self.new_sub_S_o_A_frags = modified_vect_frags | python | def modify_sub_vect_frags(self):
"include repeated frags"
modified_vect_frags = dict()
init_vect_frags = self.sub_level.S_o_A_frags
# init_max_id_d = init_vect_frags["id"].max()
max_id_F = len(init_vect_frags["id"])
max_id_C = init_vect_frags["id_c"].max() + 1
# HSV_tuples = [(x*1.0/(max_id_C - 1), 0.5, 0.5) for x in range(0,
# (max_id_C-1))]
# cmap = plt.cm.gist_ncar
cmap = plt.cm.prism
# extract all colors from the .jet map
cmaplist = [cmap(i) for i in range(cmap.N)]
id_smple = np.linspace(0, cmap.N, num=max_id_C)
RGB_tuples = []
for i in range(0, max_id_C - 1):
RGB_tuples.append(cmaplist[int(id_smple[i])])
# RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
self.init_n_sub_frags = len(init_vect_frags["id"])
modified_vect_frags["pos"] = list(init_vect_frags["pos"])
modified_vect_frags["sub_pos"] = list(init_vect_frags["sub_pos"])
modified_vect_frags["id_c"] = list(init_vect_frags["id_c"])
modified_vect_frags["start_bp"] = list(init_vect_frags["start_bp"])
modified_vect_frags["len_bp"] = list(init_vect_frags["len_bp"])
modified_vect_frags["sub_len"] = list(init_vect_frags["sub_len"])
modified_vect_frags["circ"] = list(init_vect_frags["circ"])
modified_vect_frags["id"] = list(init_vect_frags["id"])
modified_vect_frags["prev"] = list(init_vect_frags["prev"])
modified_vect_frags["next"] = list(init_vect_frags["next"])
modified_vect_frags["l_cont"] = list(init_vect_frags["l_cont"])
modified_vect_frags["sub_l_cont"] = list(init_vect_frags["sub_l_cont"])
modified_vect_frags["l_cont_bp"] = list(init_vect_frags["l_cont_bp"])
modified_vect_frags["n_accu"] = list(init_vect_frags["n_accu"])
modified_vect_frags["rep"] = list(np.zeros(max_id_F, dtype=np.int32))
modified_vect_frags["activ"] = list(np.ones(max_id_F, dtype=np.int32))
modified_vect_frags["id_d"] = list(init_vect_frags["id"])
# WARNING IMPLICIT BREAKING OF THE CONTIGS
for data_dup in self.sub_candidates_output_data:
n_dup = int(data_dup[1])
id_f = data_dup[0]
for k in range(0, n_dup):
modified_vect_frags["pos"].append(0)
modified_vect_frags["sub_pos"].append(0)
modified_vect_frags["id_c"].append(max_id_C)
modified_vect_frags["start_bp"].append(0)
modified_vect_frags["len_bp"].append(
init_vect_frags["len_bp"][id_f]
)
modified_vect_frags["sub_len"].append(
init_vect_frags["sub_len"][id_f]
)
modified_vect_frags["circ"].append(
init_vect_frags["circ"][id_f]
)
modified_vect_frags["id"].append(max_id_F)
modified_vect_frags["prev"].append(-1)
modified_vect_frags["next"].append(-1)
modified_vect_frags["l_cont"].append(1)
modified_vect_frags["sub_l_cont"].append(
init_vect_frags["sub_len"][id_f]
)
modified_vect_frags["l_cont_bp"].append(
init_vect_frags["len_bp"][id_f]
)
modified_vect_frags["n_accu"].append(
init_vect_frags["n_accu"][id_f]
)
modified_vect_frags["rep"].append(1)
modified_vect_frags["activ"].append(1)
modified_vect_frags["id_d"].append(init_vect_frags["id"][id_f])
max_id_F += 1
max_id_C += 1
logger.info("MAX ID CONTIG = {}".format(max_id_C))
modified_vect_frags["pos"] = np.array(
modified_vect_frags["pos"], dtype=np.int32
)
modified_vect_frags["sub_pos"] = np.array(
modified_vect_frags["sub_pos"], dtype=np.int32
)
modified_vect_frags["id_c"] = np.array(
modified_vect_frags["id_c"], dtype=np.int32
)
modified_vect_frags["start_bp"] = np.array(
modified_vect_frags["start_bp"], dtype=np.int32
)
modified_vect_frags["len_bp"] = np.array(
modified_vect_frags["len_bp"], dtype=np.int32
)
modified_vect_frags["sub_len"] = np.array(
modified_vect_frags["sub_len"], dtype=np.int32
)
modified_vect_frags["circ"] = np.array(
modified_vect_frags["circ"], dtype=np.int32
)
modified_vect_frags["id"] = np.array(
modified_vect_frags["id"], dtype=np.int32
)
modified_vect_frags["prev"] = np.array(
modified_vect_frags["prev"], dtype=np.int32
)
modified_vect_frags["next"] = np.array(
modified_vect_frags["next"], dtype=np.int32
)
modified_vect_frags["l_cont"] = np.array(
modified_vect_frags["l_cont"], dtype=np.int32
)
modified_vect_frags["sub_l_cont"] = np.array(
modified_vect_frags["sub_l_cont"], dtype=np.int32
)
modified_vect_frags["l_cont_bp"] = np.array(
modified_vect_frags["l_cont_bp"], dtype=np.int32
)
modified_vect_frags["n_accu"] = np.array(
modified_vect_frags["n_accu"], dtype=np.int32
)
modified_vect_frags["rep"] = np.array(
modified_vect_frags["rep"], dtype=np.int32
)
modified_vect_frags["activ"] = np.array(
modified_vect_frags["activ"], dtype=np.int32
)
modified_vect_frags["id_d"] = np.array(
modified_vect_frags["id_d"], dtype=np.int32
)
id_x = 0
collector_id_repeats = []
frag_dispatcher = []
for id_f in range(0, self.init_n_sub_frags):
if id_f in self.sub_candidates_dup:
id_start = id_x
id_dup = np.nonzero(modified_vect_frags["id_d"] == id_f)[0]
collector_id_repeats.extend(list(id_dup))
n_rep = len(id_dup)
frag_dispatcher.append(
(np.int32(id_start), np.int32(id_start + n_rep))
)
id_x += n_rep
else:
id_start = id_x
n_rep = 1
frag_dispatcher.append(
(np.int32(id_start), np.int32(id_start + n_rep))
)
collector_id_repeats.append(id_f)
id_x += 1
self.sub_collector_id_repeats = np.array(
collector_id_repeats, dtype=np.int32
)
self.sub_frag_dispatcher = np.array(frag_dispatcher, dtype=self.int2)
self.sub_n_frags = len(modified_vect_frags["id"])
# pos_vect_frags_4_GL = np.ndarray((self.n_frags, 4), dtype=np.float32)
# col_vect_frags_4_GL = np.ndarray((self.n_frags, 4), dtype=np.float32)
#
# for id_f_curr in xrange(0 , self.sub_n_frags):
# id_d = modified_vect_frags['id_d'][id_f_curr]
# id_c = init_vect_frags['id_c'][id_d]
# pos_vect_frags_4_GL[id_f_curr, 0] =
# modified_vect_frags['pos'][id_f_curr]
# pos_vect_frags_4_GL[id_f_curr, 1] =
# modified_vect_frags['id_c'][id_f_curr]
# pos_vect_frags_4_GL[id_f_curr, 2] = 0.
# pos_vect_frags_4_GL[id_f_curr, 3] = np.float32(1.0)
#
# col_vect_frags_4_GL[id_f_curr, 0] =
# np.float32(RGB_tuples[id_c - 1][0])
# col_vect_frags_4_GL[id_f_curr, 1] =
# np.float32(RGB_tuples[id_c - 1][1])
# col_vect_frags_4_GL[id_f_curr, 2] =
# np.float32(RGB_tuples[id_c - 1][2])
# col_vect_frags_4_GL[id_f_curr, 3] =
# np.float32(1.0)
#
# self.sub_col_vect_frags_4_GL = col_vect_frags_4_GL
# self.sub_pos_vect_frags_4_GL = pos_vect_frags_4_GL
self.new_sub_S_o_A_frags = modified_vect_frags | ['def', 'modify_sub_vect_frags', '(', 'self', ')', ':', 'modified_vect_frags', '=', 'dict', '(', ')', 'init_vect_frags', '=', 'self', '.', 'sub_level', '.', 'S_o_A_frags', '# init_max_id_d = init_vect_frags["id"].max()', 'max_id_F', '=', 'len', '(', 'init_vect_frags', '[', '"id"', ']', ')', 'max_id_C', '=', 'init_vect_frags', '[', '"id_c"', ']', '.', 'max', '(', ')', '+', '1', '# HSV_tuples = [(x*1.0/(max_id_C - 1), 0.5, 0.5) for x in range(0,', '# (max_id_C-1))]', '# cmap = plt.cm.gist_ncar', 'cmap', '=', 'plt', '.', 'cm', '.', 'prism', '# extract all colors from the .jet map', 'cmaplist', '=', '[', 'cmap', '(', 'i', ')', 'for', 'i', 'in', 'range', '(', 'cmap', '.', 'N', ')', ']', 'id_smple', '=', 'np', '.', 'linspace', '(', '0', ',', 'cmap', '.', 'N', ',', 'num', '=', 'max_id_C', ')', 'RGB_tuples', '=', '[', ']', 'for', 'i', 'in', 'range', '(', '0', ',', 'max_id_C', '-', '1', ')', ':', 'RGB_tuples', '.', 'append', '(', 'cmaplist', '[', 'int', '(', 'id_smple', '[', 'i', ']', ')', ']', ')', '# RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)', 'self', '.', 'init_n_sub_frags', '=', 'len', '(', 'init_vect_frags', '[', '"id"', ']', ')', 'modified_vect_frags', '[', '"pos"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"pos"', ']', ')', 'modified_vect_frags', '[', '"sub_pos"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"sub_pos"', ']', ')', 'modified_vect_frags', '[', '"id_c"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"id_c"', ']', ')', 'modified_vect_frags', '[', '"start_bp"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"start_bp"', ']', ')', 'modified_vect_frags', '[', '"len_bp"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"len_bp"', ']', ')', 'modified_vect_frags', '[', '"sub_len"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"sub_len"', ']', ')', 'modified_vect_frags', '[', '"circ"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"circ"', ']', ')', 'modified_vect_frags', '[', '"id"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"id"', ']', ')', 'modified_vect_frags', '[', '"prev"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"prev"', ']', ')', 'modified_vect_frags', '[', '"next"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"next"', ']', ')', 'modified_vect_frags', '[', '"l_cont"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"l_cont"', ']', ')', 'modified_vect_frags', '[', '"sub_l_cont"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"sub_l_cont"', ']', ')', 'modified_vect_frags', '[', '"l_cont_bp"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"l_cont_bp"', ']', ')', 'modified_vect_frags', '[', '"n_accu"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"n_accu"', ']', ')', 'modified_vect_frags', '[', '"rep"', ']', '=', 'list', '(', 'np', '.', 'zeros', '(', 'max_id_F', ',', 'dtype', '=', 'np', '.', 'int32', ')', ')', 'modified_vect_frags', '[', '"activ"', ']', '=', 'list', '(', 'np', '.', 'ones', '(', 'max_id_F', ',', 'dtype', '=', 'np', '.', 'int32', ')', ')', 'modified_vect_frags', '[', '"id_d"', ']', '=', 'list', '(', 'init_vect_frags', '[', '"id"', ']', ')', '# WARNING IMPLICT BREAKING OF THE CONTIGS', 'for', 'data_dup', 'in', 'self', '.', 'sub_candidates_output_data', ':', 'n_dup', '=', 'int', '(', 'data_dup', '[', '1', ']', ')', 'id_f', '=', 'data_dup', '[', '0', ']', 'for', 'k', 'in', 'range', '(', '0', ',', 'n_dup', ')', ':', 'modified_vect_frags', '[', '"pos"', ']', '.', 'append', '(', '0', ')', 'modified_vect_frags', '[', '"sub_pos"', ']', '.', 'append', '(', '0', ')', 
'modified_vect_frags', '[', '"id_c"', ']', '.', 'append', '(', 'max_id_C', ')', 'modified_vect_frags', '[', '"start_bp"', ']', '.', 'append', '(', '0', ')', 'modified_vect_frags', '[', '"len_bp"', ']', '.', 'append', '(', 'init_vect_frags', '[', '"len_bp"', ']', '[', 'id_f', ']', ')', 'modified_vect_frags', '[', '"sub_len"', ']', '.', 'append', '(', 'init_vect_frags', '[', '"sub_len"', ']', '[', 'id_f', ']', ')', 'modified_vect_frags', '[', '"circ"', ']', '.', 'append', '(', 'init_vect_frags', '[', '"circ"', ']', '[', 'id_f', ']', ')', 'modified_vect_frags', '[', '"id"', ']', '.', 'append', '(', 'max_id_F', ')', 'modified_vect_frags', '[', '"prev"', ']', '.', 'append', '(', '-', '1', ')', 'modified_vect_frags', '[', '"next"', ']', '.', 'append', '(', '-', '1', ')', 'modified_vect_frags', '[', '"l_cont"', ']', '.', 'append', '(', '1', ')', 'modified_vect_frags', '[', '"sub_l_cont"', ']', '.', 'append', '(', 'init_vect_frags', '[', '"sub_len"', ']', '[', 'id_f', ']', ')', 'modified_vect_frags', '[', '"l_cont_bp"', ']', '.', 'append', '(', 'init_vect_frags', '[', '"len_bp"', ']', '[', 'id_f', ']', ')', 'modified_vect_frags', '[', '"n_accu"', ']', '.', 'append', '(', 'init_vect_frags', '[', '"n_accu"', ']', '[', 'id_f', ']', ')', 'modified_vect_frags', '[', '"rep"', ']', '.', 'append', '(', '1', ')', 'modified_vect_frags', '[', '"activ"', ']', '.', 'append', '(', '1', ')', 'modified_vect_frags', '[', '"id_d"', ']', '.', 'append', '(', 'init_vect_frags', '[', '"id"', ']', '[', 'id_f', ']', ')', 'max_id_F', '+=', '1', 'max_id_C', '+=', '1', 'logger', '.', 'info', '(', '"MAX ID CONTIG = {}"', '.', 'format', '(', 'max_id_C', ')', ')', 'modified_vect_frags', '[', '"pos"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"pos"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"sub_pos"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"sub_pos"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"id_c"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"id_c"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"start_bp"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"start_bp"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"len_bp"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"len_bp"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"sub_len"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"sub_len"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"circ"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"circ"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"id"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"id"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"prev"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"prev"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"next"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"next"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"l_cont"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"l_cont"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"sub_l_cont"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', 
'"sub_l_cont"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"l_cont_bp"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"l_cont_bp"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"n_accu"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"n_accu"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"rep"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"rep"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"activ"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"activ"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'modified_vect_frags', '[', '"id_d"', ']', '=', 'np', '.', 'array', '(', 'modified_vect_frags', '[', '"id_d"', ']', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'id_x', '=', '0', 'collector_id_repeats', '=', '[', ']', 'frag_dispatcher', '=', '[', ']', 'for', 'id_f', 'in', 'range', '(', '0', ',', 'self', '.', 'init_n_sub_frags', ')', ':', 'if', 'id_f', 'in', 'self', '.', 'sub_candidates_dup', ':', 'id_start', '=', 'id_x', 'id_dup', '=', 'np', '.', 'nonzero', '(', 'modified_vect_frags', '[', '"id_d"', ']', '==', 'id_f', ')', '[', '0', ']', 'collector_id_repeats', '.', 'extend', '(', 'list', '(', 'id_dup', ')', ')', 'n_rep', '=', 'len', '(', 'id_dup', ')', 'frag_dispatcher', '.', 'append', '(', '(', 'np', '.', 'int32', '(', 'id_start', ')', ',', 'np', '.', 'int32', '(', 'id_start', '+', 'n_rep', ')', ')', ')', 'id_x', '+=', 'n_rep', 'else', ':', 'id_start', '=', 'id_x', 'n_rep', '=', '1', 'frag_dispatcher', '.', 'append', '(', '(', 'np', '.', 'int32', '(', 'id_start', ')', ',', 'np', '.', 'int32', '(', 'id_start', '+', 'n_rep', ')', ')', ')', 'collector_id_repeats', '.', 'append', '(', 'id_f', ')', 'id_x', '+=', '1', 'self', '.', 'sub_collector_id_repeats', '=', 'np', '.', 'array', '(', 'collector_id_repeats', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'self', '.', 'sub_frag_dispatcher', '=', 'np', '.', 'array', '(', 'frag_dispatcher', ',', 'dtype', '=', 'self', '.', 'int2', ')', 'self', '.', 'sub_n_frags', '=', 'len', '(', 'modified_vect_frags', '[', '"id"', ']', ')', '# pos_vect_frags_4_GL = np.ndarray((self.n_frags, 4), dtype=np.float32)', '# col_vect_frags_4_GL = np.ndarray((self.n_frags, 4), dtype=np.float32)', '#', '# for id_f_curr in xrange(0 , self.sub_n_frags):', "# id_d = modified_vect_frags['id_d'][id_f_curr]", "# id_c = init_vect_frags['id_c'][id_d]", '# pos_vect_frags_4_GL[id_f_curr, 0] =', "# modified_vect_frags['pos'][id_f_curr]", '# pos_vect_frags_4_GL[id_f_curr, 1] =', "# modified_vect_frags['id_c'][id_f_curr]", '# pos_vect_frags_4_GL[id_f_curr, 2] = 0.', '# pos_vect_frags_4_GL[id_f_curr, 3] = np.float32(1.0)', '#', '# col_vect_frags_4_GL[id_f_curr, 0] =', '# np.float32(RGB_tuples[id_c - 1][0])', '# col_vect_frags_4_GL[id_f_curr, 1] =', '# np.float32(RGB_tuples[id_c - 1][1])', '# col_vect_frags_4_GL[id_f_curr, 2] =', '# np.float32(RGB_tuples[id_c - 1][2])', '# col_vect_frags_4_GL[id_f_curr, 3] =', '# np.float32(1.0)', '#', '# self.sub_col_vect_frags_4_GL = col_vect_frags_4_GL', '# self.sub_pos_vect_frags_4_GL = pos_vect_frags_4_GL', 'self', '.', 'new_sub_S_o_A_frags', '=', 'modified_vect_frags'] | include repeated frags | ['include', 'repeated', 'frags'] | train | https://github.com/koszullab/instaGRAAL/blob/1c02ca838e57d8178eec79f223644b2acd0153dd/instagraal/simu_single.py#L452-L637 |
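The dispatcher bookkeeping at the end of the method is the reusable idea: every original fragment id maps to a contiguous slice of its (possibly duplicated) copies. A standalone sketch with toy arrays standing in for the real fragment vectors:

```python
import numpy as np

# Toy stand-ins: id_d maps every (possibly duplicated) fragment back to its
# original id; candidates_dup lists the original ids that were duplicated.
id_d = np.array([0, 1, 2, 3, 1, 1], dtype=np.int32)  # frags 4 and 5 copy frag 1
candidates_dup = {1}
n_init_frags = 4

collector_id_repeats = []
frag_dispatcher = []
id_x = 0
for id_f in range(n_init_frags):
    if id_f in candidates_dup:
        id_dup = np.nonzero(id_d == id_f)[0]        # every copy of this fragment
        collector_id_repeats.extend(id_dup.tolist())
        frag_dispatcher.append((id_x, id_x + len(id_dup)))
        id_x += len(id_dup)
    else:
        collector_id_repeats.append(id_f)           # a single, non-repeated copy
        frag_dispatcher.append((id_x, id_x + 1))
        id_x += 1

print(collector_id_repeats)  # [0, 1, 4, 5, 2, 3]
print(frag_dispatcher)       # [(0, 1), (1, 4), (4, 5), (5, 6)]
```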
8,751 | adafruit/Adafruit_CircuitPython_ADS1x15 | adafruit_ads1x15/ads1x15.py | ADS1x15._read | def _read(self, pin):
"""Perform an ADC read. Returns the signed integer result of the read."""
config = _ADS1X15_CONFIG_OS_SINGLE
config |= (pin & 0x07) << _ADS1X15_CONFIG_MUX_OFFSET
config |= _ADS1X15_CONFIG_GAIN[self.gain]
config |= self.mode
config |= self.rate_config[self.data_rate]
config |= _ADS1X15_CONFIG_COMP_QUE_DISABLE
self._write_register(_ADS1X15_POINTER_CONFIG, config)
while not self._conversion_complete():
time.sleep(0.01)
return self.get_last_result() | python | def _read(self, pin):
"""Perform an ADC read. Returns the signed integer result of the read."""
config = _ADS1X15_CONFIG_OS_SINGLE
config |= (pin & 0x07) << _ADS1X15_CONFIG_MUX_OFFSET
config |= _ADS1X15_CONFIG_GAIN[self.gain]
config |= self.mode
config |= self.rate_config[self.data_rate]
config |= _ADS1X15_CONFIG_COMP_QUE_DISABLE
self._write_register(_ADS1X15_POINTER_CONFIG, config)
while not self._conversion_complete():
time.sleep(0.01)
return self.get_last_result() | ['def', '_read', '(', 'self', ',', 'pin', ')', ':', 'config', '=', '_ADS1X15_CONFIG_OS_SINGLE', 'config', '|=', '(', 'pin', '&', '0x07', ')', '<<', '_ADS1X15_CONFIG_MUX_OFFSET', 'config', '|=', '_ADS1X15_CONFIG_GAIN', '[', 'self', '.', 'gain', ']', 'config', '|=', 'self', '.', 'mode', 'config', '|=', 'self', '.', 'rate_config', '[', 'self', '.', 'data_rate', ']', 'config', '|=', '_ADS1X15_CONFIG_COMP_QUE_DISABLE', 'self', '.', '_write_register', '(', '_ADS1X15_POINTER_CONFIG', ',', 'config', ')', 'while', 'not', 'self', '.', '_conversion_complete', '(', ')', ':', 'time', '.', 'sleep', '(', '0.01', ')', 'return', 'self', '.', 'get_last_result', '(', ')'] | Perform an ADC read. Returns the signed integer result of the read. | ['Perform', 'an', 'ADC', 'read', '.', 'Returns', 'the', 'signed', 'integer', 'result', 'of', 'the', 'read', '.'] | train | https://github.com/adafruit/Adafruit_CircuitPython_ADS1x15/blob/5ba760c6de40824386f1df343603eab77d3e336c/adafruit_ads1x15/ads1x15.py#L151-L164 |
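_read is internal; callers normally reach it through the AnalogIn helper. A hedged wiring sketch follows; the board pins, chip variant, and channel choice are assumptions.

```python
import board
import busio

import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn

# Assumes an ADS1115 on the board's default I2C pins; channel P0 is arbitrary.
i2c = busio.I2C(board.SCL, board.SDA)
ads = ADS.ADS1115(i2c)
ads.gain = 1  # +/- 4.096 V full-scale range

chan = AnalogIn(ads, ADS.P0)     # single-ended read; this ends up calling _read
print(chan.value, chan.voltage)  # raw reading and the converted voltage
```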
8,752 | mikedh/trimesh | trimesh/base.py | Trimesh.contains | def contains(self, points):
"""
Given a set of points, determine whether or not they are inside the mesh.
This raises an error if called on a non-watertight mesh.
Parameters
---------
points : (n, 3) float
Points in cartesian space
Returns
---------
contains : (n, ) bool
Whether or not each point is inside the mesh
"""
if not self.is_watertight:
log.warning('Mesh is non- watertight for contained point query!')
contains = self.ray.contains_points(points)
return contains | python | def contains(self, points):
"""
Given a set of points, determine whether or not they are inside the mesh.
This raises an error if called on a non-watertight mesh.
Parameters
---------
points : (n, 3) float
Points in cartesian space
Returns
---------
contains : (n, ) bool
Whether or not each point is inside the mesh
"""
if not self.is_watertight:
log.warning('Mesh is non- watertight for contained point query!')
contains = self.ray.contains_points(points)
return contains | ['def', 'contains', '(', 'self', ',', 'points', ')', ':', 'if', 'not', 'self', '.', 'is_watertight', ':', 'log', '.', 'warning', '(', "'Mesh is non- watertight for contained point query!'", ')', 'contains', '=', 'self', '.', 'ray', '.', 'contains_points', '(', 'points', ')', 'return', 'contains'] | Given a set of points, determine whether or not they are inside the mesh.
This raises an error if called on a non-watertight mesh.
Parameters
---------
points : (n, 3) float
Points in cartesian space
Returns
---------
contains : (n, ) bool
Whether or not each point is inside the mesh | ['Given', 'a', 'set', 'of', 'points', 'determine', 'whether', 'or', 'not', 'they', 'are', 'inside', 'the', 'mesh', '.', 'This', 'raises', 'an', 'error', 'if', 'called', 'on', 'a', 'non', '-', 'watertight', 'mesh', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/base.py#L2503-L2521 |
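A hedged usage sketch; the icosphere keeps the mesh watertight so the query is well defined, and the query points are arbitrary.

```python
import numpy as np
import trimesh

# An icosphere is watertight, so the containment query is well defined.
mesh = trimesh.creation.icosphere(radius=1.0)

points = np.array([[0.0, 0.0, 0.0],   # at the centre -> inside
                   [2.0, 0.0, 0.0]])  # well outside the unit sphere
print(mesh.contains(points))          # expected: [ True False]
```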
8,753 | kislyuk/aegea | aegea/packages/github3/gists/gist.py | Gist.iter_forks | def iter_forks(self, number=-1, etag=None):
"""Iterator of forks of this gist.
.. versionchanged:: 0.9
Added params ``number`` and ``etag``.
:param int number: (optional), number of forks to iterate over.
Default: -1 will iterate over all forks of this gist.
:param str etag: (optional), ETag from a previous request to this
endpoint.
:returns: generator of :class:`Gist <Gist>`
"""
url = self._build_url('forks', base_url=self._api)
return self._iter(int(number), url, Gist, etag=etag) | python | def iter_forks(self, number=-1, etag=None):
"""Iterator of forks of this gist.
.. versionchanged:: 0.9
Added params ``number`` and ``etag``.
:param int number: (optional), number of forks to iterate over.
Default: -1 will iterate over all forks of this gist.
:param str etag: (optional), ETag from a previous request to this
endpoint.
:returns: generator of :class:`Gist <Gist>`
"""
url = self._build_url('forks', base_url=self._api)
return self._iter(int(number), url, Gist, etag=etag) | ['def', 'iter_forks', '(', 'self', ',', 'number', '=', '-', '1', ',', 'etag', '=', 'None', ')', ':', 'url', '=', 'self', '.', '_build_url', '(', "'forks'", ',', 'base_url', '=', 'self', '.', '_api', ')', 'return', 'self', '.', '_iter', '(', 'int', '(', 'number', ')', ',', 'url', ',', 'Gist', ',', 'etag', '=', 'etag', ')'] | Iterator of forks of this gist.
.. versionchanged:: 0.9
Added params ``number`` and ``etag``.
:param int number: (optional), number of forks to iterate over.
Default: -1 will iterate over all forks of this gist.
:param str etag: (optional), ETag from a previous request to this
endpoint.
:returns: generator of :class:`Gist <Gist>` | ['Iterator', 'of', 'forks', 'of', 'this', 'gist', '.'] | train | https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/gists/gist.py#L237-L252 |
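A hedged usage sketch against the 0.x-style API vendored here; the gist id is a placeholder and anonymous access is assumed.

```python
import github3

# Placeholder gist id; anonymous access works for public gists but is
# rate limited by the GitHub API.
gist = github3.gist("aa5a315d61ae9438b18d")
if gist is not None:
    for fork in gist.iter_forks(number=10):
        print(fork.id, fork.html_url)
```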
8,754 | jazzband/django-queued-storage | queued_storage/backends.py | QueuedStorage.transfer | def transfer(self, name, cache_key=None):
"""
Transfers the file with the given name to the remote storage
backend by queuing the task.
:param name: file name
:type name: str
:param cache_key: the cache key to set after a successful task run
:type cache_key: str
:rtype: task result
"""
if cache_key is None:
cache_key = self.get_cache_key(name)
return self.task.delay(name, cache_key,
self.local_path, self.remote_path,
self.local_options, self.remote_options) | python | def transfer(self, name, cache_key=None):
"""
Transfers the file with the given name to the remote storage
backend by queuing the task.
:param name: file name
:type name: str
:param cache_key: the cache key to set after a successful task run
:type cache_key: str
:rtype: task result
"""
if cache_key is None:
cache_key = self.get_cache_key(name)
return self.task.delay(name, cache_key,
self.local_path, self.remote_path,
self.local_options, self.remote_options) | ['def', 'transfer', '(', 'self', ',', 'name', ',', 'cache_key', '=', 'None', ')', ':', 'if', 'cache_key', 'is', 'None', ':', 'cache_key', '=', 'self', '.', 'get_cache_key', '(', 'name', ')', 'return', 'self', '.', 'task', '.', 'delay', '(', 'name', ',', 'cache_key', ',', 'self', '.', 'local_path', ',', 'self', '.', 'remote_path', ',', 'self', '.', 'local_options', ',', 'self', '.', 'remote_options', ')'] | Transfers the file with the given name to the remote storage
backend by queuing the task.
:param name: file name
:type name: str
:param cache_key: the cache key to set after a successful task run
:type cache_key: str
:rtype: task result | ['Transfers', 'the', 'file', 'with', 'the', 'given', 'name', 'to', 'the', 'remote', 'storage', 'backend', 'by', 'queuing', 'the', 'task', '.'] | train | https://github.com/jazzband/django-queued-storage/blob/f8225d88a01ef5ca8001aeb3f7f80818a022a12d/queued_storage/backends.py#L206-L221 |
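transfer() is normally queued by the backend itself when a file is saved; a hedged configuration sketch follows, assuming a configured Django project with Celery. The dotted storage paths and the model are illustrative.

```python
from django.db import models

from queued_storage.backends import QueuedStorage

# The dotted storage paths are illustrative: files land locally first and a
# Celery task copies them to the remote backend afterwards.
queued_storage = QueuedStorage(
    "django.core.files.storage.FileSystemStorage",
    "storages.backends.s3boto.S3BotoStorage",
)


class Document(models.Model):
    upload = models.FileField(upload_to="docs/", storage=queued_storage)


# The transfer is queued automatically on save; it can also be triggered by
# hand for a file name that already exists in the local storage:
# queued_storage.transfer("docs/report.pdf")
```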
8,755 | vmware/pyvmomi | pyVmomi/Differ.py | Differ.DiffDataObjects | def DiffDataObjects(self, oldObj, newObj):
"""Diff Data Objects"""
if oldObj == newObj:
return True
if not oldObj or not newObj:
__Log__.debug('DiffDataObjects: One of the objects is None')
return False
oldType = Type(oldObj)
newType = Type(newObj)
if oldType != newType:
__Log__.debug(
'DiffDataObjects: Types do not match for dataobjects. %s != %s'
% (oldObj._wsdlName, newObj._wsdlName))
return False
for prop in oldObj._GetPropertyList():
oldProp = getattr(oldObj, prop.name)
newProp = getattr(newObj, prop.name)
propType = oldObj._GetPropertyInfo(prop.name).type
if not oldProp and not newProp:
continue
elif ((prop.flags & VmomiSupport.F_OPTIONAL) and
self._looseMatch and (not newProp or not oldProp)):
continue
elif not oldProp or not newProp:
__Log__.debug(
'DiffDataObjects: One of the objects has property %s unset'
% prop.name)
return False
bMatch = True
if IsPrimitiveType(oldProp):
bMatch = oldProp == newProp
elif isinstance(oldProp, types.ManagedObject):
bMatch = self.DiffAnyObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
elif isinstance(oldProp, types.DataObject):
if prop.flags & VmomiSupport.F_LINK:
bMatch = oldObj.GetKey() == newObj.GetKey()
LogIf(not bMatch, 'DiffDataObjects: Key match failed %s != %s'
% (oldObj.GetKey(), newObj.GetKey()))
else:
bMatch = self.DiffAnyObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
elif isinstance(oldProp, list):
bMatch = self.DiffArrayObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
else:
raise TypeError("Unknown type: "+repr(propType))
if not bMatch:
__Log__.debug('DiffDataObjects: Objects differ in property %s'
% prop.name)
return False
return True | python | def DiffDataObjects(self, oldObj, newObj):
"""Diff Data Objects"""
if oldObj == newObj:
return True
if not oldObj or not newObj:
__Log__.debug('DiffDataObjects: One of the objects is None')
return False
oldType = Type(oldObj)
newType = Type(newObj)
if oldType != newType:
__Log__.debug(
'DiffDataObjects: Types do not match for dataobjects. %s != %s'
% (oldObj._wsdlName, newObj._wsdlName))
return False
for prop in oldObj._GetPropertyList():
oldProp = getattr(oldObj, prop.name)
newProp = getattr(newObj, prop.name)
propType = oldObj._GetPropertyInfo(prop.name).type
if not oldProp and not newProp:
continue
elif ((prop.flags & VmomiSupport.F_OPTIONAL) and
self._looseMatch and (not newProp or not oldProp)):
continue
elif not oldProp or not newProp:
__Log__.debug(
'DiffDataObjects: One of the objects has property %s unset'
% prop.name)
return False
bMatch = True
if IsPrimitiveType(oldProp):
bMatch = oldProp == newProp
elif isinstance(oldProp, types.ManagedObject):
bMatch = self.DiffAnyObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
elif isinstance(oldProp, types.DataObject):
if prop.flags & VmomiSupport.F_LINK:
bMatch = oldObj.GetKey() == newObj.GetKey()
LogIf(not bMatch, 'DiffDataObjects: Key match failed %s != %s'
% (oldObj.GetKey(), newObj.GetKey()))
else:
bMatch = self.DiffAnyObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
elif isinstance(oldProp, list):
bMatch = self.DiffArrayObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
else:
raise TypeError("Unknown type: "+repr(propType))
if not bMatch:
__Log__.debug('DiffDataObjects: Objects differ in property %s'
% prop.name)
return False
return True | ['def', 'DiffDataObjects', '(', 'self', ',', 'oldObj', ',', 'newObj', ')', ':', 'if', 'oldObj', '==', 'newObj', ':', 'return', 'True', 'if', 'not', 'oldObj', 'or', 'not', 'newObj', ':', '__Log__', '.', 'debug', '(', "'DiffDataObjects: One of the objects in None'", ')', 'return', 'False', 'oldType', '=', 'Type', '(', 'oldObj', ')', 'newType', '=', 'Type', '(', 'newObj', ')', 'if', 'oldType', '!=', 'newType', ':', '__Log__', '.', 'debug', '(', "'DiffDataObjects: Types do not match for dataobjects. %s != %s'", '%', '(', 'oldObj', '.', '_wsdlName', ',', 'newObj', '.', '_wsdlName', ')', ')', 'return', 'False', 'for', 'prop', 'in', 'oldObj', '.', '_GetPropertyList', '(', ')', ':', 'oldProp', '=', 'getattr', '(', 'oldObj', ',', 'prop', '.', 'name', ')', 'newProp', '=', 'getattr', '(', 'newObj', ',', 'prop', '.', 'name', ')', 'propType', '=', 'oldObj', '.', '_GetPropertyInfo', '(', 'prop', '.', 'name', ')', '.', 'type', 'if', 'not', 'oldProp', 'and', 'not', 'newProp', ':', 'continue', 'elif', '(', '(', 'prop', '.', 'flags', '&', 'VmomiSupport', '.', 'F_OPTIONAL', ')', 'and', 'self', '.', '_looseMatch', 'and', '(', 'not', 'newProp', 'or', 'not', 'oldProp', ')', ')', ':', 'continue', 'elif', 'not', 'oldProp', 'or', 'not', 'newProp', ':', '__Log__', '.', 'debug', '(', "'DiffDataObjects: One of the objects has property %s unset'", '%', 'prop', '.', 'name', ')', 'return', 'False', 'bMatch', '=', 'True', 'if', 'IsPrimitiveType', '(', 'oldProp', ')', ':', 'bMatch', '=', 'oldProp', '==', 'newProp', 'elif', 'isinstance', '(', 'oldProp', ',', 'types', '.', 'ManagedObject', ')', ':', 'bMatch', '=', 'self', '.', 'DiffAnyObjects', '(', 'oldProp', ',', 'newProp', ',', 'prop', '.', 'flags', '&', 'VmomiSupport', '.', 'F_LINK', ')', 'elif', 'isinstance', '(', 'oldProp', ',', 'types', '.', 'DataObject', ')', ':', 'if', 'prop', '.', 'flags', '&', 'VmomiSupport', '.', 'F_LINK', ':', 'bMatch', '=', 'oldObj', '.', 'GetKey', '(', ')', '==', 'newObj', '.', 'GetKey', '(', ')', 'LogIf', '(', 'not', 'bMatch', ',', "'DiffDataObjects: Key match failed %s != %s'", '%', '(', 'oldObj', '.', 'GetKey', '(', ')', ',', 'newObj', '.', 'GetKey', '(', ')', ')', ')', 'else', ':', 'bMatch', '=', 'self', '.', 'DiffAnyObjects', '(', 'oldProp', ',', 'newProp', ',', 'prop', '.', 'flags', '&', 'VmomiSupport', '.', 'F_LINK', ')', 'elif', 'isinstance', '(', 'oldProp', ',', 'list', ')', ':', 'bMatch', '=', 'self', '.', 'DiffArrayObjects', '(', 'oldProp', ',', 'newProp', ',', 'prop', '.', 'flags', '&', 'VmomiSupport', '.', 'F_LINK', ')', 'else', ':', 'raise', 'TypeError', '(', '"Unknown type: "', '+', 'repr', '(', 'propType', ')', ')', 'if', 'not', 'bMatch', ':', '__Log__', '.', 'debug', '(', "'DiffDataObjects: Objects differ in property %s'", '%', 'prop', '.', 'name', ')', 'return', 'False', 'return', 'True'] | Diff Data Objects | ['Diff', 'Data', 'Objects'] | train | https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVmomi/Differ.py#L169-L222 |
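A hedged sketch of diffing two VMODL data objects; the spec type is chosen for illustration, and Differ is assumed to be constructible with its default arguments.

```python
from pyVmomi import vim
from pyVmomi.Differ import Differ

# Two config specs that differ only in memoryMB; the spec type is illustrative.
spec_a = vim.vm.ConfigSpec(numCPUs=2, memoryMB=2048)
spec_b = vim.vm.ConfigSpec(numCPUs=2, memoryMB=4096)

differ = Differ()  # assumes the default constructor arguments are acceptable
print(differ.DiffDataObjects(spec_a, spec_a))  # True  -- identical objects
print(differ.DiffDataObjects(spec_a, spec_b))  # False -- memoryMB differs
```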
8,756 | NiklasRosenstein-Python/nr-deprecated | nr/archive.py | open | def open(filename=None, file=None, mode='r', suffix=None, options=None):
"""
Opens the archive at the specified *filename* or from the file-like
object *file* using the appropriate opener. A specific opener can be
specified by passing the *suffix* argument.
# Parameters
filename (str): A filename to open the archive from.
file (file-like): A file-like object as source/destination.
mode (str): The mode to open the archive in.
suffix (str): Possible override for the *filename* suffix. Must be
specified when *file* is passed instead of *filename*.
options (dict): A dictionary that will be passed to the opener
with which additional options can be specified.
return (archive-like): An object that represents the archive and follows
the interface of the #tarfile.TarFile class.
"""
if mode not in ('r', 'w', 'a'):
raise ValueError("invalid mode: {0!r}".format(mode))
if suffix is None:
suffix, opener = get_opener(filename)
if file is not None:
filename = None # We don't need it anymore.
else:
if file is not None and filename is not None:
raise ValueError("filename must not be set with file & suffix specified")
try:
opener = openers[suffix]
except KeyError:
raise UnknownArchive(suffix)
if options is None:
options = {}
if file is not None:
if mode in 'wa' and not hasattr(file, 'write'):
raise TypeError("file.write() does not exist", file)
if mode == 'r' and not hasattr(file, 'read'):
raise TypeError("file.read() does not exist", file)
if [filename, file].count(None) != 1:
raise ValueError("either filename or file must be specified")
if filename is not None:
file = builtins.open(filename, mode + 'b')
try:
return opener(file, mode, options)
except:
if filename is not None:
file.close()
raise | python | def open(filename=None, file=None, mode='r', suffix=None, options=None):
"""
Opens the archive at the specified *filename* or from the file-like
object *file* using the appropriate opener. A specific opener can be
specified by passing the *suffix* argument.
# Parameters
filename (str): A filename to open the archive from.
file (file-like): A file-like object as source/destination.
mode (str): The mode to open the archive in.
suffix (str): Possible override for the *filename* suffix. Must be
specified when *file* is passed instead of *filename*.
options (dict): A dictionary that will be passed to the opener
with which additional options can be specified.
return (archive-like): An object that represents the archive and follows
the interface of the #tarfile.TarFile class.
"""
if mode not in ('r', 'w', 'a'):
raise ValueError("invalid mode: {0!r}".format(mode))
if suffix is None:
suffix, opener = get_opener(filename)
if file is not None:
filename = None # We don't need it anymore.
else:
if file is not None and filename is not None:
raise ValueError("filename must not be set with file & suffix specified")
try:
opener = openers[suffix]
except KeyError:
raise UnknownArchive(suffix)
if options is None:
options = {}
if file is not None:
if mode in 'wa' and not hasattr(file, 'write'):
raise TypeError("file.write() does not exist", file)
if mode == 'r' and not hasattr(file, 'read'):
raise TypeError("file.read() does not exist", file)
if [filename, file].count(None) != 1:
raise ValueError("either filename or file must be specified")
if filename is not None:
file = builtins.open(filename, mode + 'b')
try:
return opener(file, mode, options)
except:
if filename is not None:
file.close()
raise | ['def', 'open', '(', 'filename', '=', 'None', ',', 'file', '=', 'None', ',', 'mode', '=', "'r'", ',', 'suffix', '=', 'None', ',', 'options', '=', 'None', ')', ':', 'if', 'mode', 'not', 'in', '(', "'r'", ',', "'w'", ',', "'a'", ')', ':', 'raise', 'ValueError', '(', '"invalid mode: {0!r}"', '.', 'format', '(', 'mode', ')', ')', 'if', 'suffix', 'is', 'None', ':', 'suffix', ',', 'opener', '=', 'get_opener', '(', 'filename', ')', 'if', 'file', 'is', 'not', 'None', ':', 'filename', '=', 'None', "# We don't need it anymore.", 'else', ':', 'if', 'file', 'is', 'not', 'None', 'and', 'filename', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', '"filename must not be set with file & suffix specified"', ')', 'try', ':', 'opener', '=', 'openers', '[', 'suffix', ']', 'except', 'KeyError', ':', 'raise', 'UnknownArchive', '(', 'suffix', ')', 'if', 'options', 'is', 'None', ':', 'options', '=', '{', '}', 'if', 'file', 'is', 'not', 'None', ':', 'if', 'mode', 'in', "'wa'", 'and', 'not', 'hasattr', '(', 'file', ',', "'write'", ')', ':', 'raise', 'TypeError', '(', '"file.write() does not exist"', ',', 'file', ')', 'if', 'mode', '==', "'r'", 'and', 'not', 'hasattr', '(', 'file', ',', "'read'", ')', ':', 'raise', 'TypeError', '(', '"file.read() does not exist"', ',', 'file', ')', 'if', '[', 'filename', ',', 'file', ']', '.', 'count', '(', 'None', ')', '!=', '1', ':', 'raise', 'ValueError', '(', '"either filename or file must be specified"', ')', 'if', 'filename', 'is', 'not', 'None', ':', 'file', '=', 'builtins', '.', 'open', '(', 'filename', ',', 'mode', '+', "'b'", ')', 'try', ':', 'return', 'opener', '(', 'file', ',', 'mode', ',', 'options', ')', 'except', ':', 'if', 'filename', 'is', 'not', 'None', ':', 'file', '.', 'close', '(', ')', 'raise'] | Opens the archive at the specified *filename* or from the file-like
object *file* using the appropriate opener. A specific opener can be
specified by passing the *suffix* argument.
# Parameters
filename (str): A filename to open the archive from.
file (file-like): A file-like object as source/destination.
mode (str): The mode to open the archive in.
suffix (str): Possible override for the *filename* suffix. Must be
specified when *file* is passed instead of *filename*.
options (dict): A dictionary that will be passed to the opener
with which additional options can be specified.
return (archive-like): An object that represents the archive and follows
the interface of the #tarfile.TarFile class. | ['Opens', 'the', 'archive', 'at', 'the', 'specified', '*', 'filename', '*', 'or', 'from', 'the', 'file', '-', 'like', 'object', '*', 'file', '*', 'using', 'the', 'appropriate', 'opener', '.', 'A', 'specific', 'opener', 'can', 'be', 'specified', 'by', 'passing', 'the', '*', 'suffix', '*', 'argument', '.'] | train | https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/archive.py#L90-L142 |
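A hedged usage sketch; it assumes a ".tar.gz" opener is registered for this suffix and that the returned object follows the tarfile.TarFile interface the docstring promises. The filenames are placeholders.

```python
from nr import archive

# Create a file to pack; the filenames here are placeholders.
with open("payload.bin", "wb") as fp:
    fp.write(b"hello")

# Assumes an opener is registered for the ".tar.gz" suffix and that it returns
# a tarfile.TarFile-like object, as the docstring states.
arc = archive.open("bundle.tar.gz", mode="w")
try:
    arc.add("payload.bin", arcname="payload.bin")
finally:
    arc.close()

arc = archive.open("bundle.tar.gz", mode="r")
try:
    print(arc.getnames())  # expected: ['payload.bin']
finally:
    arc.close()
```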
8,757 | openeemeter/eeweather | eeweather/validation.py | valid_zcta_or_raise | def valid_zcta_or_raise(zcta):
""" Check if ZCTA is valid and raise eeweather.UnrecognizedZCTAError if not. """
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select exists (
select
zcta_id
from
zcta_metadata
where
zcta_id = ?
)
""",
(zcta,),
)
(exists,) = cur.fetchone()
if exists:
return True
else:
raise UnrecognizedZCTAError(zcta) | python | def valid_zcta_or_raise(zcta):
""" Check if ZCTA is valid and raise eeweather.UnrecognizedZCTAError if not. """
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select exists (
select
zcta_id
from
zcta_metadata
where
zcta_id = ?
)
""",
(zcta,),
)
(exists,) = cur.fetchone()
if exists:
return True
else:
raise UnrecognizedZCTAError(zcta) | ['def', 'valid_zcta_or_raise', '(', 'zcta', ')', ':', 'conn', '=', 'metadata_db_connection_proxy', '.', 'get_connection', '(', ')', 'cur', '=', 'conn', '.', 'cursor', '(', ')', 'cur', '.', 'execute', '(', '"""\n select exists (\n select\n zcta_id\n from\n zcta_metadata\n where\n zcta_id = ?\n )\n """', ',', '(', 'zcta', ',', ')', ',', ')', '(', 'exists', ',', ')', '=', 'cur', '.', 'fetchone', '(', ')', 'if', 'exists', ':', 'return', 'True', 'else', ':', 'raise', 'UnrecognizedZCTAError', '(', 'zcta', ')'] | Check if ZCTA is valid and raise eeweather.UnrecognizedZCTAError if not. | ['Check', 'if', 'ZCTA', 'is', 'valid', 'and', 'raise', 'eeweather', '.', 'UnrecognizedZCTAError', 'if', 'not', '.'] | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/validation.py#L27-L49 |
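A short hedged example of calling the validator in the row above; the ZCTA codes are placeholders and the exception's import path is an assumption:

    from eeweather.validation import valid_zcta_or_raise
    from eeweather.exceptions import UnrecognizedZCTAError   # import path assumed

    valid_zcta_or_raise('91104')        # returns True when the code exists in the metadata DB
    try:
        valid_zcta_or_raise('00000')    # unknown codes raise instead of returning False
    except UnrecognizedZCTAError:
        print('not a recognized ZCTA')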
8,758 | sarugaku/mork | src/mork/virtualenv.py | VirtualEnv.get_sys_path | def get_sys_path(cls, python_path):
"""Get the :data:`sys.path` data for a given python executable.
:param str python_path: Path to a specific python executable.
:return: The system path information for that python runtime.
:rtype: list
"""
command = [python_path, "-c", "import json, sys; print(json.dumps(sys.path))"]
c = vistir.misc.run(command, return_object=True, block=True, nospin=True)
assert c.returncode == 0, "failed loading virtualenv path"
sys_path = json.loads(c.out.strip())
return sys_path | python | def get_sys_path(cls, python_path):
"""Get the :data:`sys.path` data for a given python executable.
:param str python_path: Path to a specific python executable.
:return: The system path information for that python runtime.
:rtype: list
"""
command = [python_path, "-c", "import json, sys; print(json.dumps(sys.path))"]
c = vistir.misc.run(command, return_object=True, block=True, nospin=True)
assert c.returncode == 0, "failed loading virtualenv path"
sys_path = json.loads(c.out.strip())
return sys_path | ['def', 'get_sys_path', '(', 'cls', ',', 'python_path', ')', ':', 'command', '=', '[', 'python_path', ',', '"-c"', ',', '"import json, sys; print(json.dumps(sys.path))"', ']', 'c', '=', 'vistir', '.', 'misc', '.', 'run', '(', 'command', ',', 'return_object', '=', 'True', ',', 'block', '=', 'True', ',', 'nospin', '=', 'True', ')', 'assert', 'c', '.', 'returncode', '==', '0', ',', '"failed loading virtualenv path"', 'sys_path', '=', 'json', '.', 'loads', '(', 'c', '.', 'out', '.', 'strip', '(', ')', ')', 'return', 'sys_path'] | Get the :data:`sys.path` data for a given python executable.
:param str python_path: Path to a specific python executable.
:return: The system path information for that python runtime.
:rtype: list | ['Get', 'the', ':', 'data', ':', 'sys', '.', 'path', 'data', 'for', 'a', 'given', 'python', 'executable', '.'] | train | https://github.com/sarugaku/mork/blob/c1a7cd63c490ed7fbecb7714fd5590d2609366de/src/mork/virtualenv.py#L138-L150 |
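Because get_sys_path() is a classmethod, it can be called without building a VirtualEnv instance; the import path below is an assumption taken from the file path in this row:

    import sys
    from mork.virtualenv import VirtualEnv   # import path assumed

    paths = VirtualEnv.get_sys_path(sys.executable)   # ask an interpreter for its sys.path
    print(paths[:3])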
8,759 | dtcooper/python-fitparse | fitparse/records.py | Crc.update | def update(self, byte_arr):
"""Read bytes and update the CRC computed."""
if byte_arr:
self.value = self.calculate(byte_arr, self.value) | python | def update(self, byte_arr):
"""Read bytes and update the CRC computed."""
if byte_arr:
self.value = self.calculate(byte_arr, self.value) | ['def', 'update', '(', 'self', ',', 'byte_arr', ')', ':', 'if', 'byte_arr', ':', 'self', '.', 'value', '=', 'self', '.', 'calculate', '(', 'byte_arr', ',', 'self', '.', 'value', ')'] | Read bytes and update the CRC computed. | ['Read', 'bytes', 'and', 'update', 'the', 'CRC', 'computed', '.'] | train | https://github.com/dtcooper/python-fitparse/blob/40fa2918c3e91bd8f89908ad3bad81c1c1189dd2/fitparse/records.py#L365-L368 |
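A hypothetical sketch of feeding bytes into the CRC accumulator; the no-argument constructor is an assumption, since only update() is shown in this row:

    from fitparse.records import Crc

    crc = Crc()                         # constructor signature assumed
    crc.update(b'\x0e\x10\x43\x08')     # accumulate a few raw bytes
    crc.update(b'')                     # falsy input leaves the value untouched
    print(crc.value)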
8,760 | EVEprosper/ProsperCommon | prosper/common/prosper_logging.py | ProsperLogger.configure_discord_logger | def configure_discord_logger(
self,
discord_webhook=None,
discord_recipient=None,
log_level='ERROR',
log_format=ReportingFormats.PRETTY_PRINT.value,
custom_args=''
):
"""logger for sending messages to Discord. Easy way to alert humans of issues
Note:
Will try to overwrite minimum log level to enable requested log_level
Will warn and not attach the Discord logger if the webhook key is missing
Learn more about webhooks: https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks
Args:
discord_webhook (str): discord room webhook (full URL)
discord_recipient (`str`:<@int>, optional): user/group to notify
log_level (str): desired log level for handle https://docs.python.org/3/library/logging.html#logging-levels
log_format (str): format for logging messages https://docs.python.org/3/library/logging.html#logrecord-attributes
custom_args (str): special ID to include in messages
"""
# Override defaults if required #
discord_webhook = self.config.get_option(
'LOGGING', 'discord_webhook',
None, discord_webhook
)
discord_recipient = self.config.get_option(
'LOGGING', 'discord_recipient',
None, discord_recipient
)
log_level = self.config.get_option(
'LOGGING', 'discord_level',
None, log_level
)
# Actually build discord logging handler #
discord_obj = DiscordWebhook()
discord_obj.webhook(discord_webhook)
# vv TODO vv: Test review #
if discord_obj.can_query:
discord_handler = HackyDiscordHandler(
discord_obj,
discord_recipient
)
self._configure_common(
'discord_',
log_level,
log_format,
'Discord',
discord_handler,
custom_args=custom_args
)
else:
warnings.warn(
'Unable to execute webhook',
exceptions.WebhookCreateFailed
) | python | def configure_discord_logger(
self,
discord_webhook=None,
discord_recipient=None,
log_level='ERROR',
log_format=ReportingFormats.PRETTY_PRINT.value,
custom_args=''
):
"""logger for sending messages to Discord. Easy way to alert humans of issues
Note:
Will try to overwrite minimum log level to enable requested log_level
Will warn and not attach the Discord logger if the webhook key is missing
Learn more about webhooks: https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks
Args:
discord_webhook (str): discord room webhook (full URL)
discord_recipient (`str`:<@int>, optional): user/group to notify
log_level (str): desired log level for handle https://docs.python.org/3/library/logging.html#logging-levels
log_format (str): format for logging messages https://docs.python.org/3/library/logging.html#logrecord-attributes
custom_args (str): special ID to include in messages
"""
# Override defaults if required #
discord_webhook = self.config.get_option(
'LOGGING', 'discord_webhook',
None, discord_webhook
)
discord_recipient = self.config.get_option(
'LOGGING', 'discord_recipient',
None, discord_recipient
)
log_level = self.config.get_option(
'LOGGING', 'discord_level',
None, log_level
)
# Actually build discord logging handler #
discord_obj = DiscordWebhook()
discord_obj.webhook(discord_webhook)
# vv TODO vv: Test review #
if discord_obj.can_query:
discord_handler = HackyDiscordHandler(
discord_obj,
discord_recipient
)
self._configure_common(
'discord_',
log_level,
log_format,
'Discord',
discord_handler,
custom_args=custom_args
)
else:
warnings.warn(
'Unable to execute webhook',
exceptions.WebhookCreateFailed
) | ['def', 'configure_discord_logger', '(', 'self', ',', 'discord_webhook', '=', 'None', ',', 'discord_recipient', '=', 'None', ',', 'log_level', '=', "'ERROR'", ',', 'log_format', '=', 'ReportingFormats', '.', 'PRETTY_PRINT', '.', 'value', ',', 'custom_args', '=', "''", ')', ':', '# Override defaults if required #', 'discord_webhook', '=', 'self', '.', 'config', '.', 'get_option', '(', "'LOGGING'", ',', "'discord_webhook'", ',', 'None', ',', 'discord_webhook', ')', 'discord_recipient', '=', 'self', '.', 'config', '.', 'get_option', '(', "'LOGGING'", ',', "'discord_recipient'", ',', 'None', ',', 'discord_recipient', ')', 'log_level', '=', 'self', '.', 'config', '.', 'get_option', '(', "'LOGGING'", ',', "'discord_level'", ',', 'None', ',', 'log_level', ')', '# Actually build discord logging handler #', 'discord_obj', '=', 'DiscordWebhook', '(', ')', 'discord_obj', '.', 'webhook', '(', 'discord_webhook', ')', '# vv TODO vv: Test review #', 'if', 'discord_obj', '.', 'can_query', ':', 'discord_handler', '=', 'HackyDiscordHandler', '(', 'discord_obj', ',', 'discord_recipient', ')', 'self', '.', '_configure_common', '(', "'discord_'", ',', 'log_level', ',', 'log_format', ',', "'Discord'", ',', 'discord_handler', ',', 'custom_args', '=', 'custom_args', ')', 'else', ':', 'warnings', '.', 'warn', '(', "'Unable to execute webhook'", ',', 'exceptions', '.', 'WebhookCreateFailed', ')'] | logger for sending messages to Discord. Easy way to alert humans of issues
Note:
Will try to overwrite minimum log level to enable requested log_level
Will warn and not attach the Discord logger if the webhook key is missing
Learn more about webhooks: https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks
Args:
discord_webhook (str): discord room webhook (full URL)
discord_recipient (`str`:<@int>, optional): user/group to notify
log_level (str): desired log level for handle https://docs.python.org/3/library/logging.html#logging-levels
log_format (str): format for logging messages https://docs.python.org/3/library/logging.html#logrecord-attributes
custom_args (str): special ID to include in messages | ['logger', 'for', 'sending', 'messages', 'to', 'Discord', '.', 'Easy', 'way', 'to', 'alert', 'humans', 'of', 'issues'] | train | https://github.com/EVEprosper/ProsperCommon/blob/bcada3b25420099e1f204db8d55eb268e7b4dc27/prosper/common/prosper_logging.py#L243-L302 |
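A hedged sketch of wiring up the Discord handler from the row above; the ProsperLogger constructor arguments and the webhook URL are placeholders, not confirmed by this row:

    from prosper.common.prosper_logging import ProsperLogger   # import path assumed

    builder = ProsperLogger('my_app', '/tmp/logs')              # constructor args assumed
    builder.configure_discord_logger(
        discord_webhook='https://discordapp.com/api/webhooks/123/abc',   # placeholder webhook
        discord_recipient='<@123456789>',
        log_level='ERROR',
    )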
8,761 | gboeing/osmnx | osmnx/projection.py | project_graph | def project_graph(G, to_crs=None):
"""
Project a graph from lat-long to the UTM zone appropriate for its geographic
location.
Parameters
----------
G : networkx multidigraph
the networkx graph to be projected
to_crs : dict
if not None, just project to this CRS instead of to UTM
Returns
-------
networkx multidigraph
"""
G_proj = G.copy()
start_time = time.time()
# create a GeoDataFrame of the nodes, name it, convert osmid to str
nodes, data = zip(*G_proj.nodes(data=True))
gdf_nodes = gpd.GeoDataFrame(list(data), index=nodes)
gdf_nodes.crs = G_proj.graph['crs']
gdf_nodes.gdf_name = '{}_nodes'.format(G_proj.name)
# create new lat/lon columns just to save that data for later, and create a
# geometry column from x/y
gdf_nodes['lon'] = gdf_nodes['x']
gdf_nodes['lat'] = gdf_nodes['y']
gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1)
log('Created a GeoDataFrame from graph in {:,.2f} seconds'.format(time.time()-start_time))
# project the nodes GeoDataFrame to UTM
gdf_nodes_utm = project_gdf(gdf_nodes, to_crs=to_crs)
# extract data for all edges that have geometry attribute
edges_with_geom = []
for u, v, key, data in G_proj.edges(keys=True, data=True):
if 'geometry' in data:
edges_with_geom.append({'u':u, 'v':v, 'key':key, 'geometry':data['geometry']})
# create an edges GeoDataFrame and project to UTM, if there were any edges
# with a geometry attribute. geom attr only exists if graph has been
# simplified, otherwise you don't have to project anything for the edges
# because the nodes still contain all spatial data
if len(edges_with_geom) > 0:
gdf_edges = gpd.GeoDataFrame(edges_with_geom)
gdf_edges.crs = G_proj.graph['crs']
gdf_edges.gdf_name = '{}_edges'.format(G_proj.name)
gdf_edges_utm = project_gdf(gdf_edges, to_crs=to_crs)
# extract projected x and y values from the nodes' geometry column
start_time = time.time()
gdf_nodes_utm['x'] = gdf_nodes_utm['geometry'].map(lambda point: point.x)
gdf_nodes_utm['y'] = gdf_nodes_utm['geometry'].map(lambda point: point.y)
gdf_nodes_utm = gdf_nodes_utm.drop('geometry', axis=1)
log('Extracted projected node geometries from GeoDataFrame in {:,.2f} seconds'.format(time.time()-start_time))
# clear the graph to make it a blank slate for the projected data
start_time = time.time()
edges = list(G_proj.edges(keys=True, data=True))
graph_name = G_proj.graph['name']
G_proj.clear()
# add the projected nodes and all their attributes to the graph
G_proj.add_nodes_from(gdf_nodes_utm.index)
attributes = gdf_nodes_utm.to_dict()
for label in gdf_nodes_utm.columns:
nx.set_node_attributes(G_proj, name=label, values=attributes[label])
# add the edges and all their attributes (including reconstructed geometry,
# when it exists) to the graph
for u, v, key, attributes in edges:
if 'geometry' in attributes:
row = gdf_edges_utm[(gdf_edges_utm['u']==u) & (gdf_edges_utm['v']==v) & (gdf_edges_utm['key']==key)]
attributes['geometry'] = row['geometry'].iloc[0]
# attributes dict contains key, so we don't need to explicitly pass it here
G_proj.add_edge(u, v, **attributes)
# set the graph's CRS attribute to the new, projected CRS and return the
# projected graph
G_proj.graph['crs'] = gdf_nodes_utm.crs
G_proj.graph['name'] = '{}_UTM'.format(graph_name)
if 'streets_per_node' in G.graph:
G_proj.graph['streets_per_node'] = G.graph['streets_per_node']
log('Rebuilt projected graph in {:,.2f} seconds'.format(time.time()-start_time))
return G_proj | python | def project_graph(G, to_crs=None):
"""
Project a graph from lat-long to the UTM zone appropriate for its geographic
location.
Parameters
----------
G : networkx multidigraph
the networkx graph to be projected
to_crs : dict
if not None, just project to this CRS instead of to UTM
Returns
-------
networkx multidigraph
"""
G_proj = G.copy()
start_time = time.time()
# create a GeoDataFrame of the nodes, name it, convert osmid to str
nodes, data = zip(*G_proj.nodes(data=True))
gdf_nodes = gpd.GeoDataFrame(list(data), index=nodes)
gdf_nodes.crs = G_proj.graph['crs']
gdf_nodes.gdf_name = '{}_nodes'.format(G_proj.name)
# create new lat/lon columns just to save that data for later, and create a
# geometry column from x/y
gdf_nodes['lon'] = gdf_nodes['x']
gdf_nodes['lat'] = gdf_nodes['y']
gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1)
log('Created a GeoDataFrame from graph in {:,.2f} seconds'.format(time.time()-start_time))
# project the nodes GeoDataFrame to UTM
gdf_nodes_utm = project_gdf(gdf_nodes, to_crs=to_crs)
# extract data for all edges that have geometry attribute
edges_with_geom = []
for u, v, key, data in G_proj.edges(keys=True, data=True):
if 'geometry' in data:
edges_with_geom.append({'u':u, 'v':v, 'key':key, 'geometry':data['geometry']})
# create an edges GeoDataFrame and project to UTM, if there were any edges
# with a geometry attribute. geom attr only exists if graph has been
# simplified, otherwise you don't have to project anything for the edges
# because the nodes still contain all spatial data
if len(edges_with_geom) > 0:
gdf_edges = gpd.GeoDataFrame(edges_with_geom)
gdf_edges.crs = G_proj.graph['crs']
gdf_edges.gdf_name = '{}_edges'.format(G_proj.name)
gdf_edges_utm = project_gdf(gdf_edges, to_crs=to_crs)
# extract projected x and y values from the nodes' geometry column
start_time = time.time()
gdf_nodes_utm['x'] = gdf_nodes_utm['geometry'].map(lambda point: point.x)
gdf_nodes_utm['y'] = gdf_nodes_utm['geometry'].map(lambda point: point.y)
gdf_nodes_utm = gdf_nodes_utm.drop('geometry', axis=1)
log('Extracted projected node geometries from GeoDataFrame in {:,.2f} seconds'.format(time.time()-start_time))
# clear the graph to make it a blank slate for the projected data
start_time = time.time()
edges = list(G_proj.edges(keys=True, data=True))
graph_name = G_proj.graph['name']
G_proj.clear()
# add the projected nodes and all their attributes to the graph
G_proj.add_nodes_from(gdf_nodes_utm.index)
attributes = gdf_nodes_utm.to_dict()
for label in gdf_nodes_utm.columns:
nx.set_node_attributes(G_proj, name=label, values=attributes[label])
# add the edges and all their attributes (including reconstructed geometry,
# when it exists) to the graph
for u, v, key, attributes in edges:
if 'geometry' in attributes:
row = gdf_edges_utm[(gdf_edges_utm['u']==u) & (gdf_edges_utm['v']==v) & (gdf_edges_utm['key']==key)]
attributes['geometry'] = row['geometry'].iloc[0]
# attributes dict contains key, so we don't need to explicitly pass it here
G_proj.add_edge(u, v, **attributes)
# set the graph's CRS attribute to the new, projected CRS and return the
# projected graph
G_proj.graph['crs'] = gdf_nodes_utm.crs
G_proj.graph['name'] = '{}_UTM'.format(graph_name)
if 'streets_per_node' in G.graph:
G_proj.graph['streets_per_node'] = G.graph['streets_per_node']
log('Rebuilt projected graph in {:,.2f} seconds'.format(time.time()-start_time))
return G_proj | ['def', 'project_graph', '(', 'G', ',', 'to_crs', '=', 'None', ')', ':', 'G_proj', '=', 'G', '.', 'copy', '(', ')', 'start_time', '=', 'time', '.', 'time', '(', ')', '# create a GeoDataFrame of the nodes, name it, convert osmid to str', 'nodes', ',', 'data', '=', 'zip', '(', '*', 'G_proj', '.', 'nodes', '(', 'data', '=', 'True', ')', ')', 'gdf_nodes', '=', 'gpd', '.', 'GeoDataFrame', '(', 'list', '(', 'data', ')', ',', 'index', '=', 'nodes', ')', 'gdf_nodes', '.', 'crs', '=', 'G_proj', '.', 'graph', '[', "'crs'", ']', 'gdf_nodes', '.', 'gdf_name', '=', "'{}_nodes'", '.', 'format', '(', 'G_proj', '.', 'name', ')', '# create new lat/lon columns just to save that data for later, and create a', '# geometry column from x/y', 'gdf_nodes', '[', "'lon'", ']', '=', 'gdf_nodes', '[', "'x'", ']', 'gdf_nodes', '[', "'lat'", ']', '=', 'gdf_nodes', '[', "'y'", ']', 'gdf_nodes', '[', "'geometry'", ']', '=', 'gdf_nodes', '.', 'apply', '(', 'lambda', 'row', ':', 'Point', '(', 'row', '[', "'x'", ']', ',', 'row', '[', "'y'", ']', ')', ',', 'axis', '=', '1', ')', 'log', '(', "'Created a GeoDataFrame from graph in {:,.2f} seconds'", '.', 'format', '(', 'time', '.', 'time', '(', ')', '-', 'start_time', ')', ')', '# project the nodes GeoDataFrame to UTM', 'gdf_nodes_utm', '=', 'project_gdf', '(', 'gdf_nodes', ',', 'to_crs', '=', 'to_crs', ')', '# extract data for all edges that have geometry attribute', 'edges_with_geom', '=', '[', ']', 'for', 'u', ',', 'v', ',', 'key', ',', 'data', 'in', 'G_proj', '.', 'edges', '(', 'keys', '=', 'True', ',', 'data', '=', 'True', ')', ':', 'if', "'geometry'", 'in', 'data', ':', 'edges_with_geom', '.', 'append', '(', '{', "'u'", ':', 'u', ',', "'v'", ':', 'v', ',', "'key'", ':', 'key', ',', "'geometry'", ':', 'data', '[', "'geometry'", ']', '}', ')', '# create an edges GeoDataFrame and project to UTM, if there were any edges', '# with a geometry attribute. 
geom attr only exists if graph has been', "# simplified, otherwise you don't have to project anything for the edges", '# because the nodes still contain all spatial data', 'if', 'len', '(', 'edges_with_geom', ')', '>', '0', ':', 'gdf_edges', '=', 'gpd', '.', 'GeoDataFrame', '(', 'edges_with_geom', ')', 'gdf_edges', '.', 'crs', '=', 'G_proj', '.', 'graph', '[', "'crs'", ']', 'gdf_edges', '.', 'gdf_name', '=', "'{}_edges'", '.', 'format', '(', 'G_proj', '.', 'name', ')', 'gdf_edges_utm', '=', 'project_gdf', '(', 'gdf_edges', ',', 'to_crs', '=', 'to_crs', ')', "# extract projected x and y values from the nodes' geometry column", 'start_time', '=', 'time', '.', 'time', '(', ')', 'gdf_nodes_utm', '[', "'x'", ']', '=', 'gdf_nodes_utm', '[', "'geometry'", ']', '.', 'map', '(', 'lambda', 'point', ':', 'point', '.', 'x', ')', 'gdf_nodes_utm', '[', "'y'", ']', '=', 'gdf_nodes_utm', '[', "'geometry'", ']', '.', 'map', '(', 'lambda', 'point', ':', 'point', '.', 'y', ')', 'gdf_nodes_utm', '=', 'gdf_nodes_utm', '.', 'drop', '(', "'geometry'", ',', 'axis', '=', '1', ')', 'log', '(', "'Extracted projected node geometries from GeoDataFrame in {:,.2f} seconds'", '.', 'format', '(', 'time', '.', 'time', '(', ')', '-', 'start_time', ')', ')', '# clear the graph to make it a blank slate for the projected data', 'start_time', '=', 'time', '.', 'time', '(', ')', 'edges', '=', 'list', '(', 'G_proj', '.', 'edges', '(', 'keys', '=', 'True', ',', 'data', '=', 'True', ')', ')', 'graph_name', '=', 'G_proj', '.', 'graph', '[', "'name'", ']', 'G_proj', '.', 'clear', '(', ')', '# add the projected nodes and all their attributes to the graph', 'G_proj', '.', 'add_nodes_from', '(', 'gdf_nodes_utm', '.', 'index', ')', 'attributes', '=', 'gdf_nodes_utm', '.', 'to_dict', '(', ')', 'for', 'label', 'in', 'gdf_nodes_utm', '.', 'columns', ':', 'nx', '.', 'set_node_attributes', '(', 'G_proj', ',', 'name', '=', 'label', ',', 'values', '=', 'attributes', '[', 'label', ']', ')', '# add the edges and all their attributes (including reconstructed geometry,', '# when it exists) to the graph', 'for', 'u', ',', 'v', ',', 'key', ',', 'attributes', 'in', 'edges', ':', 'if', "'geometry'", 'in', 'attributes', ':', 'row', '=', 'gdf_edges_utm', '[', '(', 'gdf_edges_utm', '[', "'u'", ']', '==', 'u', ')', '&', '(', 'gdf_edges_utm', '[', "'v'", ']', '==', 'v', ')', '&', '(', 'gdf_edges_utm', '[', "'key'", ']', '==', 'key', ')', ']', 'attributes', '[', "'geometry'", ']', '=', 'row', '[', "'geometry'", ']', '.', 'iloc', '[', '0', ']', "# attributes dict contains key, so we don't need to explicitly pass it here", 'G_proj', '.', 'add_edge', '(', 'u', ',', 'v', ',', '*', '*', 'attributes', ')', "# set the graph's CRS attribute to the new, projected CRS and return the", '# projected graph', 'G_proj', '.', 'graph', '[', "'crs'", ']', '=', 'gdf_nodes_utm', '.', 'crs', 'G_proj', '.', 'graph', '[', "'name'", ']', '=', "'{}_UTM'", '.', 'format', '(', 'graph_name', ')', 'if', "'streets_per_node'", 'in', 'G', '.', 'graph', ':', 'G_proj', '.', 'graph', '[', "'streets_per_node'", ']', '=', 'G', '.', 'graph', '[', "'streets_per_node'", ']', 'log', '(', "'Rebuilt projected graph in {:,.2f} seconds'", '.', 'format', '(', 'time', '.', 'time', '(', ')', '-', 'start_time', ')', ')', 'return', 'G_proj'] | Project a graph from lat-long to the UTM zone appropriate for its geographic
location.
Parameters
----------
G : networkx multidigraph
the networkx graph to be projected
to_crs : dict
if not None, just project to this CRS instead of to UTM
Returns
-------
networkx multidigraph | ['Project', 'a', 'graph', 'from', 'lat', '-', 'long', 'to', 'the', 'UTM', 'zone', 'appropriate', 'for', 'its', 'geographic', 'location', '.'] | train | https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/projection.py#L126-L214 |
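Typical usage pairs project_graph() with a graph downloaded elsewhere in osmnx; the place name is arbitrary, and the dict form of to_crs matches the parameter type stated in the docstring above:

    import osmnx as ox

    G = ox.graph_from_place('Piedmont, California, USA', network_type='drive')
    G_utm = ox.project_graph(G)                                   # project to the local UTM zone
    G_wgs84 = ox.project_graph(G, to_crs={'init': 'epsg:4326'})   # or to an explicit CRS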
8,762 | bxlab/bx-python | lib/bx/align/epo.py | Chain.bedInterval | def bedInterval(self, who):
"return a BED6 entry, thus DOES coordinate conversion for minus strands"
if who == 't':
st, en = self.tStart, self.tEnd
if self.tStrand == '-':
st, en = self.tSize-en, self.tSize-st
return (self.tName, st, en, self.id, self.score, self.tStrand)
else:
st, en = self.qStart, self.qEnd
if self.qStrand == '-':
st, en = self.qSize-en, self.qSize-st
assert en-st == self.qEnd - self.qStart
return (self.qName, st, en, self.id, self.score, self.qStrand) | python | def bedInterval(self, who):
"return a BED6 entry, thus DOES coordinate conversion for minus strands"
if who == 't':
st, en = self.tStart, self.tEnd
if self.tStrand == '-':
st, en = self.tSize-en, self.tSize-st
return (self.tName, st, en, self.id, self.score, self.tStrand)
else:
st, en = self.qStart, self.qEnd
if self.qStrand == '-':
st, en = self.qSize-en, self.qSize-st
assert en-st == self.qEnd - self.qStart
return (self.qName, st, en, self.id, self.score, self.qStrand) | ['def', 'bedInterval', '(', 'self', ',', 'who', ')', ':', 'if', 'who', '==', "'t'", ':', 'st', ',', 'en', '=', 'self', '.', 'tStart', ',', 'self', '.', 'tEnd', 'if', 'self', '.', 'tStrand', '==', "'-'", ':', 'st', ',', 'en', '=', 'self', '.', 'tSize', '-', 'en', ',', 'self', '.', 'tSize', '-', 'st', 'return', '(', 'self', '.', 'tName', ',', 'st', ',', 'en', ',', 'self', '.', 'id', ',', 'self', '.', 'score', ',', 'self', '.', 'tStrand', ')', 'else', ':', 'st', ',', 'en', '=', 'self', '.', 'qStart', ',', 'self', '.', 'qEnd', 'if', 'self', '.', 'qStrand', '==', "'-'", ':', 'st', ',', 'en', '=', 'self', '.', 'qSize', '-', 'en', ',', 'self', '.', 'qSize', '-', 'st', 'assert', 'en', '-', 'st', '==', 'self', '.', 'qEnd', '-', 'self', '.', 'qStart', 'return', '(', 'self', '.', 'qName', ',', 'st', ',', 'en', ',', 'self', '.', 'id', ',', 'self', '.', 'score', ',', 'self', '.', 'qStrand', ')'] | return a BED6 entry, thus DOES coordinate conversion for minus strands | ['return', 'a', 'BED6', 'entry', 'thus', 'DOES', 'coordinate', 'conversion', 'for', 'minus', 'strands'] | train | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/epo.py#L135-L148 |
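A small sketch of reading both sides of a chain with the method above; how the Chain object is parsed is outside this row, so it is only assumed to exist:

    # 'chain' is assumed to be a bx.align.epo.Chain built from a chain file elsewhere.
    chrom, start, end, chain_id, score, strand = chain.bedInterval('t')   # target side
    q_chrom, q_start, q_end, _, _, q_strand = chain.bedInterval('q')      # query side, strand-adjusted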
8,763 | pyQode/pyqode.core | pyqode/core/api/code_edit.py | CodeEdit.cut | def cut(self):
"""
Cuts the selected text or the whole line if no text was selected.
"""
tc = self.textCursor()
helper = TextHelper(self)
tc.beginEditBlock()
no_selection = False
sText = tc.selection().toPlainText()
if not helper.current_line_text() and sText.count("\n") > 1:
tc.deleteChar()
else:
if not self.textCursor().hasSelection():
no_selection = True
TextHelper(self).select_whole_line()
super(CodeEdit, self).cut()
if no_selection:
tc.deleteChar()
tc.endEditBlock()
self.setTextCursor(tc) | python | def cut(self):
"""
Cuts the selected text or the whole line if no text was selected.
"""
tc = self.textCursor()
helper = TextHelper(self)
tc.beginEditBlock()
no_selection = False
sText = tc.selection().toPlainText()
if not helper.current_line_text() and sText.count("\n") > 1:
tc.deleteChar()
else:
if not self.textCursor().hasSelection():
no_selection = True
TextHelper(self).select_whole_line()
super(CodeEdit, self).cut()
if no_selection:
tc.deleteChar()
tc.endEditBlock()
self.setTextCursor(tc) | ['def', 'cut', '(', 'self', ')', ':', 'tc', '=', 'self', '.', 'textCursor', '(', ')', 'helper', '=', 'TextHelper', '(', 'self', ')', 'tc', '.', 'beginEditBlock', '(', ')', 'no_selection', '=', 'False', 'sText', '=', 'tc', '.', 'selection', '(', ')', '.', 'toPlainText', '(', ')', 'if', 'not', 'helper', '.', 'current_line_text', '(', ')', 'and', 'sText', '.', 'count', '(', '"\\n"', ')', '>', '1', ':', 'tc', '.', 'deleteChar', '(', ')', 'else', ':', 'if', 'not', 'self', '.', 'textCursor', '(', ')', '.', 'hasSelection', '(', ')', ':', 'no_selection', '=', 'True', 'TextHelper', '(', 'self', ')', '.', 'select_whole_line', '(', ')', 'super', '(', 'CodeEdit', ',', 'self', ')', '.', 'cut', '(', ')', 'if', 'no_selection', ':', 'tc', '.', 'deleteChar', '(', ')', 'tc', '.', 'endEditBlock', '(', ')', 'self', '.', 'setTextCursor', '(', 'tc', ')'] | Cuts the selected text or the whole line if no text was selected. | ['Cuts', 'the', 'selected', 'text', 'or', 'the', 'whole', 'line', 'if', 'no', 'text', 'was', 'selected', '.'] | train | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/api/code_edit.py#L869-L888 |
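A hedged sketch of the cut-whole-line behaviour; the Qt binding import and the public import path for CodeEdit are assumptions:

    import sys
    from pyqode.qt import QtWidgets        # Qt binding shim, assumed
    from pyqode.core.api import CodeEdit   # import path assumed

    app = QtWidgets.QApplication(sys.argv)
    editor = CodeEdit()
    editor.setPlainText('first line\nsecond line')
    editor.cut()    # with no selection, the whole current line is cut to the clipboard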
8,764 | gbiggs/rtctree | rtctree/component.py | Component.activate_conf_set | def activate_conf_set(self, set_name):
'''Activate a configuration set by name.
@raises NoSuchConfSetError
'''
with self._mutex:
if not set_name in self.conf_sets:
raise exceptions.NoSuchConfSetError(set_name)
self._conf.activate_configuration_set(set_name) | python | def activate_conf_set(self, set_name):
'''Activate a configuration set by name.
@raises NoSuchConfSetError
'''
with self._mutex:
if not set_name in self.conf_sets:
raise exceptions.NoSuchConfSetError(set_name)
self._conf.activate_configuration_set(set_name) | ['def', 'activate_conf_set', '(', 'self', ',', 'set_name', ')', ':', 'with', 'self', '.', '_mutex', ':', 'if', 'not', 'set_name', 'in', 'self', '.', 'conf_sets', ':', 'raise', 'exceptions', '.', 'NoSuchConfSetError', '(', 'set_name', ')', 'self', '.', '_conf', '.', 'activate_configuration_set', '(', 'set_name', ')'] | Activate a configuration set by name.
@raises NoSuchConfSetError | ['Activate', 'a', 'configuration', 'set', 'by', 'name', '.'] | train | https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/component.py#L1056-L1065 |
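A sketch of activating a named configuration set; obtaining the Component from a populated RTCTree is assumed to have happened elsewhere:

    # 'comp' is assumed to be an rtctree Component instance.
    if 'default' in comp.conf_sets:
        comp.activate_conf_set('default')
    else:
        print('component has no "default" configuration set')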
8,765 | gmr/helper | helper/unix.py | Daemon._get_pidfile_path | def _get_pidfile_path(self):
"""Return the normalized path for the pidfile, raising an
exception if it cannot be written to.
:return: str
:raises: ValueError
:raises: OSError
"""
if self.config.daemon.pidfile:
pidfile = path.abspath(self.config.daemon.pidfile)
if not os.access(path.dirname(pidfile), os.W_OK):
raise ValueError('Cannot write to specified pid file path'
' %s' % pidfile)
return pidfile
app = sys.argv[0].split('/')[-1]
for pidfile in ['%s/pids/%s.pid' % (os.getcwd(), app),
'/var/run/%s.pid' % app,
'/var/run/%s/%s.pid' % (app, app),
'/var/tmp/%s.pid' % app,
'/tmp/%s.pid' % app,
'%s.pid' % app]:
if os.access(path.dirname(pidfile), os.W_OK):
return pidfile
raise OSError('Could not find an appropriate place for a pid file') | python | def _get_pidfile_path(self):
"""Return the normalized path for the pidfile, raising an
exception if it cannot be written to.
:return: str
:raises: ValueError
:raises: OSError
"""
if self.config.daemon.pidfile:
pidfile = path.abspath(self.config.daemon.pidfile)
if not os.access(path.dirname(pidfile), os.W_OK):
raise ValueError('Cannot write to specified pid file path'
' %s' % pidfile)
return pidfile
app = sys.argv[0].split('/')[-1]
for pidfile in ['%s/pids/%s.pid' % (os.getcwd(), app),
'/var/run/%s.pid' % app,
'/var/run/%s/%s.pid' % (app, app),
'/var/tmp/%s.pid' % app,
'/tmp/%s.pid' % app,
'%s.pid' % app]:
if os.access(path.dirname(pidfile), os.W_OK):
return pidfile
raise OSError('Could not find an appropriate place for a pid file') | ['def', '_get_pidfile_path', '(', 'self', ')', ':', 'if', 'self', '.', 'config', '.', 'daemon', '.', 'pidfile', ':', 'pidfile', '=', 'path', '.', 'abspath', '(', 'self', '.', 'config', '.', 'daemon', '.', 'pidfile', ')', 'if', 'not', 'os', '.', 'access', '(', 'path', '.', 'dirname', '(', 'pidfile', ')', ',', 'os', '.', 'W_OK', ')', ':', 'raise', 'ValueError', '(', "'Cannot write to specified pid file path'", "' %s'", '%', 'pidfile', ')', 'return', 'pidfile', 'app', '=', 'sys', '.', 'argv', '[', '0', ']', '.', 'split', '(', "'/'", ')', '[', '-', '1', ']', 'for', 'pidfile', 'in', '[', "'%s/pids/%s.pid'", '%', '(', 'os', '.', 'getcwd', '(', ')', ',', 'app', ')', ',', "'/var/run/%s.pid'", '%', 'app', ',', "'/var/run/%s/%s.pid'", '%', '(', 'app', ',', 'app', ')', ',', "'/var/tmp/%s.pid'", '%', 'app', ',', "'/tmp/%s.pid'", '%', 'app', ',', "'%s.pid'", '%', 'app', ']', ':', 'if', 'os', '.', 'access', '(', 'path', '.', 'dirname', '(', 'pidfile', ')', ',', 'os', '.', 'W_OK', ')', ':', 'return', 'pidfile', 'raise', 'OSError', '(', "'Could not find an appropriate place for a pid file'", ')'] | Return the normalized path for the pidfile, raising an
exception if it cannot be written to.
:return: str
:raises: ValueError
:raises: OSError | ['Return', 'the', 'normalized', 'path', 'for', 'the', 'pidfile', 'raising', 'an', 'exception', 'if', 'it', 'can', 'not', 'written', 'to', '.'] | train | https://github.com/gmr/helper/blob/fe8e45fc8eabf619429b2940c682c252ee33c082/helper/unix.py#L219-L243 |
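The fallback order used by the method above can be illustrated with a standalone sketch; this does not call the private method itself, and it ignores the config.daemon.pidfile override the real code honours first:

    import os
    import sys
    from os import path

    app = sys.argv[0].split('/')[-1] or 'myapp'
    candidates = ['%s/pids/%s.pid' % (os.getcwd(), app),
                  '/var/run/%s.pid' % app,
                  '/var/tmp/%s.pid' % app,
                  '/tmp/%s.pid' % app,
                  '%s.pid' % app]
    # Pick the first candidate whose directory is writable.
    pidfile = next(p for p in candidates if os.access(path.dirname(p) or '.', os.W_OK))
    print(pidfile)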
8,766 | aws/aws-encryption-sdk-python | src/aws_encryption_sdk/internal/formatting/encryption_context.py | serialize_encryption_context | def serialize_encryption_context(encryption_context):
"""Serializes the contents of a dictionary into a byte string.
:param dict encryption_context: Dictionary of encryption context keys/values.
:returns: Serialized encryption context
:rtype: bytes
"""
if not encryption_context:
return bytes()
serialized_context = bytearray()
dict_size = len(encryption_context)
if dict_size > aws_encryption_sdk.internal.defaults.MAX_BYTE_ARRAY_SIZE:
raise SerializationError("The encryption context contains too many elements.")
serialized_context.extend(struct.pack(">H", dict_size))
# Encode strings first to catch bad values.
encryption_context_list = []
for key, value in encryption_context.items():
try:
if isinstance(key, bytes):
key = codecs.decode(key)
if isinstance(value, bytes):
value = codecs.decode(value)
encryption_context_list.append(
(aws_encryption_sdk.internal.str_ops.to_bytes(key), aws_encryption_sdk.internal.str_ops.to_bytes(value))
)
except Exception:
raise SerializationError(
"Cannot encode dictionary key or value using {}.".format(aws_encryption_sdk.internal.defaults.ENCODING)
)
for key, value in sorted(encryption_context_list, key=lambda x: x[0]):
serialized_context.extend(
struct.pack(
">H{key_size}sH{value_size}s".format(key_size=len(key), value_size=len(value)),
len(key),
key,
len(value),
value,
)
)
if len(serialized_context) > aws_encryption_sdk.internal.defaults.MAX_BYTE_ARRAY_SIZE:
raise SerializationError("The serialized context is too large.")
return bytes(serialized_context) | python | def serialize_encryption_context(encryption_context):
"""Serializes the contents of a dictionary into a byte string.
:param dict encryption_context: Dictionary of encryption context keys/values.
:returns: Serialized encryption context
:rtype: bytes
"""
if not encryption_context:
return bytes()
serialized_context = bytearray()
dict_size = len(encryption_context)
if dict_size > aws_encryption_sdk.internal.defaults.MAX_BYTE_ARRAY_SIZE:
raise SerializationError("The encryption context contains too many elements.")
serialized_context.extend(struct.pack(">H", dict_size))
# Encode strings first to catch bad values.
encryption_context_list = []
for key, value in encryption_context.items():
try:
if isinstance(key, bytes):
key = codecs.decode(key)
if isinstance(value, bytes):
value = codecs.decode(value)
encryption_context_list.append(
(aws_encryption_sdk.internal.str_ops.to_bytes(key), aws_encryption_sdk.internal.str_ops.to_bytes(value))
)
except Exception:
raise SerializationError(
"Cannot encode dictionary key or value using {}.".format(aws_encryption_sdk.internal.defaults.ENCODING)
)
for key, value in sorted(encryption_context_list, key=lambda x: x[0]):
serialized_context.extend(
struct.pack(
">H{key_size}sH{value_size}s".format(key_size=len(key), value_size=len(value)),
len(key),
key,
len(value),
value,
)
)
if len(serialized_context) > aws_encryption_sdk.internal.defaults.MAX_BYTE_ARRAY_SIZE:
raise SerializationError("The serialized context is too large.")
return bytes(serialized_context) | ['def', 'serialize_encryption_context', '(', 'encryption_context', ')', ':', 'if', 'not', 'encryption_context', ':', 'return', 'bytes', '(', ')', 'serialized_context', '=', 'bytearray', '(', ')', 'dict_size', '=', 'len', '(', 'encryption_context', ')', 'if', 'dict_size', '>', 'aws_encryption_sdk', '.', 'internal', '.', 'defaults', '.', 'MAX_BYTE_ARRAY_SIZE', ':', 'raise', 'SerializationError', '(', '"The encryption context contains too many elements."', ')', 'serialized_context', '.', 'extend', '(', 'struct', '.', 'pack', '(', '">H"', ',', 'dict_size', ')', ')', '# Encode strings first to catch bad values.', 'encryption_context_list', '=', '[', ']', 'for', 'key', ',', 'value', 'in', 'encryption_context', '.', 'items', '(', ')', ':', 'try', ':', 'if', 'isinstance', '(', 'key', ',', 'bytes', ')', ':', 'key', '=', 'codecs', '.', 'decode', '(', 'key', ')', 'if', 'isinstance', '(', 'value', ',', 'bytes', ')', ':', 'value', '=', 'codecs', '.', 'decode', '(', 'value', ')', 'encryption_context_list', '.', 'append', '(', '(', 'aws_encryption_sdk', '.', 'internal', '.', 'str_ops', '.', 'to_bytes', '(', 'key', ')', ',', 'aws_encryption_sdk', '.', 'internal', '.', 'str_ops', '.', 'to_bytes', '(', 'value', ')', ')', ')', 'except', 'Exception', ':', 'raise', 'SerializationError', '(', '"Cannot encode dictionary key or value using {}."', '.', 'format', '(', 'aws_encryption_sdk', '.', 'internal', '.', 'defaults', '.', 'ENCODING', ')', ')', 'for', 'key', ',', 'value', 'in', 'sorted', '(', 'encryption_context_list', ',', 'key', '=', 'lambda', 'x', ':', 'x', '[', '0', ']', ')', ':', 'serialized_context', '.', 'extend', '(', 'struct', '.', 'pack', '(', '">H{key_size}sH{value_size}s"', '.', 'format', '(', 'key_size', '=', 'len', '(', 'key', ')', ',', 'value_size', '=', 'len', '(', 'value', ')', ')', ',', 'len', '(', 'key', ')', ',', 'key', ',', 'len', '(', 'value', ')', ',', 'value', ',', ')', ')', 'if', 'len', '(', 'serialized_context', ')', '>', 'aws_encryption_sdk', '.', 'internal', '.', 'defaults', '.', 'MAX_BYTE_ARRAY_SIZE', ':', 'raise', 'SerializationError', '(', '"The serialized context is too large."', ')', 'return', 'bytes', '(', 'serialized_context', ')'] | Serializes the contents of a dictionary into a byte string.
:param dict encryption_context: Dictionary of encryption context keys/values.
:returns: Serialized encryption context
:rtype: bytes | ['Serializes', 'the', 'contents', 'of', 'a', 'dictionary', 'into', 'a', 'byte', 'string', '.'] | train | https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/formatting/encryption_context.py#L50-L96 |
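Calling the serializer directly shows the wire layout described above; the context keys are placeholders:

    from aws_encryption_sdk.internal.formatting.encryption_context import (
        serialize_encryption_context,
    )

    raw = serialize_encryption_context({'purpose': 'test', 'tenant': 'example'})
    print(len(raw), raw[:2])   # the first two bytes are the big-endian pair count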
8,767 | tehmaze/natural | natural/phone.py | meid | def meid(number, separator=u' '):
'''
Printable Mobile Equipment Identifier (MEID) number.
>>> print(meid(123456789012345678))
1B 69B4BA 630F34 6
>>> print(meid('1B69B4BA630F34'))
1B 69B4BA 630F34 6
'''
if isinstance(number, six.string_types):
number = re.sub(r'[\s-]', '', number)
try:
number = '%014X' % int(number, 16)
except ValueError:
if len(number) < 18 and number.isdigit():
return meid('%014X' % int(number), separator)
else:
raise ValueError(_('Invalid MEID, size mismatch'))
else:
if len(number) not in (14, 15):
raise ValueError(_('Invalid MEID, size mismatch'))
elif isinstance(number, six.integer_types):
if number > 0xfffffffffffffff:
raise ValueError(_('Invalid MEID, size mismatch'))
return meid(('%014X' % number)[:14], separator)
else:
raise TypeError(_('Invalid MEID, input type invalid'))
number = number.upper()
region = number[:2]
manufacturer = number[2:8]
serial_number = number[8:14]
check_digit = number[14:]
if check_digit == '':
check_digit = luhn_calc(number, chars='0123456789ABCDEF')
groups = (region, manufacturer, serial_number, check_digit)
return separator.join(list(filter(None, groups))) | python | def meid(number, separator=u' '):
'''
Printable Mobile Equipment Identifier (MEID) number.
>>> print(meid(123456789012345678))
1B 69B4BA 630F34 6
>>> print(meid('1B69B4BA630F34'))
1B 69B4BA 630F34 6
'''
if isinstance(number, six.string_types):
number = re.sub(r'[\s-]', '', number)
try:
number = '%014X' % int(number, 16)
except ValueError:
if len(number) < 18 and number.isdigit():
return meid('%014X' % int(number), separator)
else:
raise ValueError(_('Invalid MEID, size mismatch'))
else:
if len(number) not in (14, 15):
raise ValueError(_('Invalid MEID, size mismatch'))
elif isinstance(number, six.integer_types):
if number > 0xfffffffffffffff:
raise ValueError(_('Invalid MEID, size mismatch'))
return meid(('%014X' % number)[:14], separator)
else:
raise TypeError(_('Invalid MEID, input type invalid'))
number = number.upper()
region = number[:2]
manufacturer = number[2:8]
serial_number = number[8:14]
check_digit = number[14:]
if check_digit == '':
check_digit = luhn_calc(number, chars='0123456789ABCDEF')
groups = (region, manufacturer, serial_number, check_digit)
return separator.join(list(filter(None, groups))) | ['def', 'meid', '(', 'number', ',', 'separator', '=', "u' '", ')', ':', 'if', 'isinstance', '(', 'number', ',', 'six', '.', 'string_types', ')', ':', 'number', '=', 're', '.', 'sub', '(', "r'[\\s-]'", ',', "''", ',', 'number', ')', 'try', ':', 'number', '=', "'%014X'", '%', 'int', '(', 'number', ',', '16', ')', 'except', 'ValueError', ':', 'if', 'len', '(', 'number', ')', '<', '18', 'and', 'number', '.', 'isdigit', '(', ')', ':', 'return', 'meid', '(', "'%014X'", '%', 'int', '(', 'number', ')', ',', 'separator', ')', 'else', ':', 'raise', 'ValueError', '(', '_', '(', "'Invalid MEID, size mismatch'", ')', ')', 'else', ':', 'if', 'len', '(', 'number', ')', 'not', 'in', '(', '14', ',', '15', ')', ':', 'raise', 'ValueError', '(', '_', '(', "'Invalid MEID, size mismatch'", ')', ')', 'elif', 'isinstance', '(', 'number', ',', 'six', '.', 'integer_types', ')', ':', 'if', 'number', '>', '0xfffffffffffffff', ':', 'raise', 'ValueError', '(', '_', '(', "'Invalid MEID, size mismatch'", ')', ')', 'return', 'meid', '(', '(', "'%014X'", '%', 'number', ')', '[', ':', '14', ']', ',', 'separator', ')', 'else', ':', 'raise', 'TypeError', '(', '_', '(', "'Invalid MEID, input type invalid'", ')', ')', 'number', '=', 'number', '.', 'upper', '(', ')', 'region', '=', 'number', '[', ':', '2', ']', 'manufacturer', '=', 'number', '[', '2', ':', '8', ']', 'serial_number', '=', 'number', '[', '8', ':', '14', ']', 'check_digit', '=', 'number', '[', '14', ':', ']', 'if', 'check_digit', '==', "''", ':', 'check_digit', '=', 'luhn_calc', '(', 'number', ',', 'chars', '=', "'0123456789ABCDEF'", ')', 'groups', '=', '(', 'region', ',', 'manufacturer', ',', 'serial_number', ',', 'check_digit', ')', 'return', 'separator', '.', 'join', '(', 'list', '(', 'filter', '(', 'None', ',', 'groups', ')', ')', ')'] | Printable Mobile Equipment Identifier (MEID) number.
>>> print(meid(123456789012345678))
1B 69B4BA 630F34 6
>>> print(meid('1B69B4BA630F34'))
1B 69B4BA 630F34 6 | ['Printable', 'Mobile', 'Equipment', 'Identifier', '(', 'MEID', ')', 'number', '.'] | train | https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/phone.py#L189-L231 |
8,768 | gofed/gofedlib | gofedlib/snapshot/capturer.py | ProjectCapturer.capture | def capture(self, commit = ""):
"""Capture the current state of a project based on its provider
Commit is relevant only for upstream providers.
If empty, the latest commit from provider repository is taken.
It is ignored for distribution providers.
:param provider: project provider, e.g. upstream repository, distribution builder
:type provider: json/dict
:param commit: project's original commit
:type commit: string
"""
self._validateProvider(self._provider)
# get client for repository
		# TODO(jchaloup): read config file to switch between local and remote clients
		# TODO(jchaloup): remote client can cover gofed infrastructure or any remote source for repository info
client = RepositoryClientBuilder().buildWithRemoteClient(self._provider)
if self._provider["provider"] == "github":
self._signature = ProjectGithubRepositoryCapturer(self._provider, client).capture(commit).signature()
elif self._provider["provider"] == "bitbucket":
self._signature = ProjectBitbucketRepositoryCapturer(self._provider, client).capture(commit).signature()
else:
raise KeyError("Provider '%s' not recognized" % self._provider["provider"])
return self | python | def capture(self, commit = ""):
"""Capture the current state of a project based on its provider
Commit is relevant only for upstream providers.
If empty, the latest commit from provider repository is taken.
It is ignored for distribution providers.
:param provider: project provider, e.g. upstream repository, distribution builder
:type provider: json/dict
:param commit: project's original commit
:type commit: string
"""
self._validateProvider(self._provider)
# get client for repository
		# TODO(jchaloup): read config file to switch between local and remote clients
		# TODO(jchaloup): remote client can cover gofed infrastructure or any remote source for repository info
client = RepositoryClientBuilder().buildWithRemoteClient(self._provider)
if self._provider["provider"] == "github":
self._signature = ProjectGithubRepositoryCapturer(self._provider, client).capture(commit).signature()
elif self._provider["provider"] == "bitbucket":
self._signature = ProjectBitbucketRepositoryCapturer(self._provider, client).capture(commit).signature()
else:
raise KeyError("Provider '%s' not recognized" % self._provider["provider"])
return self | ['def', 'capture', '(', 'self', ',', 'commit', '=', '""', ')', ':', 'self', '.', '_validateProvider', '(', 'self', '.', '_provider', ')', '# get client for repository', '# TODO(jchaloup): read config file to switch between local and remove clients', '# TODO(jchaloup): remote client can cover gofed infratructure or any remove source for repository info', 'client', '=', 'RepositoryClientBuilder', '(', ')', '.', 'buildWithRemoteClient', '(', 'self', '.', '_provider', ')', 'if', 'self', '.', '_provider', '[', '"provider"', ']', '==', '"github"', ':', 'self', '.', '_signature', '=', 'ProjectGithubRepositoryCapturer', '(', 'self', '.', '_provider', ',', 'client', ')', '.', 'capture', '(', 'commit', ')', '.', 'signature', '(', ')', 'elif', 'self', '.', '_provider', '[', '"provider"', ']', '==', '"bitbucket"', ':', 'self', '.', '_signature', '=', 'ProjectBitbucketRepositoryCapturer', '(', 'self', '.', '_provider', ',', 'client', ')', '.', 'capture', '(', 'commit', ')', '.', 'signature', '(', ')', 'else', ':', 'raise', 'KeyError', '(', '"Provider \'%s\' not recognized"', '%', 'self', '.', '_provider', '[', '"provider"', ']', ')', 'return', 'self'] | Capture the current state of a project based on its provider
Commit is relevant only for upstream providers.
If empty, the latest commit from provider repository is taken.
It is ignored for distribution providers.
:param provider: project provider, e.g. upstream repository, distribution builder
:type provider: json/dict
:param commit: project's original commit
:type commit: string | ['Capture', 'the', 'current', 'state', 'of', 'a', 'project', 'based', 'on', 'its', 'provider'] | train | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/snapshot/capturer.py#L25-L51 |
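A heavily hedged sketch of the capture flow; the provider dictionary keys and the ProjectCapturer constructor are assumptions, since only capture() is shown in this row:

    provider = {'provider': 'github', 'username': 'golang', 'project': 'tools'}   # keys assumed

    capturer = ProjectCapturer(provider)                    # constructor argument assumed
    signature = capturer.capture('0f9d71c').signature()     # commit hash is a placeholder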
8,769 | glitchassassin/lackey | lackey/PlatformManagerWindows.py | PlatformManagerWindows.getWindowRect | def getWindowRect(self, hwnd):
""" Returns a rect (x,y,w,h) for the specified window's area """
rect = ctypes.wintypes.RECT()
if ctypes.windll.user32.GetWindowRect(hwnd, ctypes.byref(rect)):
x1 = rect.left
y1 = rect.top
x2 = rect.right
y2 = rect.bottom
return (x1, y1, x2-x1, y2-y1)
return None | python | def getWindowRect(self, hwnd):
""" Returns a rect (x,y,w,h) for the specified window's area """
rect = ctypes.wintypes.RECT()
if ctypes.windll.user32.GetWindowRect(hwnd, ctypes.byref(rect)):
x1 = rect.left
y1 = rect.top
x2 = rect.right
y2 = rect.bottom
return (x1, y1, x2-x1, y2-y1)
return None | ['def', 'getWindowRect', '(', 'self', ',', 'hwnd', ')', ':', 'rect', '=', 'ctypes', '.', 'wintypes', '.', 'RECT', '(', ')', 'if', 'ctypes', '.', 'windll', '.', 'user32', '.', 'GetWindowRect', '(', 'hwnd', ',', 'ctypes', '.', 'byref', '(', 'rect', ')', ')', ':', 'x1', '=', 'rect', '.', 'left', 'y1', '=', 'rect', '.', 'top', 'x2', '=', 'rect', '.', 'right', 'y2', '=', 'rect', '.', 'bottom', 'return', '(', 'x1', ',', 'y1', ',', 'x2', '-', 'x1', ',', 'y2', '-', 'y1', ')', 'return', 'None'] | Returns a rect (x,y,w,h) for the specified window's area | ['Returns', 'a', 'rect', '(', 'x', 'y', 'w', 'h', ')', 'for', 'the', 'specified', 'window', 's', 'area'] | train | https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerWindows.py#L547-L556 |
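A Windows-only sketch; GetForegroundWindow() is a standard Win32 call used here just to obtain some valid handle, and the class import path is taken from this row's file path:

    import ctypes
    from lackey.PlatformManagerWindows import PlatformManagerWindows

    pm = PlatformManagerWindows()
    hwnd = ctypes.windll.user32.GetForegroundWindow()   # any valid window handle works
    rect = pm.getWindowRect(hwnd)
    if rect is not None:
        x, y, w, h = rect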
8,770 | watson-developer-cloud/python-sdk | ibm_watson/discovery_v1.py | Expansions._from_dict | def _from_dict(cls, _dict):
"""Initialize a Expansions object from a json dictionary."""
args = {}
if 'expansions' in _dict:
args['expansions'] = [
Expansion._from_dict(x) for x in (_dict.get('expansions'))
]
else:
raise ValueError(
'Required property \'expansions\' not present in Expansions JSON'
)
return cls(**args) | python | def _from_dict(cls, _dict):
"""Initialize a Expansions object from a json dictionary."""
args = {}
if 'expansions' in _dict:
args['expansions'] = [
Expansion._from_dict(x) for x in (_dict.get('expansions'))
]
else:
raise ValueError(
'Required property \'expansions\' not present in Expansions JSON'
)
return cls(**args) | ['def', '_from_dict', '(', 'cls', ',', '_dict', ')', ':', 'args', '=', '{', '}', 'if', "'expansions'", 'in', '_dict', ':', 'args', '[', "'expansions'", ']', '=', '[', 'Expansion', '.', '_from_dict', '(', 'x', ')', 'for', 'x', 'in', '(', '_dict', '.', 'get', '(', "'expansions'", ')', ')', ']', 'else', ':', 'raise', 'ValueError', '(', "'Required property \\'expansions\\' not present in Expansions JSON'", ')', 'return', 'cls', '(', '*', '*', 'args', ')'] | Initialize a Expansions object from a json dictionary. | ['Initialize', 'a', 'Expansions', 'object', 'from', 'a', 'json', 'dictionary', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L5960-L5971 |
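A hedged sketch of deserializing the expansions payload; the per-expansion field names are assumptions based on the Discovery expansion schema and are not confirmed by this row:

    payload = {'expansions': [{'input_terms': ['car'],
                               'expanded_terms': ['car', 'automobile']}]}   # field names assumed
    expansions = Expansions._from_dict(payload)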
8,771 | ANCIR/granoloader | granoloader/command.py | schema | def schema(ctx, schema):
""" Load schema definitions from a YAML file. """
data = yaml.load(schema)
if not isinstance(data, (list, tuple)):
data = [data]
with click.progressbar(data, label=schema.name) as bar:
for schema in bar:
ctx.obj['grano'].schemata.upsert(schema) | python | def schema(ctx, schema):
""" Load schema definitions from a YAML file. """
data = yaml.load(schema)
if not isinstance(data, (list, tuple)):
data = [data]
with click.progressbar(data, label=schema.name) as bar:
for schema in bar:
ctx.obj['grano'].schemata.upsert(schema) | ['def', 'schema', '(', 'ctx', ',', 'schema', ')', ':', 'data', '=', 'yaml', '.', 'load', '(', 'schema', ')', 'if', 'not', 'isinstance', '(', 'data', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'data', '=', '[', 'data', ']', 'with', 'click', '.', 'progressbar', '(', 'data', ',', 'label', '=', 'schema', '.', 'name', ')', 'as', 'bar', ':', 'for', 'schema', 'in', 'bar', ':', 'ctx', '.', 'obj', '[', "'grano'", ']', '.', 'schemata', '.', 'upsert', '(', 'schema', ')'] | Load schema definitions from a YAML file. | ['Load', 'schema', 'definitions', 'from', 'a', 'YAML', 'file', '.'] | train | https://github.com/ANCIR/granoloader/blob/c48b1bd50403dd611340c5f51637f7c5ca54059c/granoloader/command.py#L111-L118 |
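Since this is a click command that reads the grano client from ctx.obj, one hedged way to exercise it in code is through click's test runner; the command import path and the client object are assumptions:

    from click.testing import CliRunner
    from granoloader.command import schema as schema_cmd   # import path assumed

    runner = CliRunner()
    # 'grano_client' stands in for a configured grano API client built elsewhere.
    result = runner.invoke(schema_cmd, ['schema.yaml'], obj={'grano': grano_client})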
8,772 | BernardFW/bernard | src/bernard/i18n/translator.py | unserialize | def unserialize(wd: WordDictionary, text: Dict):
"""
Transforms back a serialized value of `serialize()`
"""
if not isinstance(text, Mapping):
raise ValueError('Text has not the right format')
try:
t = text['type']
if t == 'string':
return text['value']
elif t == 'trans':
if not isinstance(text['params'], Mapping):
raise ValueError('Params should be a dictionary')
for param in text['params']:
if not isinstance(param, str):
raise ValueError('Params are not all text-keys')
return StringToTranslate(
wd=wd,
key=text['key'],
count=text['count'],
params=text['params'],
)
else:
raise ValueError('Unknown type "{}"'.format(t))
except KeyError:
raise ValueError('Not enough information to unserialize') | python | def unserialize(wd: WordDictionary, text: Dict):
"""
Transforms back a serialized value of `serialize()`
"""
if not isinstance(text, Mapping):
raise ValueError('Text has not the right format')
try:
t = text['type']
if t == 'string':
return text['value']
elif t == 'trans':
if not isinstance(text['params'], Mapping):
raise ValueError('Params should be a dictionary')
for param in text['params']:
if not isinstance(param, str):
raise ValueError('Params are not all text-keys')
return StringToTranslate(
wd=wd,
key=text['key'],
count=text['count'],
params=text['params'],
)
else:
raise ValueError('Unknown type "{}"'.format(t))
except KeyError:
raise ValueError('Not enough information to unserialize') | ['def', 'unserialize', '(', 'wd', ':', 'WordDictionary', ',', 'text', ':', 'Dict', ')', ':', 'if', 'not', 'isinstance', '(', 'text', ',', 'Mapping', ')', ':', 'raise', 'ValueError', '(', "'Text has not the right format'", ')', 'try', ':', 't', '=', 'text', '[', "'type'", ']', 'if', 't', '==', "'string'", ':', 'return', 'text', '[', "'value'", ']', 'elif', 't', '==', "'trans'", ':', 'if', 'not', 'isinstance', '(', 'text', '[', "'params'", ']', ',', 'Mapping', ')', ':', 'raise', 'ValueError', '(', "'Params should be a dictionary'", ')', 'for', 'param', 'in', 'text', '[', "'params'", ']', ':', 'if', 'not', 'isinstance', '(', 'param', ',', 'str', ')', ':', 'raise', 'ValueError', '(', "'Params are not all text-keys'", ')', 'return', 'StringToTranslate', '(', 'wd', '=', 'wd', ',', 'key', '=', 'text', '[', "'key'", ']', ',', 'count', '=', 'text', '[', "'count'", ']', ',', 'params', '=', 'text', '[', "'params'", ']', ',', ')', 'else', ':', 'raise', 'ValueError', '(', '\'Unknown type "{}"\'', '.', 'format', '(', 't', ')', ')', 'except', 'KeyError', ':', 'raise', 'ValueError', '(', "'Not enough information to unserialize'", ')'] | Transforms back a serialized value of `serialize()` | ['Transforms', 'back', 'a', 'serialized', 'value', 'of', 'serialize', '()'] | train | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/translator.py#L574-L604 |
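Both branches of unserialize() can be seen with minimal payloads matching the code above; only the WordDictionary instance is assumed to exist:

    # 'wd' is assumed to be a configured WordDictionary.
    plain = unserialize(wd, {'type': 'string', 'value': 'hello'})   # returns the plain string
    stt = unserialize(wd, {'type': 'trans',
                           'key': 'WELCOME',
                           'count': None,
                           'params': {'name': 'Ada'}})              # returns a StringToTranslate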
8,773 | tundish/turberfield-dialogue | turberfield/dialogue/model.py | SceneScript.scripts | def scripts(cls, pkg, metadata, paths=[], **kwargs):
"""This class method is the preferred way to create SceneScript objects.
:param str pkg: The dotted name of the package containing the scripts.
:param metadata: A mapping or data object. This parameter permits searching among
scripts against particular criteria. Its use is application specific.
:param list(str) paths: A sequence of file paths to the scripts relative to the package.
You can satisfy all parameter requirements by passing in a
:py:class:`~turberfield.dialogue.model.SceneScript.Folder` object
like this::
SceneScript.scripts(**folder._asdict())
The method generates a sequence of
:py:class:`~turberfield.dialogue.model.SceneScript` objects.
"""
for path in paths:
try:
fP = pkg_resources.resource_filename(pkg, path)
except ImportError:
cls.log.warning(
"No package called {}".format(pkg)
)
else:
if not os.path.isfile(fP):
cls.log.warning(
"No script file at {}".format(os.path.join(*pkg.split(".") + [path]))
)
else:
yield cls(fP, metadata) | python | def scripts(cls, pkg, metadata, paths=[], **kwargs):
"""This class method is the preferred way to create SceneScript objects.
:param str pkg: The dotted name of the package containing the scripts.
:param metadata: A mapping or data object. This parameter permits searching among
scripts against particular criteria. Its use is application specific.
:param list(str) paths: A sequence of file paths to the scripts relative to the package.
You can satisfy all parameter requirements by passing in a
:py:class:`~turberfield.dialogue.model.SceneScript.Folder` object
like this::
SceneScript.scripts(**folder._asdict())
The method generates a sequence of
:py:class:`~turberfield.dialogue.model.SceneScript` objects.
"""
for path in paths:
try:
fP = pkg_resources.resource_filename(pkg, path)
except ImportError:
cls.log.warning(
"No package called {}".format(pkg)
)
else:
if not os.path.isfile(fP):
cls.log.warning(
"No script file at {}".format(os.path.join(*pkg.split(".") + [path]))
)
else:
yield cls(fP, metadata) | ['def', 'scripts', '(', 'cls', ',', 'pkg', ',', 'metadata', ',', 'paths', '=', '[', ']', ',', '*', '*', 'kwargs', ')', ':', 'for', 'path', 'in', 'paths', ':', 'try', ':', 'fP', '=', 'pkg_resources', '.', 'resource_filename', '(', 'pkg', ',', 'path', ')', 'except', 'ImportError', ':', 'cls', '.', 'log', '.', 'warning', '(', '"No package called {}"', '.', 'format', '(', 'pkg', ')', ')', 'else', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'fP', ')', ':', 'cls', '.', 'log', '.', 'warning', '(', '"No script file at {}"', '.', 'format', '(', 'os', '.', 'path', '.', 'join', '(', '*', 'pkg', '.', 'split', '(', '"."', ')', '+', '[', 'path', ']', ')', ')', ')', 'else', ':', 'yield', 'cls', '(', 'fP', ',', 'metadata', ')'] | This class method is the preferred way to create SceneScript objects.
:param str pkg: The dotted name of the package containing the scripts.
:param metadata: A mapping or data object. This parameter permits searching among
scripts against particular criteria. Its use is application specific.
:param list(str) paths: A sequence of file paths to the scripts relative to the package.
You can satisfy all parameter requirements by passing in a
:py:class:`~turberfield.dialogue.model.SceneScript.Folder` object
like this::
SceneScript.scripts(**folder._asdict())
The method generates a sequence of
:py:class:`~turberfield.dialogue.model.SceneScript` objects. | ['This', 'class', 'method', 'is', 'the', 'preferred', 'way', 'to', 'create', 'SceneScript', 'objects', '.'] | train | https://github.com/tundish/turberfield-dialogue/blob/e7ccf7c19ae162e2f315ddf2642394e858529b4a/turberfield/dialogue/model.py#L271-L301 |
8,774 | proycon/pynlpl | pynlpl/datatypes.py | Trie.size | def size(self):
"""Size is number of nodes under the trie, including the current node"""
if self.children:
return sum( ( c.size() for c in self.children.values() ) ) + 1
else:
return 1 | python | def size(self):
"""Size is number of nodes under the trie, including the current node"""
if self.children:
return sum( ( c.size() for c in self.children.values() ) ) + 1
else:
return 1 | ['def', 'size', '(', 'self', ')', ':', 'if', 'self', '.', 'children', ':', 'return', 'sum', '(', '(', 'c', '.', 'size', '(', ')', 'for', 'c', 'in', 'self', '.', 'children', '.', 'values', '(', ')', ')', ')', '+', '1', 'else', ':', 'return', '1'] | Size is number of nodes under the trie, including the current node | ['Size', 'is', 'number', 'of', 'nodes', 'under', 'the', 'trie', 'including', 'the', 'current', 'node'] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L361-L366 |
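The counting recursion on its own, as a self-contained sketch over a plain nested dict (a stand-in for the Trie's children mapping, not the pynlpl class itself):
def node_count(children):
    # every node contributes 1, plus the sizes of all of its children
    if children:
        return sum(node_count(c) for c in children.values()) + 1
    return 1

tree = {"a": {"b": {}, "c": {"d": {}}}}
print(node_count(tree))  # 5: the implicit root plus a, b, c and d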
8,775 | saltstack/salt | salt/utils/openstack/nova.py | SaltNova.volume_detach | def volume_detach(self,
name,
timeout=300):
'''
Detach a block device
'''
try:
volume = self.volume_show(name)
except KeyError as exc:
raise SaltCloudSystemExit('Unable to find {0} volume: {1}'.format(name, exc))
if not volume['attachments']:
return True
response = self.compute_conn.volumes.delete_server_volume(
volume['attachments'][0]['server_id'],
volume['attachments'][0]['id']
)
trycount = 0
start = time.time()
while True:
trycount += 1
try:
response = self._volume_get(volume['id'])
if response['status'] == 'available':
return response
except Exception as exc:
log.debug('Volume is detaching: %s', name)
time.sleep(1)
if time.time() - start > timeout:
log.error('Timed out after %d seconds '
'while waiting for data', timeout)
return False
log.debug(
'Retrying volume_show() (try %d)', trycount
) | python | def volume_detach(self,
name,
timeout=300):
'''
Detach a block device
'''
try:
volume = self.volume_show(name)
except KeyError as exc:
raise SaltCloudSystemExit('Unable to find {0} volume: {1}'.format(name, exc))
if not volume['attachments']:
return True
response = self.compute_conn.volumes.delete_server_volume(
volume['attachments'][0]['server_id'],
volume['attachments'][0]['id']
)
trycount = 0
start = time.time()
while True:
trycount += 1
try:
response = self._volume_get(volume['id'])
if response['status'] == 'available':
return response
except Exception as exc:
log.debug('Volume is detaching: %s', name)
time.sleep(1)
if time.time() - start > timeout:
log.error('Timed out after %d seconds '
'while waiting for data', timeout)
return False
log.debug(
'Retrying volume_show() (try %d)', trycount
) | ['def', 'volume_detach', '(', 'self', ',', 'name', ',', 'timeout', '=', '300', ')', ':', 'try', ':', 'volume', '=', 'self', '.', 'volume_show', '(', 'name', ')', 'except', 'KeyError', 'as', 'exc', ':', 'raise', 'SaltCloudSystemExit', '(', "'Unable to find {0} volume: {1}'", '.', 'format', '(', 'name', ',', 'exc', ')', ')', 'if', 'not', 'volume', '[', "'attachments'", ']', ':', 'return', 'True', 'response', '=', 'self', '.', 'compute_conn', '.', 'volumes', '.', 'delete_server_volume', '(', 'volume', '[', "'attachments'", ']', '[', '0', ']', '[', "'server_id'", ']', ',', 'volume', '[', "'attachments'", ']', '[', '0', ']', '[', "'id'", ']', ')', 'trycount', '=', '0', 'start', '=', 'time', '.', 'time', '(', ')', 'while', 'True', ':', 'trycount', '+=', '1', 'try', ':', 'response', '=', 'self', '.', '_volume_get', '(', 'volume', '[', "'id'", ']', ')', 'if', 'response', '[', "'status'", ']', '==', "'available'", ':', 'return', 'response', 'except', 'Exception', 'as', 'exc', ':', 'log', '.', 'debug', '(', "'Volume is detaching: %s'", ',', 'name', ')', 'time', '.', 'sleep', '(', '1', ')', 'if', 'time', '.', 'time', '(', ')', '-', 'start', '>', 'timeout', ':', 'log', '.', 'error', '(', "'Timed out after %d seconds '", "'while waiting for data'", ',', 'timeout', ')', 'return', 'False', 'log', '.', 'debug', '(', "'Retrying volume_show() (try %d)'", ',', 'trycount', ')'] | Detach a block device | ['Detach', 'a', 'block', 'device'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/nova.py#L647-L681 |
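The poll-until-available-or-timeout pattern used above, reduced to a self-contained sketch (no OpenStack calls involved):
import time

def wait_until(check, timeout=300, interval=1):
    start = time.time()
    while True:
        try:
            status = check()
            if status == 'available':
                return status
        except Exception:
            pass  # resource still busy/detaching; keep polling
        if time.time() - start > timeout:
            return False
        time.sleep(interval)

print(wait_until(lambda: 'available', timeout=5))  # 'available'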
8,776 | PMEAL/OpenPNM | openpnm/models/geometry/pore_surface_area.py | cube | def cube(target, pore_diameter='pore.diameter', throat_area='throat.area'):
r"""
Calculates internal surface area of pore bodies assuming they are cubes
then subtracts the area of the neighboring throats.
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
pore_diameter : string
The dictionary key to the pore diameter array.
throat_area : string
The dictionary key to the throat area array. Throat areas are needed
since their intersections with the pore are removed from the computation.
"""
network = target.project.network
D = target[pore_diameter]
Tn = network.find_neighbor_throats(pores=target.Ps, flatten=False)
Tsurf = _np.array([_np.sum(network[throat_area][Ts]) for Ts in Tn])
value = 6*D**2 - Tsurf
return value | python | def cube(target, pore_diameter='pore.diameter', throat_area='throat.area'):
r"""
Calculates internal surface area of pore bodies assuming they are cubes
then subtracts the area of the neighboring throats.
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
pore_diameter : string
The dictionary key to the pore diameter array.
throat_area : string
The dictionary key to the throat area array. Throat areas are needed
since their intersections with the pore are removed from the computation.
"""
network = target.project.network
D = target[pore_diameter]
Tn = network.find_neighbor_throats(pores=target.Ps, flatten=False)
Tsurf = _np.array([_np.sum(network[throat_area][Ts]) for Ts in Tn])
value = 6*D**2 - Tsurf
return value | ['def', 'cube', '(', 'target', ',', 'pore_diameter', '=', "'pore.diameter'", ',', 'throat_area', '=', "'throat.area'", ')', ':', 'network', '=', 'target', '.', 'project', '.', 'network', 'D', '=', 'target', '[', 'pore_diameter', ']', 'Tn', '=', 'network', '.', 'find_neighbor_throats', '(', 'pores', '=', 'target', '.', 'Ps', ',', 'flatten', '=', 'False', ')', 'Tsurf', '=', '_np', '.', 'array', '(', '[', '_np', '.', 'sum', '(', 'network', '[', 'throat_area', ']', '[', 'Ts', ']', ')', 'for', 'Ts', 'in', 'Tn', ']', ')', 'value', '=', '6', '*', 'D', '**', '2', '-', 'Tsurf', 'return', 'value'] | r"""
Calculates internal surface area of pore bodies assuming they are cubes
then subtracts the area of the neighboring throats.
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
pore_diameter : string
The dictionary key to the pore diameter array.
throat_area : string
The dictionary key to the throat area array. Throat areas are needed
since their intersections with the pore are removed from the computation. | ['r', 'Calculates', 'internal', 'surface', 'area', 'of', 'pore', 'bodies', 'assuming', 'they', 'are', 'cubes', 'then', 'subtracts', 'the', 'area', 'of', 'the', 'neighboring', 'throats', '.'] | train | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/geometry/pore_surface_area.py#L35-L59
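The geometry behind the model, shown on plain numpy arrays rather than an OpenPNM network object (values are illustrative):
import numpy as np

D = np.array([1.0, 2.0])                 # pore "diameters" (cube edge lengths)
throat_areas = [np.array([0.1, 0.2]),    # areas of throats touching pore 0
                np.array([0.3])]         # areas of throats touching pore 1
Tsurf = np.array([t.sum() for t in throat_areas])
surface_area = 6 * D**2 - Tsurf          # cube surface minus the throat openings
print(surface_area)                      # [ 5.7 23.7]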
8,777 | pandas-dev/pandas | pandas/core/arrays/datetimes.py | _to_M8 | def _to_M8(key, tz=None):
"""
Timestamp-like => dt64
"""
if not isinstance(key, Timestamp):
# this also converts strings
key = Timestamp(key)
if key.tzinfo is not None and tz is not None:
# Don't tz_localize(None) if key is already tz-aware
key = key.tz_convert(tz)
else:
key = key.tz_localize(tz)
return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE) | python | def _to_M8(key, tz=None):
"""
Timestamp-like => dt64
"""
if not isinstance(key, Timestamp):
# this also converts strings
key = Timestamp(key)
if key.tzinfo is not None and tz is not None:
# Don't tz_localize(None) if key is already tz-aware
key = key.tz_convert(tz)
else:
key = key.tz_localize(tz)
return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE) | ['def', '_to_M8', '(', 'key', ',', 'tz', '=', 'None', ')', ':', 'if', 'not', 'isinstance', '(', 'key', ',', 'Timestamp', ')', ':', '# this also converts strings', 'key', '=', 'Timestamp', '(', 'key', ')', 'if', 'key', '.', 'tzinfo', 'is', 'not', 'None', 'and', 'tz', 'is', 'not', 'None', ':', "# Don't tz_localize(None) if key is already tz-aware", 'key', '=', 'key', '.', 'tz_convert', '(', 'tz', ')', 'else', ':', 'key', '=', 'key', '.', 'tz_localize', '(', 'tz', ')', 'return', 'np', '.', 'int64', '(', 'conversion', '.', 'pydt_to_i8', '(', 'key', ')', ')', '.', 'view', '(', '_NS_DTYPE', ')'] | Timestamp-like => dt64 | ['Timestamp', '-', 'like', '=', '>', 'dt64'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L72-L85 |
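The same timezone decision expressed with the public pandas API (a sketch of the behaviour, not the private helper itself):
import pandas as pd

naive = pd.Timestamp("2021-01-01 12:00")
aware = pd.Timestamp("2021-01-01 12:00", tz="UTC")

print(naive.tz_localize("UTC"))        # naive key + tz  -> localized
print(aware.tz_convert("US/Eastern"))  # tz-aware key    -> converted, not re-localized
print(naive.to_datetime64())           # a datetime64[ns] value, analogous to the dt64 result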
8,778 | rosenbrockc/acorn | acorn/ipython.py | InteractiveDecorator.post_run_cell | def post_run_cell(self):
"""Runs after the user-entered code in a cell has been executed. It
detects any new, decoratable objects that haven't been decorated yet and
then decorates them.
"""
#We just want to detect any new, decoratable objects that haven't been
#decorated yet.
decorlist = {k: [] for k in self.atypes}
for atype in self.atypes:
for n, o in self._get_decoratables(atype):
self._decorate(atype, n, o)
#Next, check whether we have an outstanding "loop intercept" that we
#"wrapped" with respect to acorn by enabling streamlining.
if self.pre is not None:
#Re-enable the acorn logging systems so that it gets back to normal.
from acorn.logging.decoration import set_streamlining
set_streamlining(False)
from acorn import msg
from acorn.logging.database import record
from time import time
#Determine the elapsed time for the execution of the entire cell.
entry = self.pre
entry["e"] = time() - entry["s"]
#See if we can match the executed cell's code up with one that we
#intercepted in the past..
cellid = self._find_cellid(entry["c"])
if cellid is None:
cellid = self.cellid
#Store the contents of the cell *before* they get overwritten by a
#diff.
self.cellids[cellid] = entry["c"]
record("__main__.{0:d}".format(cellid), entry, diff=True)
msg.info(entry, 1)
self.pre = None
#Finally, check whether any new variables have shown up, or have had
#their values changed.
from acorn.logging.database import tracker, active_db, Instance
varchange = self._var_changes()
taskdb = active_db()
for n, o in varchange:
otrack = tracker(o)
if isinstance(otrack, Instance):
taskdb.log_uuid(otrack.uuid)
global thumb_uuid
if thumb_uuid is not None:
self._log_images()
#Reset the image tracker list so that we don't save these images
#again next cell execution.
thumb_uuid = None
self.cellid = None | python | def post_run_cell(self):
"""Runs after the user-entered code in a cell has been executed. It
detects any new, decoratable objects that haven't been decorated yet and
then decorates them.
"""
#We just want to detect any new, decoratable objects that haven't been
#decorated yet.
decorlist = {k: [] for k in self.atypes}
for atype in self.atypes:
for n, o in self._get_decoratables(atype):
self._decorate(atype, n, o)
#Next, check whether we have an outstanding "loop intercept" that we
#"wrapped" with respect to acorn by enabling streamlining.
if self.pre is not None:
#Re-enable the acorn logging systems so that it gets back to normal.
from acorn.logging.decoration import set_streamlining
set_streamlining(False)
from acorn import msg
from acorn.logging.database import record
from time import time
#Determine the elapsed time for the execution of the entire cell.
entry = self.pre
entry["e"] = time() - entry["s"]
#See if we can match the executed cell's code up with one that we
#intercepted in the past..
cellid = self._find_cellid(entry["c"])
if cellid is None:
cellid = self.cellid
#Store the contents of the cell *before* they get overwritten by a
#diff.
self.cellids[cellid] = entry["c"]
record("__main__.{0:d}".format(cellid), entry, diff=True)
msg.info(entry, 1)
self.pre = None
#Finally, check whether any new variables have shown up, or have had
#their values changed.
from acorn.logging.database import tracker, active_db, Instance
varchange = self._var_changes()
taskdb = active_db()
for n, o in varchange:
otrack = tracker(o)
if isinstance(otrack, Instance):
taskdb.log_uuid(otrack.uuid)
global thumb_uuid
if thumb_uuid is not None:
self._log_images()
#Reset the image tracker list so that we don't save these images
#again next cell execution.
thumb_uuid = None
self.cellid = None | ['def', 'post_run_cell', '(', 'self', ')', ':', "#We just want to detect any new, decoratable objects that haven't been", '#decorated yet.', 'decorlist', '=', '{', 'k', ':', '[', ']', 'for', 'k', 'in', 'self', '.', 'atypes', '}', 'for', 'atype', 'in', 'self', '.', 'atypes', ':', 'for', 'n', ',', 'o', 'in', 'self', '.', '_get_decoratables', '(', 'atype', ')', ':', 'self', '.', '_decorate', '(', 'atype', ',', 'n', ',', 'o', ')', '#Next, check whether we have an outstanding "loop intercept" that we', '#"wrapped" with respect to acorn by enabling streamlining.', 'if', 'self', '.', 'pre', 'is', 'not', 'None', ':', '#Re-enable the acorn logging systems so that it gets back to normal.', 'from', 'acorn', '.', 'logging', '.', 'decoration', 'import', 'set_streamlining', 'set_streamlining', '(', 'False', ')', 'from', 'acorn', 'import', 'msg', 'from', 'acorn', '.', 'logging', '.', 'database', 'import', 'record', 'from', 'time', 'import', 'time', '#Determine the elapsed time for the execution of the entire cell.', 'entry', '=', 'self', '.', 'pre', 'entry', '[', '"e"', ']', '=', 'time', '(', ')', '-', 'entry', '[', '"s"', ']', "#See if we can match the executed cell's code up with one that we", '#intercepted in the past..', 'cellid', '=', 'self', '.', '_find_cellid', '(', 'entry', '[', '"c"', ']', ')', 'if', 'cellid', 'is', 'None', ':', 'cellid', '=', 'self', '.', 'cellid', '#Store the contents of the cell *before* they get overwritten by a', '#diff.', 'self', '.', 'cellids', '[', 'cellid', ']', '=', 'entry', '[', '"c"', ']', 'record', '(', '"__main__.{0:d}"', '.', 'format', '(', 'cellid', ')', ',', 'entry', ',', 'diff', '=', 'True', ')', 'msg', '.', 'info', '(', 'entry', ',', '1', ')', 'self', '.', 'pre', '=', 'None', '#Finally, check whether any new variables have shown up, or have had', '#their values changed.', 'from', 'acorn', '.', 'logging', '.', 'database', 'import', 'tracker', ',', 'active_db', ',', 'Instance', 'varchange', '=', 'self', '.', '_var_changes', '(', ')', 'taskdb', '=', 'active_db', '(', ')', 'for', 'n', ',', 'o', 'in', 'varchange', ':', 'otrack', '=', 'tracker', '(', 'o', ')', 'if', 'isinstance', '(', 'otrack', ',', 'Instance', ')', ':', 'taskdb', '.', 'log_uuid', '(', 'otrack', '.', 'uuid', ')', 'global', 'thumb_uuid', 'if', 'thumb_uuid', 'is', 'not', 'None', ':', 'self', '.', '_log_images', '(', ')', "#Reset the image tracker list so that we don't save these images", '#again next cell execution.', 'thumb_uuid', '=', 'None', 'self', '.', 'cellid', '=', 'None'] | Runs after the user-entered code in a cell has been executed. It
detects any new, decoratable objects that haven't been decorated yet and
then decorates them. | ['Runs', 'after', 'the', 'user', '-', 'entered', 'code', 'in', 'a', 'cell', 'has', 'been', 'executed', '.', 'It', 'detects', 'any', 'new', 'decoratable', 'objects', 'that', 'haven', 't', 'been', 'decorated', 'yet', 'and', 'then', 'decorates', 'them', '.'] | train | https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L329-L387 |
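For context, this is roughly how such a hook is attached to IPython's event system (a sketch; acorn registers its own bound method internally, and the callback signature varies slightly across IPython versions):
from IPython import get_ipython

def on_cell_done(*args):
    print("cell finished; scan for new objects here")

ip = get_ipython()
if ip is not None:               # None outside an IPython session
    ip.events.register("post_run_cell", on_cell_done)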
8,779 | bitlabstudio/django-development-fabfile | development_fabfile/fabfile/utils.py | require_server | def require_server(fn):
"""
Checks if the user has called the task with a server name.
Fabric tasks decorated with this decorator must be called like so::
fab <server name> <task name>
If no server name is given, the task will not be executed.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
if env.machine is None:
abort(red('ERROR: You must provide a server name to call this'
' task!'))
return fn(*args, **kwargs)
return wrapper | python | def require_server(fn):
"""
Checks if the user has called the task with a server name.
Fabric tasks decorated with this decorator must be called like so::
fab <server name> <task name>
If no server name is given, the task will not be executed.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
if env.machine is None:
abort(red('ERROR: You must provide a server name to call this'
' task!'))
return fn(*args, **kwargs)
return wrapper | ['def', 'require_server', '(', 'fn', ')', ':', '@', 'wraps', '(', 'fn', ')', 'def', 'wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'env', '.', 'machine', 'is', 'None', ':', 'abort', '(', 'red', '(', "'ERROR: You must provide a server name to call this'", "' task!'", ')', ')', 'return', 'fn', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'wrapper'] | Checks if the user has called the task with a server name.
Fabric tasks decorated with this decorator must be called like so::
fab <server name> <task name>
If no server name is given, the task will not be executed. | ['Checks', 'if', 'the', 'user', 'has', 'called', 'the', 'task', 'with', 'a', 'server', 'name', '.'] | train | https://github.com/bitlabstudio/django-development-fabfile/blob/a135c6eb5bdd0b496a7eccfd271aca558dd99243/development_fabfile/fabfile/utils.py#L9-L26 |
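A usage sketch with Fabric 1.x-style tasks (names are illustrative): a separate task sets env.machine, and decorated tasks refuse to run without it.
from fabric.api import env, task

@task
def stage():
    env.machine = 'stage'        # the "server name" task

@task
@require_server
def deploy():
    print('deploying to', env.machine)

# `fab stage deploy` runs; `fab deploy` alone aborts with the error above.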
8,780 | projectshift/shift-boiler | boiler/user/views_social.py | BaseSocial.next | def next(self):
""" Where to redirect after authorization """
next = request.args.get('next')
if next is None:
params = self.default_redirect_params
next = url_for(self.default_redirect_endpoint, **params)
return next | python | def next(self):
""" Where to redirect after authorization """
next = request.args.get('next')
if next is None:
params = self.default_redirect_params
next = url_for(self.default_redirect_endpoint, **params)
return next | ['def', 'next', '(', 'self', ')', ':', 'next', '=', 'request', '.', 'args', '.', 'get', '(', "'next'", ')', 'if', 'next', 'is', 'None', ':', 'params', '=', 'self', '.', 'default_redirect_params', 'next', '=', 'url_for', '(', 'self', '.', 'default_redirect_endpoint', ',', '*', '*', 'params', ')', 'return', 'next'] | Where to redirect after authorization | ['Where', 'to', 'redirect', 'after', 'authorization'] | train | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/views_social.py#L60-L66 |
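The underlying ?next= fallback pattern as a minimal Flask sketch (endpoint names are placeholders):
from flask import Flask, request, url_for

app = Flask(__name__)

@app.route('/home')
def home():
    return 'home'

@app.route('/social/callback')
def callback():
    nxt = request.args.get('next') or url_for('home')
    return 'redirect to ' + nxt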
8,781 | Chilipp/psyplot | psyplot/config/rcsetup.py | validate_bool | def validate_bool(b):
"""Convert b to a boolean or raise"""
if isinstance(b, six.string_types):
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
return False
else:
raise ValueError('Could not convert "%s" to boolean' % b) | python | def validate_bool(b):
"""Convert b to a boolean or raise"""
if isinstance(b, six.string_types):
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
return False
else:
raise ValueError('Could not convert "%s" to boolean' % b) | ['def', 'validate_bool', '(', 'b', ')', ':', 'if', 'isinstance', '(', 'b', ',', 'six', '.', 'string_types', ')', ':', 'b', '=', 'b', '.', 'lower', '(', ')', 'if', 'b', 'in', '(', "'t'", ',', "'y'", ',', "'yes'", ',', "'on'", ',', "'true'", ',', "'1'", ',', '1', ',', 'True', ')', ':', 'return', 'True', 'elif', 'b', 'in', '(', "'f'", ',', "'n'", ',', "'no'", ',', "'off'", ',', "'false'", ',', "'0'", ',', '0', ',', 'False', ')', ':', 'return', 'False', 'else', ':', 'raise', 'ValueError', '(', '\'Could not convert "%s" to boolean\'', '%', 'b', ')'] | Convert b to a boolean or raise | ['Convert', 'b', 'to', 'a', 'boolean', 'or', 'raise'] | train | https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/config/rcsetup.py#L1020-L1029 |
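Behaviour sketch:
print(validate_bool('Yes'))   # True  (strings are lower-cased first)
print(validate_bool('off'))   # False
print(validate_bool(1))       # True
# validate_bool('maybe') raises ValueError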
8,782 | IdentityPython/pysaml2 | src/saml2/mdie.py | to_dict | def to_dict(_dict, onts, mdb_safe=False):
"""
Convert a pysaml2 SAML2 message class instance into a basic dictionary
format.
The export interface.
:param _dict: The pysaml2 metadata instance
:param onts: List of schemas to use for the conversion
:return: The converted information
"""
res = {}
if isinstance(_dict, SamlBase):
res["__class__"] = "%s&%s" % (_dict.c_namespace, _dict.c_tag)
for key in _dict.keyswv():
if key in IMP_SKIP:
continue
val = getattr(_dict, key)
if key == "extension_elements":
_eel = extension_elements_to_elements(val, onts)
_val = [_eval(_v, onts, mdb_safe) for _v in _eel]
elif key == "extension_attributes":
if mdb_safe:
_val = dict([(k.replace(".", "__"), v) for k, v in
val.items()])
#_val = {k.replace(".", "__"): v for k, v in val.items()}
else:
_val = val
else:
_val = _eval(val, onts, mdb_safe)
if _val:
if mdb_safe:
key = key.replace(".", "__")
res[key] = _val
else:
for key, val in _dict.items():
_val = _eval(val, onts, mdb_safe)
if _val:
if mdb_safe and "." in key:
key = key.replace(".", "__")
res[key] = _val
return res | python | def to_dict(_dict, onts, mdb_safe=False):
"""
Convert a pysaml2 SAML2 message class instance into a basic dictionary
format.
The export interface.
:param _dict: The pysaml2 metadata instance
:param onts: List of schemas to use for the conversion
:return: The converted information
"""
res = {}
if isinstance(_dict, SamlBase):
res["__class__"] = "%s&%s" % (_dict.c_namespace, _dict.c_tag)
for key in _dict.keyswv():
if key in IMP_SKIP:
continue
val = getattr(_dict, key)
if key == "extension_elements":
_eel = extension_elements_to_elements(val, onts)
_val = [_eval(_v, onts, mdb_safe) for _v in _eel]
elif key == "extension_attributes":
if mdb_safe:
_val = dict([(k.replace(".", "__"), v) for k, v in
val.items()])
#_val = {k.replace(".", "__"): v for k, v in val.items()}
else:
_val = val
else:
_val = _eval(val, onts, mdb_safe)
if _val:
if mdb_safe:
key = key.replace(".", "__")
res[key] = _val
else:
for key, val in _dict.items():
_val = _eval(val, onts, mdb_safe)
if _val:
if mdb_safe and "." in key:
key = key.replace(".", "__")
res[key] = _val
return res | ['def', 'to_dict', '(', '_dict', ',', 'onts', ',', 'mdb_safe', '=', 'False', ')', ':', 'res', '=', '{', '}', 'if', 'isinstance', '(', '_dict', ',', 'SamlBase', ')', ':', 'res', '[', '"__class__"', ']', '=', '"%s&%s"', '%', '(', '_dict', '.', 'c_namespace', ',', '_dict', '.', 'c_tag', ')', 'for', 'key', 'in', '_dict', '.', 'keyswv', '(', ')', ':', 'if', 'key', 'in', 'IMP_SKIP', ':', 'continue', 'val', '=', 'getattr', '(', '_dict', ',', 'key', ')', 'if', 'key', '==', '"extension_elements"', ':', '_eel', '=', 'extension_elements_to_elements', '(', 'val', ',', 'onts', ')', '_val', '=', '[', '_eval', '(', '_v', ',', 'onts', ',', 'mdb_safe', ')', 'for', '_v', 'in', '_eel', ']', 'elif', 'key', '==', '"extension_attributes"', ':', 'if', 'mdb_safe', ':', '_val', '=', 'dict', '(', '[', '(', 'k', '.', 'replace', '(', '"."', ',', '"__"', ')', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'val', '.', 'items', '(', ')', ']', ')', '#_val = {k.replace(".", "__"): v for k, v in val.items()}', 'else', ':', '_val', '=', 'val', 'else', ':', '_val', '=', '_eval', '(', 'val', ',', 'onts', ',', 'mdb_safe', ')', 'if', '_val', ':', 'if', 'mdb_safe', ':', 'key', '=', 'key', '.', 'replace', '(', '"."', ',', '"__"', ')', 'res', '[', 'key', ']', '=', '_val', 'else', ':', 'for', 'key', ',', 'val', 'in', '_dict', '.', 'items', '(', ')', ':', '_val', '=', '_eval', '(', 'val', ',', 'onts', ',', 'mdb_safe', ')', 'if', '_val', ':', 'if', 'mdb_safe', 'and', '"."', 'in', 'key', ':', 'key', '=', 'key', '.', 'replace', '(', '"."', ',', '"__"', ')', 'res', '[', 'key', ']', '=', '_val', 'return', 'res'] | Convert a pysaml2 SAML2 message class instance into a basic dictionary
format.
The export interface.
:param _dict: The pysaml2 metadata instance
:param onts: List of schemas to use for the conversion
:return: The converted information | ['Convert', 'a', 'pysaml2', 'SAML2', 'message', 'class', 'instance', 'into', 'a', 'basic', 'dictionary', 'format', '.', 'The', 'export', 'interface', '.'] | train | https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mdie.py#L46-L87 |
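The mdb_safe key mangling in isolation (MongoDB forbids '.' in document keys, hence the '__' substitution):
attrs = {"urn:oid:2.5.4.42": "given name"}
safe = {k.replace(".", "__"): v for k, v in attrs.items()}
print(safe)  # {'urn:oid:2__5__4__42': 'given name'}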
8,783 | ga4gh/ga4gh-server | oidc-provider/simple_op/src/provider/server/server.py | pyoidcMiddleware | def pyoidcMiddleware(func):
"""Common wrapper for the underlying pyoidc library functions.
Reads GET params and POST data before passing it on to the library and
converts the response from oic.utils.http_util to wsgi.
:param func: underlying library function
"""
def wrapper(environ, start_response):
data = get_or_post(environ)
cookies = environ.get("HTTP_COOKIE", "")
resp = func(request=data, cookie=cookies)
return resp(environ, start_response)
return wrapper | python | def pyoidcMiddleware(func):
"""Common wrapper for the underlying pyoidc library functions.
Reads GET params and POST data before passing it on to the library and
converts the response from oic.utils.http_util to wsgi.
:param func: underlying library function
"""
def wrapper(environ, start_response):
data = get_or_post(environ)
cookies = environ.get("HTTP_COOKIE", "")
resp = func(request=data, cookie=cookies)
return resp(environ, start_response)
return wrapper | ['def', 'pyoidcMiddleware', '(', 'func', ')', ':', 'def', 'wrapper', '(', 'environ', ',', 'start_response', ')', ':', 'data', '=', 'get_or_post', '(', 'environ', ')', 'cookies', '=', 'environ', '.', 'get', '(', '"HTTP_COOKIE"', ',', '""', ')', 'resp', '=', 'func', '(', 'request', '=', 'data', ',', 'cookie', '=', 'cookies', ')', 'return', 'resp', '(', 'environ', ',', 'start_response', ')', 'return', 'wrapper'] | Common wrapper for the underlying pyoidc library functions.
Reads GET params and POST data before passing it on to the library and
converts the response from oic.utils.http_util to wsgi.
:param func: underlying library function | ['Common', 'wrapper', 'for', 'the', 'underlying', 'pyoidc', 'library', 'functions', '.', 'Reads', 'GET', 'params', 'and', 'POST', 'data', 'before', 'passing', 'it', 'on', 'the', 'library', 'and', 'converts', 'the', 'response', 'from', 'oic', '.', 'utils', '.', 'http_util', 'to', 'wsgi', '.', ':', 'param', 'func', ':', 'underlying', 'library', 'function'] | train | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/oidc-provider/simple_op/src/provider/server/server.py#L70-L83 |
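The same wrapping idea as a self-contained sketch: a function that takes parsed request data and returns a responder is turned into a plain WSGI callable (the pyoidc-specific request parsing is replaced by a query-string parse here):
from urllib.parse import parse_qs

def middleware(func):
    def app(environ, start_response):
        data = parse_qs(environ.get('QUERY_STRING', ''))
        cookies = environ.get('HTTP_COOKIE', '')
        resp = func(request=data, cookie=cookies)   # resp behaves like http_util.Response
        return resp(environ, start_response)
    return app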
8,784 | wmayner/pyphi | pyphi/utils.py | all_states | def all_states(n, big_endian=False):
"""Return all binary states for a system.
Args:
n (int): The number of elements in the system.
big_endian (bool): Whether to return the states in big-endian order
instead of little-endian order.
Yields:
tuple[int]: The next state of an ``n``-element system, in little-endian
order unless ``big_endian`` is ``True``.
"""
if n == 0:
return
for state in product((0, 1), repeat=n):
if big_endian:
yield state
else:
yield state[::-1] | python | def all_states(n, big_endian=False):
"""Return all binary states for a system.
Args:
n (int): The number of elements in the system.
big_endian (bool): Whether to return the states in big-endian order
instead of little-endian order.
Yields:
tuple[int]: The next state of an ``n``-element system, in little-endian
order unless ``big_endian`` is ``True``.
"""
if n == 0:
return
for state in product((0, 1), repeat=n):
if big_endian:
yield state
else:
yield state[::-1] | ['def', 'all_states', '(', 'n', ',', 'big_endian', '=', 'False', ')', ':', 'if', 'n', '==', '0', ':', 'return', 'for', 'state', 'in', 'product', '(', '(', '0', ',', '1', ')', ',', 'repeat', '=', 'n', ')', ':', 'if', 'big_endian', ':', 'yield', 'state', 'else', ':', 'yield', 'state', '[', ':', ':', '-', '1', ']'] | Return all binary states for a system.
Args:
n (int): The number of elements in the system.
big_endian (bool): Whether to return the states in big-endian order
instead of little-endian order.
Yields:
tuple[int]: The next state of an ``n``-element system, in little-endian
order unless ``big_endian`` is ``True``. | ['Return', 'all', 'binary', 'states', 'for', 'a', 'system', '.'] | train | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/utils.py#L27-L46 |
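Quick check of the two orderings (itertools.product emits big-endian tuples; the default reverses each one):
print(list(all_states(2)))
# [(0, 0), (1, 0), (0, 1), (1, 1)]   little-endian: element 0 varies fastest
print(list(all_states(2, big_endian=True)))
# [(0, 0), (0, 1), (1, 0), (1, 1)]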
8,785 | boriel/zxbasic | zxbpp.py | p_ifdef_else | def p_ifdef_else(p):
""" ifdef : ifdefelsea ifdefelseb ENDIF
"""
global ENABLED
p[0] = p[1] + p[2]
p[0] += ['#line %i "%s"' % (p.lineno(3) + 1, CURRENT_FILE[-1])]
ENABLED = IFDEFS[-1][0]
IFDEFS.pop() | python | def p_ifdef_else(p):
""" ifdef : ifdefelsea ifdefelseb ENDIF
"""
global ENABLED
p[0] = p[1] + p[2]
p[0] += ['#line %i "%s"' % (p.lineno(3) + 1, CURRENT_FILE[-1])]
ENABLED = IFDEFS[-1][0]
IFDEFS.pop() | ['def', 'p_ifdef_else', '(', 'p', ')', ':', 'global', 'ENABLED', 'p', '[', '0', ']', '=', 'p', '[', '1', ']', '+', 'p', '[', '2', ']', 'p', '[', '0', ']', '+=', '[', '\'#line %i "%s"\'', '%', '(', 'p', '.', 'lineno', '(', '3', ')', '+', '1', ',', 'CURRENT_FILE', '[', '-', '1', ']', ')', ']', 'ENABLED', '=', 'IFDEFS', '[', '-', '1', ']', '[', '0', ']', 'IFDEFS', '.', 'pop', '(', ')'] | ifdef : ifdefelsea ifdefelseb ENDIF | ['ifdef', ':', 'ifdefelsea', 'ifdefelseb', 'ENDIF'] | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbpp.py#L506-L514 |
8,786 | lemieuxl/pyGenClean | pyGenClean/NoCallHetero/heterozygosity_plot.py | compute_heterozygosity | def compute_heterozygosity(in_prefix, nb_samples):
"""Computes the heterozygosity ratio of samples (from tped)."""
tped_name = in_prefix + ".tped"
tfam_name = in_prefix + ".tfam"
# The function we want to use
check_heterozygosity = np.vectorize(is_heterozygous)
# The autosomes
autosomes = {str(i) for i in xrange(1, 23)}
# The tfam
samples = None
with open(tfam_name, 'rb') as input_file:
samples = input_file.readlines()
samples = [tuple(i.rstrip("\r\n").split("\t")[:2]) for i in samples]
heterozygosity = np.zeros(nb_samples, dtype=int)
nb_markers = np.zeros(nb_samples, dtype=int)
with open(tped_name, 'rb') as input_file:
# There is no header
for line in input_file:
row = np.array(line.rstrip("\r\n").split("\t"))
chromosome = row[0]
if chromosome not in autosomes:
# This is not an autosome, so we skip
continue
# Getting the genotypes
genotypes = row[4:]
# Finding the heterozygous genotypes
heterozygosity += check_heterozygosity(genotypes)
# Adding to the number of markers for each sample (excluding no calls)
nb_markers += genotypes != "0 0"
return np.true_divide(heterozygosity, nb_markers), samples | python | def compute_heterozygosity(in_prefix, nb_samples):
"""Computes the heterozygosity ratio of samples (from tped)."""
tped_name = in_prefix + ".tped"
tfam_name = in_prefix + ".tfam"
# The function we want to use
check_heterozygosity = np.vectorize(is_heterozygous)
# The autosomes
autosomes = {str(i) for i in xrange(1, 23)}
# The tfam
samples = None
with open(tfam_name, 'rb') as input_file:
samples = input_file.readlines()
samples = [tuple(i.rstrip("\r\n").split("\t")[:2]) for i in samples]
heterozygosity = np.zeros(nb_samples, dtype=int)
nb_markers = np.zeros(nb_samples, dtype=int)
with open(tped_name, 'rb') as input_file:
# There is no header
for line in input_file:
row = np.array(line.rstrip("\r\n").split("\t"))
chromosome = row[0]
if chromosome not in autosomes:
# This is not an autosome, so we skip
continue
# Getting the genotypes
genotypes = row[4:]
# Finding the heterozygous genotypes
heterozygosity += check_heterozygosity(genotypes)
# Adding to the number of markers for each sample (excluding no calls)
nb_markers += genotypes != "0 0"
return np.true_divide(heterozygosity, nb_markers), samples | ['def', 'compute_heterozygosity', '(', 'in_prefix', ',', 'nb_samples', ')', ':', 'tped_name', '=', 'in_prefix', '+', '".tped"', 'tfam_name', '=', 'in_prefix', '+', '".tfam"', '# The function we want to use', 'check_heterozygosity', '=', 'np', '.', 'vectorize', '(', 'is_heterozygous', ')', '# The autosomes', 'autosomes', '=', '{', 'str', '(', 'i', ')', 'for', 'i', 'in', 'xrange', '(', '1', ',', '23', ')', '}', '# The tfam', 'samples', '=', 'None', 'with', 'open', '(', 'tfam_name', ',', "'rb'", ')', 'as', 'input_file', ':', 'samples', '=', 'input_file', '.', 'readlines', '(', ')', 'samples', '=', '[', 'tuple', '(', 'i', '.', 'rstrip', '(', '"\\r\\n"', ')', '.', 'split', '(', '"\\t"', ')', '[', ':', '2', ']', ')', 'for', 'i', 'in', 'samples', ']', 'heterozygosity', '=', 'np', '.', 'zeros', '(', 'nb_samples', ',', 'dtype', '=', 'int', ')', 'nb_markers', '=', 'np', '.', 'zeros', '(', 'nb_samples', ',', 'dtype', '=', 'int', ')', 'with', 'open', '(', 'tped_name', ',', "'rb'", ')', 'as', 'input_file', ':', '# There is no header', 'for', 'line', 'in', 'input_file', ':', 'row', '=', 'np', '.', 'array', '(', 'line', '.', 'rstrip', '(', '"\\r\\n"', ')', '.', 'split', '(', '"\\t"', ')', ')', 'chromosome', '=', 'row', '[', '0', ']', 'if', 'chromosome', 'not', 'in', 'autosomes', ':', '# This is not an autosome, so we skip', 'continue', '# Getting the genotypes', 'genotypes', '=', 'row', '[', '4', ':', ']', '# Finding the heterozygous genotypes', 'heterozygosity', '+=', 'check_heterozygosity', '(', 'genotypes', ')', '# Adding to number of markers for each samples (excluding no calls)', 'nb_markers', '+=', 'genotypes', '!=', '"0 0"', 'return', 'np', '.', 'true_divide', '(', 'heterozygosity', ',', 'nb_markers', ')', ',', 'samples'] | Computes the heterozygosity ratio of samples (from tped). | ['Computes', 'the', 'heterozygosity', 'ratio', 'of', 'samples', '(', 'from', 'tped', ')', '.'] | train | https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/NoCallHetero/heterozygosity_plot.py#L141-L179 |
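The per-sample ratio on a toy genotype matrix (markers x samples), with a simple stand-in for the module's is_heterozygous helper; values are illustrative:
import numpy as np

genotypes = np.array([["A A", "A G"],
                      ["C T", "0 0"],   # "0 0" is a no-call and is not counted
                      ["G G", "G T"]])
is_het = np.vectorize(lambda g: g != "0 0" and len(set(g.split(" "))) > 1)
called = genotypes != "0 0"
ratio = np.true_divide((is_het(genotypes) & called).sum(axis=0), called.sum(axis=0))
print(ratio)  # [0.33333333 1.        ]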
8,787 | facebook/pyre-check | sapp/sapp/analysis_output.py | AnalysisOutput.file_names | def file_names(self) -> Iterable[str]:
"""Generates all file names that are used to generate file_handles.
"""
if self.is_sharded():
yield from ShardedFile(self.filename_spec).get_filenames()
elif self.filename_spec:
yield self.filename_spec | python | def file_names(self) -> Iterable[str]:
"""Generates all file names that are used to generate file_handles.
"""
if self.is_sharded():
yield from ShardedFile(self.filename_spec).get_filenames()
elif self.filename_spec:
yield self.filename_spec | ['def', 'file_names', '(', 'self', ')', '->', 'Iterable', '[', 'str', ']', ':', 'if', 'self', '.', 'is_sharded', '(', ')', ':', 'yield', 'from', 'ShardedFile', '(', 'self', '.', 'filename_spec', ')', '.', 'get_filenames', '(', ')', 'elif', 'self', '.', 'filename_spec', ':', 'yield', 'self', '.', 'filename_spec'] | Generates all file names that are used to generate file_handles. | ['Generates', 'all', 'file', 'names', 'that', 'are', 'used', 'to', 'generate', 'file_handles', '.'] | train | https://github.com/facebook/pyre-check/blob/4a9604d943d28ef20238505a51acfb1f666328d7/sapp/sapp/analysis_output.py#L121-L127 |
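The conditional-yield shape on its own (a sketch; the real shard expansion lives in ShardedFile, and the file names are made up):
def names(spec, sharded, shards=3):
    if sharded:
        yield from (spec.replace("*", str(i)) for i in range(shards))
    elif spec:
        yield spec

print(list(names("taint-output@*.json", sharded=True)))
# ['taint-output@0.json', 'taint-output@1.json', 'taint-output@2.json']
print(list(names("taint-output.json", sharded=False)))
# ['taint-output.json']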
8,788 | tanghaibao/jcvi | jcvi/projects/str.py | simulate | def simulate(args):
"""
%prog simulate run_dir 1 300
Simulate BAMs with varying inserts with dwgsim. The above command will
simulate between 1 to 300 CAGs in the HD region, in a directory called
`run_dir`.
"""
p = OptionParser(simulate.__doc__)
p.add_option("--method", choices=("wgsim", "eagle"), default="eagle",
help="Read simulator")
p.add_option("--ref", default="hg38", choices=("hg38", "hg19"),
help="Reference genome version")
p.add_option("--tred", default="HD", help="TRED locus")
add_simulate_options(p)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
rundir, startunits, endunits = args
ref = opts.ref
ref_fasta = "/mnt/ref/{}.upper.fa".format(ref)
startunits, endunits = int(startunits), int(endunits)
basecwd = os.getcwd()
mkdir(rundir)
os.chdir(rundir)
cwd = os.getcwd()
# TRED region (e.g. Huntington)
pad_left, pad_right = 1000, 10000
repo = TREDsRepo(ref=ref)
tred = repo[opts.tred]
chr, start, end = tred.chr, tred.repeat_start, tred.repeat_end
logging.debug("Simulating {}".format(tred))
fasta = Fasta(ref_fasta)
seq_left = fasta[chr][start - pad_left:start - 1]
seq_right = fasta[chr][end: end + pad_right]
motif = tred.repeat
simulate_method = wgsim if opts.method == "wgsim" else eagle
# Write fake sequence
for units in range(startunits, endunits + 1):
pf = str(units)
mkdir(pf)
os.chdir(pf)
seq = str(seq_left) + motif * units + str(seq_right)
fastafile = pf + ".fasta"
make_fasta(seq, fastafile, id=chr.upper())
# Simulate reads on it
simulate_method([fastafile, "--depth={}".format(opts.depth),
"--readlen={}".format(opts.readlen),
"--distance={}".format(opts.distance),
"--outfile={}".format(pf)])
read1 = pf + ".bwa.read1.fastq"
read2 = pf + ".bwa.read2.fastq"
samfile, _ = align([ref_fasta, read1, read2])
indexed_samfile = index([samfile])
sh("mv {} ../{}.bam".format(indexed_samfile, pf))
sh("mv {}.bai ../{}.bam.bai".format(indexed_samfile, pf))
os.chdir(cwd)
shutil.rmtree(pf)
os.chdir(basecwd) | python | def simulate(args):
"""
%prog simulate run_dir 1 300
Simulate BAMs with varying inserts with dwgsim. The above command will
simulate between 1 to 300 CAGs in the HD region, in a directory called
`run_dir`.
"""
p = OptionParser(simulate.__doc__)
p.add_option("--method", choices=("wgsim", "eagle"), default="eagle",
help="Read simulator")
p.add_option("--ref", default="hg38", choices=("hg38", "hg19"),
help="Reference genome version")
p.add_option("--tred", default="HD", help="TRED locus")
add_simulate_options(p)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
rundir, startunits, endunits = args
ref = opts.ref
ref_fasta = "/mnt/ref/{}.upper.fa".format(ref)
startunits, endunits = int(startunits), int(endunits)
basecwd = os.getcwd()
mkdir(rundir)
os.chdir(rundir)
cwd = os.getcwd()
# TRED region (e.g. Huntington)
pad_left, pad_right = 1000, 10000
repo = TREDsRepo(ref=ref)
tred = repo[opts.tred]
chr, start, end = tred.chr, tred.repeat_start, tred.repeat_end
logging.debug("Simulating {}".format(tred))
fasta = Fasta(ref_fasta)
seq_left = fasta[chr][start - pad_left:start - 1]
seq_right = fasta[chr][end: end + pad_right]
motif = tred.repeat
simulate_method = wgsim if opts.method == "wgsim" else eagle
# Write fake sequence
for units in range(startunits, endunits + 1):
pf = str(units)
mkdir(pf)
os.chdir(pf)
seq = str(seq_left) + motif * units + str(seq_right)
fastafile = pf + ".fasta"
make_fasta(seq, fastafile, id=chr.upper())
# Simulate reads on it
simulate_method([fastafile, "--depth={}".format(opts.depth),
"--readlen={}".format(opts.readlen),
"--distance={}".format(opts.distance),
"--outfile={}".format(pf)])
read1 = pf + ".bwa.read1.fastq"
read2 = pf + ".bwa.read2.fastq"
samfile, _ = align([ref_fasta, read1, read2])
indexed_samfile = index([samfile])
sh("mv {} ../{}.bam".format(indexed_samfile, pf))
sh("mv {}.bai ../{}.bam.bai".format(indexed_samfile, pf))
os.chdir(cwd)
shutil.rmtree(pf)
os.chdir(basecwd) | ['def', 'simulate', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'simulate', '.', '__doc__', ')', 'p', '.', 'add_option', '(', '"--method"', ',', 'choices', '=', '(', '"wgsim"', ',', '"eagle"', ')', ',', 'default', '=', '"eagle"', ',', 'help', '=', '"Read simulator"', ')', 'p', '.', 'add_option', '(', '"--ref"', ',', 'default', '=', '"hg38"', ',', 'choices', '=', '(', '"hg38"', ',', '"hg19"', ')', ',', 'help', '=', '"Reference genome version"', ')', 'p', '.', 'add_option', '(', '"--tred"', ',', 'default', '=', '"HD"', ',', 'help', '=', '"TRED locus"', ')', 'add_simulate_options', '(', 'p', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '3', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'rundir', ',', 'startunits', ',', 'endunits', '=', 'args', 'ref', '=', 'opts', '.', 'ref', 'ref_fasta', '=', '"/mnt/ref/{}.upper.fa"', '.', 'format', '(', 'ref', ')', 'startunits', ',', 'endunits', '=', 'int', '(', 'startunits', ')', ',', 'int', '(', 'endunits', ')', 'basecwd', '=', 'os', '.', 'getcwd', '(', ')', 'mkdir', '(', 'rundir', ')', 'os', '.', 'chdir', '(', 'rundir', ')', 'cwd', '=', 'os', '.', 'getcwd', '(', ')', '# TRED region (e.g. Huntington)', 'pad_left', ',', 'pad_right', '=', '1000', ',', '10000', 'repo', '=', 'TREDsRepo', '(', 'ref', '=', 'ref', ')', 'tred', '=', 'repo', '[', 'opts', '.', 'tred', ']', 'chr', ',', 'start', ',', 'end', '=', 'tred', '.', 'chr', ',', 'tred', '.', 'repeat_start', ',', 'tred', '.', 'repeat_end', 'logging', '.', 'debug', '(', '"Simulating {}"', '.', 'format', '(', 'tred', ')', ')', 'fasta', '=', 'Fasta', '(', 'ref_fasta', ')', 'seq_left', '=', 'fasta', '[', 'chr', ']', '[', 'start', '-', 'pad_left', ':', 'start', '-', '1', ']', 'seq_right', '=', 'fasta', '[', 'chr', ']', '[', 'end', ':', 'end', '+', 'pad_right', ']', 'motif', '=', 'tred', '.', 'repeat', 'simulate_method', '=', 'wgsim', 'if', 'opts', '.', 'method', '==', '"wgsim"', 'else', 'eagle', '# Write fake sequence', 'for', 'units', 'in', 'range', '(', 'startunits', ',', 'endunits', '+', '1', ')', ':', 'pf', '=', 'str', '(', 'units', ')', 'mkdir', '(', 'pf', ')', 'os', '.', 'chdir', '(', 'pf', ')', 'seq', '=', 'str', '(', 'seq_left', ')', '+', 'motif', '*', 'units', '+', 'str', '(', 'seq_right', ')', 'fastafile', '=', 'pf', '+', '".fasta"', 'make_fasta', '(', 'seq', ',', 'fastafile', ',', 'id', '=', 'chr', '.', 'upper', '(', ')', ')', '# Simulate reads on it', 'simulate_method', '(', '[', 'fastafile', ',', '"--depth={}"', '.', 'format', '(', 'opts', '.', 'depth', ')', ',', '"--readlen={}"', '.', 'format', '(', 'opts', '.', 'readlen', ')', ',', '"--distance={}"', '.', 'format', '(', 'opts', '.', 'distance', ')', ',', '"--outfile={}"', '.', 'format', '(', 'pf', ')', ']', ')', 'read1', '=', 'pf', '+', '".bwa.read1.fastq"', 'read2', '=', 'pf', '+', '".bwa.read2.fastq"', 'samfile', ',', '_', '=', 'align', '(', '[', 'ref_fasta', ',', 'read1', ',', 'read2', ']', ')', 'indexed_samfile', '=', 'index', '(', '[', 'samfile', ']', ')', 'sh', '(', '"mv {} ../{}.bam"', '.', 'format', '(', 'indexed_samfile', ',', 'pf', ')', ')', 'sh', '(', '"mv {}.bai ../{}.bam.bai"', '.', 'format', '(', 'indexed_samfile', ',', 'pf', ')', ')', 'os', '.', 'chdir', '(', 'cwd', ')', 'shutil', '.', 'rmtree', '(', 'pf', ')', 'os', '.', 'chdir', '(', 'basecwd', ')'] | %prog simulate run_dir 1 300
Simulate BAMs with varying inserts with dwgsim. The above command will
simulate between 1 to 300 CAGs in the HD region, in a directory called
`run_dir`. | ['%prog', 'simulate', 'run_dir', '1', '300'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/str.py#L1339-L1407 |
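Invocation sketch (argument values are illustrative; it assumes the hard-coded reference FASTA and the chosen read simulator are actually present on the machine):
# simulate 1-5 copies of the repeat into ./test_run/ at low depth
simulate(["test_run", "1", "5", "--depth=10", "--method=wgsim"])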
8,789 | bcbio/bcbio-nextgen | bcbio/provenance/system.py | _slurm_info | def _slurm_info(queue):
"""Returns machine information for a slurm job scheduler.
"""
cl = "sinfo -h -p {} --format '%c %m %D'".format(queue)
num_cpus, mem, num_nodes = subprocess.check_output(shlex.split(cl)).decode().split()
# if the queue contains multiple memory configurations, the minimum value is printed with a trailing '+'
mem = float(mem.replace('+', ''))
num_cpus = int(num_cpus.replace('+', ''))
# handle small clusters where we need to allocate memory for bcbio and the controller
# This will typically be on cloud AWS machines
bcbio_mem = 2000
controller_mem = 4000
if int(num_nodes) < 3 and mem > (bcbio_mem + controller_mem) * 2:
mem = mem - bcbio_mem - controller_mem
return [{"cores": int(num_cpus), "memory": mem / 1024.0, "name": "slurm_machine"}] | python | def _slurm_info(queue):
"""Returns machine information for a slurm job scheduler.
"""
cl = "sinfo -h -p {} --format '%c %m %D'".format(queue)
num_cpus, mem, num_nodes = subprocess.check_output(shlex.split(cl)).decode().split()
# if the queue contains multiple memory configurations, the minimum value is printed with a trailing '+'
mem = float(mem.replace('+', ''))
num_cpus = int(num_cpus.replace('+', ''))
# handle small clusters where we need to allocate memory for bcbio and the controller
# This will typically be on cloud AWS machines
bcbio_mem = 2000
controller_mem = 4000
if int(num_nodes) < 3 and mem > (bcbio_mem + controller_mem) * 2:
mem = mem - bcbio_mem - controller_mem
return [{"cores": int(num_cpus), "memory": mem / 1024.0, "name": "slurm_machine"}] | ['def', '_slurm_info', '(', 'queue', ')', ':', 'cl', '=', '"sinfo -h -p {} --format \'%c %m %D\'"', '.', 'format', '(', 'queue', ')', 'num_cpus', ',', 'mem', ',', 'num_nodes', '=', 'subprocess', '.', 'check_output', '(', 'shlex', '.', 'split', '(', 'cl', ')', ')', '.', 'decode', '(', ')', '.', 'split', '(', ')', "# if the queue contains multiple memory configurations, the minimum value is printed with a trailing '+'", 'mem', '=', 'float', '(', 'mem', '.', 'replace', '(', "'+'", ',', "''", ')', ')', 'num_cpus', '=', 'int', '(', 'num_cpus', '.', 'replace', '(', "'+'", ',', "''", ')', ')', '# handle small clusters where we need to allocate memory for bcbio and the controller', '# This will typically be on cloud AWS machines', 'bcbio_mem', '=', '2000', 'controller_mem', '=', '4000', 'if', 'int', '(', 'num_nodes', ')', '<', '3', 'and', 'mem', '>', '(', 'bcbio_mem', '+', 'controller_mem', ')', '*', '2', ':', 'mem', '=', 'mem', '-', 'bcbio_mem', '-', 'controller_mem', 'return', '[', '{', '"cores"', ':', 'int', '(', 'num_cpus', ')', ',', '"memory"', ':', 'mem', '/', '1024.0', ',', '"name"', ':', '"slurm_machine"', '}', ']'] | Returns machine information for a slurm job scheduler. | ['Returns', 'machine', 'information', 'for', 'a', 'slurm', 'job', 'scheduler', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/system.py#L61-L75 |
8,790 | listen-lavender/webcrawl | webcrawl/daemon.py | Daemon.monitor | def monitor(self, timeout):
"""
Monitor the process, check whether it runs out of time.
"""
def check(self, timeout):
time.sleep(timeout)
self.stop()
wather = threading.Thread(target=check)
wather.setDaemon(True)
wather.start() | python | def monitor(self, timeout):
"""
Monitor the process, check whether it runs out of time.
"""
def check(self, timeout):
time.sleep(timeout)
self.stop()
wather = threading.Thread(target=check)
wather.setDaemon(True)
wather.start() | ['def', 'monitor', '(', 'self', ',', 'timeout', ')', ':', 'def', 'check', '(', 'self', ',', 'timeout', ')', ':', 'time', '.', 'sleep', '(', 'timeout', ')', 'self', '.', 'stop', '(', ')', 'wather', '=', 'threading', '.', 'Thread', '(', 'target', '=', 'check', ')', 'wather', '.', 'setDaemon', '(', 'True', ')', 'wather', '.', 'start', '(', ')'] | Monitor the process, check whether it runs out of time. | ['Monitor', 'the', 'process', 'check', 'whether', 'it', 'runs', 'out', 'of', 'time', '.'] | train | https://github.com/listen-lavender/webcrawl/blob/905dcfa6e6934aac764045660c0efcef28eae1e6/webcrawl/daemon.py#L164-L174 |
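A self-contained sketch of the same watchdog idea; note that a nested thread target picks up timeout and the instance from the enclosing scope (or via args=) rather than declaring them as parameters:
import threading
import time

class Stoppable:
    def stop(self):
        print("stopped by watchdog")

    def monitor(self, timeout):
        def check():
            time.sleep(timeout)
            self.stop()
        watcher = threading.Thread(target=check)
        watcher.daemon = True
        watcher.start()

s = Stoppable()
s.monitor(0.1)
time.sleep(0.2)   # gives the watchdog time to fire -> "stopped by watchdog"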
8,791 | nicolargo/glances | glances/plugins/glances_memswap.py | Plugin.update | def update(self):
"""Update swap memory stats using the input method."""
# Init new stats
stats = self.get_init_value()
if self.input_method == 'local':
# Update stats using the standard system lib
# Grab SWAP using the psutil swap_memory method
sm_stats = psutil.swap_memory()
# Get all the swap stats (copy/paste of the psutil documentation)
# total: total swap memory in bytes
# used: used swap memory in bytes
# free: free swap memory in bytes
# percent: the percentage usage
# sin: the number of bytes the system has swapped in from disk (cumulative)
# sout: the number of bytes the system has swapped out from disk
# (cumulative)
for swap in ['total', 'used', 'free', 'percent',
'sin', 'sout']:
if hasattr(sm_stats, swap):
stats[swap] = getattr(sm_stats, swap)
elif self.input_method == 'snmp':
# Update stats using SNMP
if self.short_system_name == 'windows':
# Mem stats for Windows OS are stored in the FS table
try:
fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
bulk=True)
except KeyError:
self.reset()
else:
for fs in fs_stat:
# The virtual memory concept is used by the operating
# system to extend (virtually) the physical memory and
# thus to run more programs by swapping unused memory
# zone (page) to a disk file.
if fs == 'Virtual Memory':
stats['total'] = int(
fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
stats['used'] = int(
fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
stats['percent'] = float(
stats['used'] * 100 / stats['total'])
stats['free'] = stats['total'] - stats['used']
break
else:
stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])
if stats['total'] == '':
self.reset()
return stats
for key in iterkeys(stats):
if stats[key] != '':
stats[key] = float(stats[key]) * 1024
# used=total-free
stats['used'] = stats['total'] - stats['free']
# percent: the percentage usage calculated as (total -
# available) / total * 100.
stats['percent'] = float(
(stats['total'] - stats['free']) / stats['total'] * 100)
# Update the stats
self.stats = stats
return self.stats | python | def update(self):
"""Update swap memory stats using the input method."""
# Init new stats
stats = self.get_init_value()
if self.input_method == 'local':
# Update stats using the standard system lib
# Grab SWAP using the psutil swap_memory method
sm_stats = psutil.swap_memory()
# Get all the swap stats (copy/paste of the psutil documentation)
# total: total swap memory in bytes
# used: used swap memory in bytes
# free: free swap memory in bytes
# percent: the percentage usage
# sin: the number of bytes the system has swapped in from disk (cumulative)
# sout: the number of bytes the system has swapped out from disk
# (cumulative)
for swap in ['total', 'used', 'free', 'percent',
'sin', 'sout']:
if hasattr(sm_stats, swap):
stats[swap] = getattr(sm_stats, swap)
elif self.input_method == 'snmp':
# Update stats using SNMP
if self.short_system_name == 'windows':
# Mem stats for Windows OS are stored in the FS table
try:
fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
bulk=True)
except KeyError:
self.reset()
else:
for fs in fs_stat:
# The virtual memory concept is used by the operating
# system to extend (virtually) the physical memory and
# thus to run more programs by swapping unused memory
# zone (page) to a disk file.
if fs == 'Virtual Memory':
stats['total'] = int(
fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
stats['used'] = int(
fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
stats['percent'] = float(
stats['used'] * 100 / stats['total'])
stats['free'] = stats['total'] - stats['used']
break
else:
stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])
if stats['total'] == '':
self.reset()
return stats
for key in iterkeys(stats):
if stats[key] != '':
stats[key] = float(stats[key]) * 1024
# used=total-free
stats['used'] = stats['total'] - stats['free']
# percent: the percentage usage calculated as (total -
# available) / total * 100.
stats['percent'] = float(
(stats['total'] - stats['free']) / stats['total'] * 100)
# Update the stats
self.stats = stats
return self.stats | ['def', 'update', '(', 'self', ')', ':', '# Init new stats', 'stats', '=', 'self', '.', 'get_init_value', '(', ')', 'if', 'self', '.', 'input_method', '==', "'local'", ':', '# Update stats using the standard system lib', '# Grab SWAP using the psutil swap_memory method', 'sm_stats', '=', 'psutil', '.', 'swap_memory', '(', ')', '# Get all the swap stats (copy/paste of the psutil documentation)', '# total: total swap memory in bytes', '# used: used swap memory in bytes', '# free: free swap memory in bytes', '# percent: the percentage usage', '# sin: the number of bytes the system has swapped in from disk (cumulative)', '# sout: the number of bytes the system has swapped out from disk', '# (cumulative)', 'for', 'swap', 'in', '[', "'total'", ',', "'used'", ',', "'free'", ',', "'percent'", ',', "'sin'", ',', "'sout'", ']', ':', 'if', 'hasattr', '(', 'sm_stats', ',', 'swap', ')', ':', 'stats', '[', 'swap', ']', '=', 'getattr', '(', 'sm_stats', ',', 'swap', ')', 'elif', 'self', '.', 'input_method', '==', "'snmp'", ':', '# Update stats using SNMP', 'if', 'self', '.', 'short_system_name', '==', "'windows'", ':', '# Mem stats for Windows OS are stored in the FS table', 'try', ':', 'fs_stat', '=', 'self', '.', 'get_stats_snmp', '(', 'snmp_oid', '=', 'snmp_oid', '[', 'self', '.', 'short_system_name', ']', ',', 'bulk', '=', 'True', ')', 'except', 'KeyError', ':', 'self', '.', 'reset', '(', ')', 'else', ':', 'for', 'fs', 'in', 'fs_stat', ':', '# The virtual memory concept is used by the operating', '# system to extend (virtually) the physical memory and', '# thus to run more programs by swapping unused memory', '# zone (page) to a disk file.', 'if', 'fs', '==', "'Virtual Memory'", ':', 'stats', '[', "'total'", ']', '=', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'size'", ']', ')', '*', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'alloc_unit'", ']', ')', 'stats', '[', "'used'", ']', '=', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'used'", ']', ')', '*', 'int', '(', 'fs_stat', '[', 'fs', ']', '[', "'alloc_unit'", ']', ')', 'stats', '[', "'percent'", ']', '=', 'float', '(', 'stats', '[', "'used'", ']', '*', '100', '/', 'stats', '[', "'total'", ']', ')', 'stats', '[', "'free'", ']', '=', 'stats', '[', "'total'", ']', '-', 'stats', '[', "'used'", ']', 'break', 'else', ':', 'stats', '=', 'self', '.', 'get_stats_snmp', '(', 'snmp_oid', '=', 'snmp_oid', '[', "'default'", ']', ')', 'if', 'stats', '[', "'total'", ']', '==', "''", ':', 'self', '.', 'reset', '(', ')', 'return', 'stats', 'for', 'key', 'in', 'iterkeys', '(', 'stats', ')', ':', 'if', 'stats', '[', 'key', ']', '!=', "''", ':', 'stats', '[', 'key', ']', '=', 'float', '(', 'stats', '[', 'key', ']', ')', '*', '1024', '# used=total-free', 'stats', '[', "'used'", ']', '=', 'stats', '[', "'total'", ']', '-', 'stats', '[', "'free'", ']', '# percent: the percentage usage calculated as (total -', '# available) / total * 100.', 'stats', '[', "'percent'", ']', '=', 'float', '(', '(', 'stats', '[', "'total'", ']', '-', 'stats', '[', "'free'", ']', ')', '/', 'stats', '[', "'total'", ']', '*', '100', ')', '# Update the stats', 'self', '.', 'stats', '=', 'stats', 'return', 'self', '.', 'stats'] | Update swap memory stats using the input method. | ['Update', 'swap', 'memory', 'stats', 'using', 'the', 'input', 'method', '.'] | train | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_memswap.py#L60-L128 |
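The SNMP branch's derived fields on toy numbers (a sketch of the unit conversion and percentage arithmetic only):
total_kib, free_kib = 4194304.0, 1048576.0        # SNMP values are reported in KiB
total, free = total_kib * 1024, free_kib * 1024   # convert to bytes
used = total - free
percent = (total - free) / total * 100
print(used, percent)   # 3221225472.0 75.0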
8,792 | SHDShim/pytheos | pytheos/scales/objs.py | JHEOS.cal_pth | def cal_pth(self, v, temp):
"""
calculate thermal pressure
:param v: unit-cell volume in A^3
:param temp: temperature in K
:return: thermal pressure in GPa
"""
params_t = self._set_params(self.params_therm)
return constq_pth(v, temp, *params_t, self.n, self.z,
t_ref=self.t_ref, three_r=self.three_r) | python | def cal_pth(self, v, temp):
"""
calculate thermal pressure
:param v: unit-cell volume in A^3
:param temp: temperature in K
:return: thermal pressure in GPa
"""
params_t = self._set_params(self.params_therm)
return constq_pth(v, temp, *params_t, self.n, self.z,
t_ref=self.t_ref, three_r=self.three_r) | ['def', 'cal_pth', '(', 'self', ',', 'v', ',', 'temp', ')', ':', 'params_t', '=', 'self', '.', '_set_params', '(', 'self', '.', 'params_therm', ')', 'return', 'constq_pth', '(', 'v', ',', 'temp', ',', '*', 'params_t', ',', 'self', '.', 'n', ',', 'self', '.', 'z', ',', 't_ref', '=', 'self', '.', 't_ref', ',', 'three_r', '=', 'self', '.', 'three_r', ')'] | calculate thermal pressure
:param v: unit-cell volume in A^3
:param temp: temperature in K
:return: thermal pressure in GPa | ['calculate', 'thermal', 'pressure'] | train | https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/scales/objs.py#L377-L387 |
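Call shape only (a hypothetical sketch; the concrete scale object and its fitted thermal parameters come from the pytheos scale definitions and are not shown here):
import numpy as np
# v = np.linspace(150., 165., 4)             # unit-cell volumes in A^3
# pth = jheos_scale.cal_pth(v, temp=1500.)   # -> thermal pressure in GPa at 1500 K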
8,793 | rbarrois/confutils | confutils/configfile.py | Section.remove | def remove(self, line):
"""Delete all lines matching the given line."""
nb = 0
for block in self.blocks:
nb += block.remove(line)
return nb | python | def remove(self, line):
"""Delete all lines matching the given line."""
nb = 0
for block in self.blocks:
nb += block.remove(line)
return nb | ['def', 'remove', '(', 'self', ',', 'line', ')', ':', 'nb', '=', '0', 'for', 'block', 'in', 'self', '.', 'blocks', ':', 'nb', '+=', 'block', '.', 'remove', '(', 'line', ')', 'return', 'nb'] | Delete all lines matching the given line. | ['Delete', 'all', 'lines', 'matching', 'the', 'given', 'line', '.'] | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L227-L233 |
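The delegation pattern in isolation (sketch): the section simply sums whatever each block reports.
class FakeBlock:
    def __init__(self, hits):
        self.hits = hits
    def remove(self, line):
        return self.hits          # pretend `hits` matching lines were deleted

blocks = [FakeBlock(0), FakeBlock(2), FakeBlock(1)]
print(sum(b.remove("foo = bar") for b in blocks))  # 3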
8,794 | takuti/flurs | flurs/utils/feature_hash.py | feature_hash | def feature_hash(feature, dim, seed=123):
"""Feature hashing.
Args:
feature (str): Target feature represented as string.
dim (int): Number of dimensions for a hash value.
seed (float): Seed of a MurmurHash3 hash function.
Returns:
numpy 1d array: one-hot-encoded feature vector for `s`.
"""
vec = np.zeros(dim)
i = mmh3.hash(feature, seed) % dim
vec[i] = 1
return vec | python | def feature_hash(feature, dim, seed=123):
"""Feature hashing.
Args:
feature (str): Target feature represented as string.
dim (int): Number of dimensions for a hash value.
seed (float): Seed of a MurmurHash3 hash function.
Returns:
numpy 1d array: one-hot-encoded feature vector for `s`.
"""
vec = np.zeros(dim)
i = mmh3.hash(feature, seed) % dim
vec[i] = 1
return vec | ['def', 'feature_hash', '(', 'feature', ',', 'dim', ',', 'seed', '=', '123', ')', ':', 'vec', '=', 'np', '.', 'zeros', '(', 'dim', ')', 'i', '=', 'mmh3', '.', 'hash', '(', 'feature', ',', 'seed', ')', '%', 'dim', 'vec', '[', 'i', ']', '=', '1', 'return', 'vec'] | Feature hashing.
Args:
feature (str): Target feature represented as string.
dim (int): Number of dimensions for a hash value.
seed (float): Seed of a MurmurHash3 hash function.
Returns:
numpy 1d array: one-hot-encoded feature vector for `s`. | ['Feature', 'hashing', '.'] | train | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/feature_hash.py#L27-L42 |
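
A usage sketch for the feature_hash row above (assumes the flurs package and its numpy/mmh3 dependencies are installed; the import path follows the file path shown in the row):

from flurs.utils.feature_hash import feature_hash

vec = feature_hash('user_id:42', 16)                  # one-hot vector of length 16
assert vec.shape == (16,) and vec.sum() == 1.0
# the same feature maps to the same bucket for a fixed seed
assert (vec == feature_hash('user_id:42', 16)).all()
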
8,795 | rigetti/grove | grove/amplification/amplification.py | decomposed_diffusion_program | def decomposed_diffusion_program(qubits: List[int]) -> Program:
"""
Constructs the diffusion operator used in Grover's Algorithm, acted on both sides by an
a Hadamard gate on each qubit. Note that this means that the matrix representation of this
operator is diag(1, -1, ..., -1). In particular, this decomposes the diffusion operator, which
is a :math:`2**{len(qubits)}\times2**{len(qubits)}` sparse matrix, into
:math:`\mathcal{O}(len(qubits)**2) single and two qubit gates.
See C. Lavor, L.R.U. Manssur, and R. Portugal (2003) `Grover's Algorithm: Quantum Database
Search`_ for more information.
.. _`Grover's Algorithm: Quantum Database Search`: https://arxiv.org/abs/quant-ph/0301079
:param qubits: A list of ints corresponding to the qubits to operate on.
The operator operates on bistrings of the form
``|qubits[0], ..., qubits[-1]>``.
"""
program = Program()
if len(qubits) == 1:
program.inst(Z(qubits[0]))
else:
program.inst([X(q) for q in qubits])
program.inst(H(qubits[-1]))
program.inst(RZ(-np.pi, qubits[0]))
program += (ControlledProgramBuilder()
.with_controls(qubits[:-1])
.with_target(qubits[-1])
.with_operation(X_GATE)
.with_gate_name(X_GATE_LABEL).build())
program.inst(RZ(-np.pi, qubits[0]))
program.inst(H(qubits[-1]))
program.inst([X(q) for q in qubits])
return program | python | def decomposed_diffusion_program(qubits: List[int]) -> Program:
"""
Constructs the diffusion operator used in Grover's Algorithm, acted on both sides by an
a Hadamard gate on each qubit. Note that this means that the matrix representation of this
operator is diag(1, -1, ..., -1). In particular, this decomposes the diffusion operator, which
is a :math:`2**{len(qubits)}\times2**{len(qubits)}` sparse matrix, into
:math:`\mathcal{O}(len(qubits)**2) single and two qubit gates.
See C. Lavor, L.R.U. Manssur, and R. Portugal (2003) `Grover's Algorithm: Quantum Database
Search`_ for more information.
.. _`Grover's Algorithm: Quantum Database Search`: https://arxiv.org/abs/quant-ph/0301079
:param qubits: A list of ints corresponding to the qubits to operate on.
The operator operates on bistrings of the form
``|qubits[0], ..., qubits[-1]>``.
"""
program = Program()
if len(qubits) == 1:
program.inst(Z(qubits[0]))
else:
program.inst([X(q) for q in qubits])
program.inst(H(qubits[-1]))
program.inst(RZ(-np.pi, qubits[0]))
program += (ControlledProgramBuilder()
.with_controls(qubits[:-1])
.with_target(qubits[-1])
.with_operation(X_GATE)
.with_gate_name(X_GATE_LABEL).build())
program.inst(RZ(-np.pi, qubits[0]))
program.inst(H(qubits[-1]))
program.inst([X(q) for q in qubits])
return program | ['def', 'decomposed_diffusion_program', '(', 'qubits', ':', 'List', '[', 'int', ']', ')', '->', 'Program', ':', 'program', '=', 'Program', '(', ')', 'if', 'len', '(', 'qubits', ')', '==', '1', ':', 'program', '.', 'inst', '(', 'Z', '(', 'qubits', '[', '0', ']', ')', ')', 'else', ':', 'program', '.', 'inst', '(', '[', 'X', '(', 'q', ')', 'for', 'q', 'in', 'qubits', ']', ')', 'program', '.', 'inst', '(', 'H', '(', 'qubits', '[', '-', '1', ']', ')', ')', 'program', '.', 'inst', '(', 'RZ', '(', '-', 'np', '.', 'pi', ',', 'qubits', '[', '0', ']', ')', ')', 'program', '+=', '(', 'ControlledProgramBuilder', '(', ')', '.', 'with_controls', '(', 'qubits', '[', ':', '-', '1', ']', ')', '.', 'with_target', '(', 'qubits', '[', '-', '1', ']', ')', '.', 'with_operation', '(', 'X_GATE', ')', '.', 'with_gate_name', '(', 'X_GATE_LABEL', ')', '.', 'build', '(', ')', ')', 'program', '.', 'inst', '(', 'RZ', '(', '-', 'np', '.', 'pi', ',', 'qubits', '[', '0', ']', ')', ')', 'program', '.', 'inst', '(', 'H', '(', 'qubits', '[', '-', '1', ']', ')', ')', 'program', '.', 'inst', '(', '[', 'X', '(', 'q', ')', 'for', 'q', 'in', 'qubits', ']', ')', 'return', 'program'] | Constructs the diffusion operator used in Grover's Algorithm, acted on both sides by an
a Hadamard gate on each qubit. Note that this means that the matrix representation of this
operator is diag(1, -1, ..., -1). In particular, this decomposes the diffusion operator, which
is a :math:`2**{len(qubits)}\times2**{len(qubits)}` sparse matrix, into
:math:`\mathcal{O}(len(qubits)**2) single and two qubit gates.
See C. Lavor, L.R.U. Manssur, and R. Portugal (2003) `Grover's Algorithm: Quantum Database
Search`_ for more information.
.. _`Grover's Algorithm: Quantum Database Search`: https://arxiv.org/abs/quant-ph/0301079
:param qubits: A list of ints corresponding to the qubits to operate on.
The operator operates on bistrings of the form
``|qubits[0], ..., qubits[-1]>``. | ['Constructs', 'the', 'diffusion', 'operator', 'used', 'in', 'Grover', 's', 'Algorithm', 'acted', 'on', 'both', 'sides', 'by', 'an', 'a', 'Hadamard', 'gate', 'on', 'each', 'qubit', '.', 'Note', 'that', 'this', 'means', 'that', 'the', 'matrix', 'representation', 'of', 'this', 'operator', 'is', 'diag', '(', '1', '-', '1', '...', '-', '1', ')', '.', 'In', 'particular', 'this', 'decomposes', 'the', 'diffusion', 'operator', 'which', 'is', 'a', ':', 'math', ':', '2', '**', '{', 'len', '(', 'qubits', ')', '}', '\\', 'times2', '**', '{', 'len', '(', 'qubits', ')', '}', 'sparse', 'matrix', 'into', ':', 'math', ':', '\\', 'mathcal', '{', 'O', '}', '(', 'len', '(', 'qubits', ')', '**', '2', ')', 'single', 'and', 'two', 'qubit', 'gates', '.'] | train | https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/amplification/amplification.py#L86-L118 |
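
A usage sketch for the row above (assumes the grove package and a compatible pyquil version are installed; the import path follows the file path shown in the row):

from grove.amplification.amplification import decomposed_diffusion_program

# Diffusion operator on three qubits; printing the Program lists the
# X, H, RZ and controlled-X instructions produced by the decomposition.
prog = decomposed_diffusion_program([0, 1, 2])
print(prog)
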
8,796 | numenta/htmresearch | projects/sdr_paper/scalar_sdrs.py | plotMatches2 | def plotMatches2(listofNValues, errors,
listOfScales, scaleErrors,
fileName = "images/scalar_matches.pdf"):
"""
Plot two figures side by side in an aspect ratio appropriate for the paper.
"""
w, h = figaspect(0.4)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(w,h))
plotMatches(listofNValues, errors, fileName=None, fig=fig, ax=ax1)
plotScaledMatches(listOfScales, scaleErrors, fileName=None, fig=fig, ax=ax2)
plt.savefig(fileName)
plt.close() | python | def plotMatches2(listofNValues, errors,
listOfScales, scaleErrors,
fileName = "images/scalar_matches.pdf"):
"""
Plot two figures side by side in an aspect ratio appropriate for the paper.
"""
w, h = figaspect(0.4)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(w,h))
plotMatches(listofNValues, errors, fileName=None, fig=fig, ax=ax1)
plotScaledMatches(listOfScales, scaleErrors, fileName=None, fig=fig, ax=ax2)
plt.savefig(fileName)
plt.close() | ['def', 'plotMatches2', '(', 'listofNValues', ',', 'errors', ',', 'listOfScales', ',', 'scaleErrors', ',', 'fileName', '=', '"images/scalar_matches.pdf"', ')', ':', 'w', ',', 'h', '=', 'figaspect', '(', '0.4', ')', 'fig', ',', '(', 'ax1', ',', 'ax2', ')', '=', 'plt', '.', 'subplots', '(', '1', ',', '2', ',', 'figsize', '=', '(', 'w', ',', 'h', ')', ')', 'plotMatches', '(', 'listofNValues', ',', 'errors', ',', 'fileName', '=', 'None', ',', 'fig', '=', 'fig', ',', 'ax', '=', 'ax1', ')', 'plotScaledMatches', '(', 'listOfScales', ',', 'scaleErrors', ',', 'fileName', '=', 'None', ',', 'fig', '=', 'fig', ',', 'ax', '=', 'ax2', ')', 'plt', '.', 'savefig', '(', 'fileName', ')', 'plt', '.', 'close', '(', ')'] | Plot two figures side by side in an aspect ratio appropriate for the paper. | ['Plot', 'two', 'figures', 'side', 'by', 'side', 'in', 'an', 'aspect', 'ratio', 'appropriate', 'for', 'the', 'paper', '.'] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/scalar_sdrs.py#L570-L583 |
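
A generic matplotlib sketch of the side-by-side layout used above (plotMatches and plotScaledMatches are project-specific and not reproduced here):

import matplotlib.pyplot as plt
from matplotlib.figure import figaspect

w, h = figaspect(0.4)                               # wide, paper-friendly aspect ratio
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(w, h))
ax1.semilogy([64, 128, 256], [1e-2, 1e-4, 1e-6])    # placeholder data
ax2.semilogy([64, 128, 256], [1e-3, 1e-5, 1e-7])
fig.savefig("example_matches.pdf")
plt.close(fig)
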
8,797 | wmayner/pyphi | pyphi/validate.py | tpm | def tpm(tpm, check_independence=True):
"""Validate a TPM.
The TPM can be in
* 2-dimensional state-by-state form,
* 2-dimensional state-by-node form, or
* multidimensional state-by-node form.
"""
see_tpm_docs = (
'See the documentation on TPM conventions and the `pyphi.Network` '
'object for more information on TPM forms.'
)
# Cast to np.array.
tpm = np.array(tpm)
# Get the number of nodes from the state-by-node TPM.
N = tpm.shape[-1]
if tpm.ndim == 2:
if not ((tpm.shape[0] == 2**N and tpm.shape[1] == N) or
(tpm.shape[0] == tpm.shape[1])):
raise ValueError(
'Invalid shape for 2-D TPM: {}\nFor a state-by-node TPM, '
'there must be ' '2^N rows and N columns, where N is the '
'number of nodes. State-by-state TPM must be square. '
'{}'.format(tpm.shape, see_tpm_docs))
if tpm.shape[0] == tpm.shape[1] and check_independence:
conditionally_independent(tpm)
elif tpm.ndim == (N + 1):
if tpm.shape != tuple([2] * N + [N]):
raise ValueError(
'Invalid shape for multidimensional state-by-node TPM: {}\n'
'The shape should be {} for {} nodes. {}'.format(
tpm.shape, ([2] * N) + [N], N, see_tpm_docs))
else:
raise ValueError(
'Invalid TPM: Must be either 2-dimensional or multidimensional. '
'{}'.format(see_tpm_docs))
return True | python | def tpm(tpm, check_independence=True):
"""Validate a TPM.
The TPM can be in
* 2-dimensional state-by-state form,
* 2-dimensional state-by-node form, or
* multidimensional state-by-node form.
"""
see_tpm_docs = (
'See the documentation on TPM conventions and the `pyphi.Network` '
'object for more information on TPM forms.'
)
# Cast to np.array.
tpm = np.array(tpm)
# Get the number of nodes from the state-by-node TPM.
N = tpm.shape[-1]
if tpm.ndim == 2:
if not ((tpm.shape[0] == 2**N and tpm.shape[1] == N) or
(tpm.shape[0] == tpm.shape[1])):
raise ValueError(
'Invalid shape for 2-D TPM: {}\nFor a state-by-node TPM, '
'there must be ' '2^N rows and N columns, where N is the '
'number of nodes. State-by-state TPM must be square. '
'{}'.format(tpm.shape, see_tpm_docs))
if tpm.shape[0] == tpm.shape[1] and check_independence:
conditionally_independent(tpm)
elif tpm.ndim == (N + 1):
if tpm.shape != tuple([2] * N + [N]):
raise ValueError(
'Invalid shape for multidimensional state-by-node TPM: {}\n'
'The shape should be {} for {} nodes. {}'.format(
tpm.shape, ([2] * N) + [N], N, see_tpm_docs))
else:
raise ValueError(
'Invalid TPM: Must be either 2-dimensional or multidimensional. '
'{}'.format(see_tpm_docs))
return True | ['def', 'tpm', '(', 'tpm', ',', 'check_independence', '=', 'True', ')', ':', 'see_tpm_docs', '=', '(', "'See the documentation on TPM conventions and the `pyphi.Network` '", "'object for more information on TPM forms.'", ')', '# Cast to np.array.', 'tpm', '=', 'np', '.', 'array', '(', 'tpm', ')', '# Get the number of nodes from the state-by-node TPM.', 'N', '=', 'tpm', '.', 'shape', '[', '-', '1', ']', 'if', 'tpm', '.', 'ndim', '==', '2', ':', 'if', 'not', '(', '(', 'tpm', '.', 'shape', '[', '0', ']', '==', '2', '**', 'N', 'and', 'tpm', '.', 'shape', '[', '1', ']', '==', 'N', ')', 'or', '(', 'tpm', '.', 'shape', '[', '0', ']', '==', 'tpm', '.', 'shape', '[', '1', ']', ')', ')', ':', 'raise', 'ValueError', '(', "'Invalid shape for 2-D TPM: {}\\nFor a state-by-node TPM, '", "'there must be '", "'2^N rows and N columns, where N is the '", "'number of nodes. State-by-state TPM must be square. '", "'{}'", '.', 'format', '(', 'tpm', '.', 'shape', ',', 'see_tpm_docs', ')', ')', 'if', 'tpm', '.', 'shape', '[', '0', ']', '==', 'tpm', '.', 'shape', '[', '1', ']', 'and', 'check_independence', ':', 'conditionally_independent', '(', 'tpm', ')', 'elif', 'tpm', '.', 'ndim', '==', '(', 'N', '+', '1', ')', ':', 'if', 'tpm', '.', 'shape', '!=', 'tuple', '(', '[', '2', ']', '*', 'N', '+', '[', 'N', ']', ')', ':', 'raise', 'ValueError', '(', "'Invalid shape for multidimensional state-by-node TPM: {}\\n'", "'The shape should be {} for {} nodes. {}'", '.', 'format', '(', 'tpm', '.', 'shape', ',', '(', '[', '2', ']', '*', 'N', ')', '+', '[', 'N', ']', ',', 'N', ',', 'see_tpm_docs', ')', ')', 'else', ':', 'raise', 'ValueError', '(', "'Invalid TPM: Must be either 2-dimensional or multidimensional. '", "'{}'", '.', 'format', '(', 'see_tpm_docs', ')', ')', 'return', 'True'] | Validate a TPM.
The TPM can be in
* 2-dimensional state-by-state form,
* 2-dimensional state-by-node form, or
* multidimensional state-by-node form. | ['Validate', 'a', 'TPM', '.'] | train | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/validate.py#L34-L71 |
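
A shape-only sketch of the three TPM forms this validator accepts (numpy, illustrative; the arrays are placeholders, not valid probability tables):

import numpy as np

N = 3
state_by_node_2d = np.zeros((2 ** N, N))      # 2^N rows, N columns
state_by_state = np.zeros((2 ** N, 2 ** N))   # square; also checked for conditional independence
state_by_node_nd = np.zeros([2] * N + [N])    # multidimensional form, shape (2, ..., 2, N)
for t in (state_by_node_2d, state_by_state, state_by_node_nd):
    print(t.shape)
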
8,798 | quantopian/zipline | zipline/pipeline/filters/filter.py | binary_operator | def binary_operator(op):
"""
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__.
"""
# When combining a Filter with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted interpretation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
def binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return NumExprFilter.create(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
)
elif isinstance(other, NumericalExpression):
# NumericalExpression overrides numerical ops to correctly handle
# merging of inputs. Look up and call the appropriate
# right-binding operator with ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if other.dtype != bool_dtype:
raise BadBinaryOperator(op, self, other)
if self is other:
return NumExprFilter.create(
"x_0 {op} x_0".format(op=op),
(self,),
)
return NumExprFilter.create(
"x_0 {op} x_1".format(op=op),
(self, other),
)
elif isinstance(other, int): # Note that this is true for bool as well
return NumExprFilter.create(
"x_0 {op} {constant}".format(op=op, constant=int(other)),
binds=(self,),
)
raise BadBinaryOperator(op, self, other)
binary_operator.__doc__ = "Binary Operator: '%s'" % op
return binary_operator | python | def binary_operator(op):
"""
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__.
"""
# When combining a Filter with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted interpretation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
def binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return NumExprFilter.create(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
)
elif isinstance(other, NumericalExpression):
# NumericalExpression overrides numerical ops to correctly handle
# merging of inputs. Look up and call the appropriate
# right-binding operator with ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if other.dtype != bool_dtype:
raise BadBinaryOperator(op, self, other)
if self is other:
return NumExprFilter.create(
"x_0 {op} x_0".format(op=op),
(self,),
)
return NumExprFilter.create(
"x_0 {op} x_1".format(op=op),
(self, other),
)
elif isinstance(other, int): # Note that this is true for bool as well
return NumExprFilter.create(
"x_0 {op} {constant}".format(op=op, constant=int(other)),
binds=(self,),
)
raise BadBinaryOperator(op, self, other)
binary_operator.__doc__ = "Binary Operator: '%s'" % op
return binary_operator | ['def', 'binary_operator', '(', 'op', ')', ':', '# When combining a Filter with a NumericalExpression, we use this', '# attrgetter instance to defer to the commuted interpretation of the', '# NumericalExpression operator.', 'commuted_method_getter', '=', 'attrgetter', '(', 'method_name_for_op', '(', 'op', ',', 'commute', '=', 'True', ')', ')', 'def', 'binary_operator', '(', 'self', ',', 'other', ')', ':', 'if', 'isinstance', '(', 'self', ',', 'NumericalExpression', ')', ':', 'self_expr', ',', 'other_expr', ',', 'new_inputs', '=', 'self', '.', 'build_binary_op', '(', 'op', ',', 'other', ',', ')', 'return', 'NumExprFilter', '.', 'create', '(', '"({left}) {op} ({right})"', '.', 'format', '(', 'left', '=', 'self_expr', ',', 'op', '=', 'op', ',', 'right', '=', 'other_expr', ',', ')', ',', 'new_inputs', ',', ')', 'elif', 'isinstance', '(', 'other', ',', 'NumericalExpression', ')', ':', '# NumericalExpression overrides numerical ops to correctly handle', '# merging of inputs. Look up and call the appropriate', '# right-binding operator with ourself as the input.', 'return', 'commuted_method_getter', '(', 'other', ')', '(', 'self', ')', 'elif', 'isinstance', '(', 'other', ',', 'Term', ')', ':', 'if', 'other', '.', 'dtype', '!=', 'bool_dtype', ':', 'raise', 'BadBinaryOperator', '(', 'op', ',', 'self', ',', 'other', ')', 'if', 'self', 'is', 'other', ':', 'return', 'NumExprFilter', '.', 'create', '(', '"x_0 {op} x_0"', '.', 'format', '(', 'op', '=', 'op', ')', ',', '(', 'self', ',', ')', ',', ')', 'return', 'NumExprFilter', '.', 'create', '(', '"x_0 {op} x_1"', '.', 'format', '(', 'op', '=', 'op', ')', ',', '(', 'self', ',', 'other', ')', ',', ')', 'elif', 'isinstance', '(', 'other', ',', 'int', ')', ':', '# Note that this is true for bool as well', 'return', 'NumExprFilter', '.', 'create', '(', '"x_0 {op} {constant}"', '.', 'format', '(', 'op', '=', 'op', ',', 'constant', '=', 'int', '(', 'other', ')', ')', ',', 'binds', '=', '(', 'self', ',', ')', ',', ')', 'raise', 'BadBinaryOperator', '(', 'op', ',', 'self', ',', 'other', ')', 'binary_operator', '.', '__doc__', '=', '"Binary Operator: \'%s\'"', '%', 'op', 'return', 'binary_operator'] | Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__. | ['Factory', 'function', 'for', 'making', 'binary', 'operator', 'methods', 'on', 'a', 'Filter', 'subclass', '.'] | train | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L62-L112 |
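
A minimal sketch of the same method-factory pattern with the NumExpr machinery stripped out (illustrative only; the Flag class and OPS table are hypothetical, not zipline's API):

OPS = {'&': lambda a, b: a and b, '|': lambda a, b: a or b}

def binary_operator(op):
    # Build a dunder method for `op`; mirrors the factory above in miniature.
    def method(self, other):
        other_value = other.value if isinstance(other, Flag) else bool(other)
        return Flag(OPS[op](self.value, other_value))
    method.__doc__ = "Binary Operator: '%s'" % op
    return method

class Flag:
    def __init__(self, value):
        self.value = bool(value)

Flag.__and__ = binary_operator('&')
Flag.__or__ = binary_operator('|')

assert (Flag(True) & Flag(False)).value is False
assert (Flag(True) | False).value is True
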
8,799 | materialsproject/pymatgen | pymatgen/analysis/adsorption.py | get_rot | def get_rot(slab):
"""
Gets the transformation to rotate the z axis into the miller index
"""
new_z = get_mi_vec(slab)
a, b, c = slab.lattice.matrix
new_x = a / np.linalg.norm(a)
new_y = np.cross(new_z, new_x)
x, y, z = np.eye(3)
rot_matrix = np.array([np.dot(*el) for el in
itertools.product([x, y, z],
[new_x, new_y, new_z])]).reshape(3, 3)
rot_matrix = np.transpose(rot_matrix)
sop = SymmOp.from_rotation_and_translation(rot_matrix)
return sop | python | def get_rot(slab):
"""
Gets the transformation to rotate the z axis into the miller index
"""
new_z = get_mi_vec(slab)
a, b, c = slab.lattice.matrix
new_x = a / np.linalg.norm(a)
new_y = np.cross(new_z, new_x)
x, y, z = np.eye(3)
rot_matrix = np.array([np.dot(*el) for el in
itertools.product([x, y, z],
[new_x, new_y, new_z])]).reshape(3, 3)
rot_matrix = np.transpose(rot_matrix)
sop = SymmOp.from_rotation_and_translation(rot_matrix)
return sop | ['def', 'get_rot', '(', 'slab', ')', ':', 'new_z', '=', 'get_mi_vec', '(', 'slab', ')', 'a', ',', 'b', ',', 'c', '=', 'slab', '.', 'lattice', '.', 'matrix', 'new_x', '=', 'a', '/', 'np', '.', 'linalg', '.', 'norm', '(', 'a', ')', 'new_y', '=', 'np', '.', 'cross', '(', 'new_z', ',', 'new_x', ')', 'x', ',', 'y', ',', 'z', '=', 'np', '.', 'eye', '(', '3', ')', 'rot_matrix', '=', 'np', '.', 'array', '(', '[', 'np', '.', 'dot', '(', '*', 'el', ')', 'for', 'el', 'in', 'itertools', '.', 'product', '(', '[', 'x', ',', 'y', ',', 'z', ']', ',', '[', 'new_x', ',', 'new_y', ',', 'new_z', ']', ')', ']', ')', '.', 'reshape', '(', '3', ',', '3', ')', 'rot_matrix', '=', 'np', '.', 'transpose', '(', 'rot_matrix', ')', 'sop', '=', 'SymmOp', '.', 'from_rotation_and_translation', '(', 'rot_matrix', ')', 'return', 'sop'] | Gets the transformation to rotate the z axis into the miller index | ['Gets', 'the', 'transformation', 'to', 'rotate', 'the', 'z', 'axis', 'into', 'the', 'miller', 'index'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/adsorption.py#L576-L590 |