Column                       Type            Min      Max
Unnamed: 0                   int64           0        10k
repository_name              stringlengths   7        54
func_path_in_repository      stringlengths   5        223
func_name                    stringlengths   1        134
whole_func_string            stringlengths   100      30.3k
language                     stringclasses   1 value
func_code_string             stringlengths   100      30.3k
func_code_tokens             stringlengths   138      33.2k
func_documentation_string    stringlengths   1        15k
func_documentation_tokens    stringlengths   5        5.14k
split_name                   stringclasses   1 value
func_code_url                stringlengths   91       315
3,900
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/device_directory/apis/default_api.py
DefaultApi.group_members_remove
def group_members_remove(self, device_group_id, body, **kwargs):  # noqa: E501
    """Remove a device from a group  # noqa: E501

    Remove one device from a group  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.group_members_remove(device_group_id, body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str device_group_id: The ID of the group (required)
    :param DeviceGroupManipulation body: Body of the request (required)
    :return: DevicePage
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.group_members_remove_with_http_info(device_group_id, body, **kwargs)  # noqa: E501
    else:
        (data) = self.group_members_remove_with_http_info(device_group_id, body, **kwargs)  # noqa: E501
        return data
python
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/device_directory/apis/default_api.py#L1830-L1851
3,901
rootpy/rootpy
rootpy/plotting/hist.py
_HistBase.fill_array
def fill_array(self, array, weights=None):
    """
    Fill this histogram with a NumPy array
    """
    try:
        try:
            from root_numpy import fill_hist as fill_func
        except ImportError:
            from root_numpy import fill_array as fill_func
    except ImportError:
        log.critical(
            "root_numpy is needed for Hist*.fill_array. "
            "Is it installed and importable?")
        raise
    fill_func(self, array, weights=weights)
python
train
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/hist.py#L1192-L1206
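A minimal usage sketch for fill_array above, assuming rootpy and root_numpy are importable; the binning (10 bins on [0, 1)) and sample sizes are arbitrary choices for illustration:

import numpy as np
from rootpy.plotting import Hist

# 1D histogram with 10 bins spanning [0, 1).
hist = Hist(10, 0.0, 1.0)
values = np.random.uniform(0.0, 1.0, size=1000)

# Unweighted fill: each sample adds 1 to its bin.
hist.fill_array(values)

# Weighted fill: each sample adds its weight instead.
hist.fill_array(values, weights=np.full(1000, 0.5))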
3,902
lowandrew/OLCTools
spadespipeline/sistr.py
Sistr.report
def report(self):
    """Creates sistr reports"""
    # Initialise strings to store report data
    header = '\t'.join(self.headers) + '\n'
    data = ''
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            # Each strain is a fresh row
            row = ''
            try:
                # Read in the output .json file into the metadata
                sample[self.analysistype].jsondata = json.load(open(sample[self.analysistype].jsonoutput, 'r'))
                # Set the name of the report.
                # Note that this is a tab-separated file, as there can be commas in the results
                sample[self.analysistype].report = os.path.join(sample[self.analysistype].reportdir,
                                                                '{}.tsv'.format(sample.name))
                # Iterate through all the headers to use as keys in the json-formatted output
                for category in self.headers:
                    # Tab separate all the results
                    row += '{}\t'.format(sample[self.analysistype].jsondata[0][category])
                    # Create attributes for each category
                    setattr(sample[self.analysistype], category,
                            str(sample[self.analysistype].jsondata[0][category]))
                # End the results with a newline
                row += '\n'
                data += row
                # Create and write headers and results to the strain-specific report
                with open(sample[self.analysistype].report, 'w') as strainreport:
                    strainreport.write(header)
                    strainreport.write(row)
            except (KeyError, AttributeError):
                pass
    # Create and write headers and cumulative results to the combined report
    with open(os.path.join(self.reportdir, 'sistr.tsv'), 'w') as report:
        report.write(header)
        report.write(data)
python
train
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/sistr.py#L59-L95
3,903
saltstack/salt
salt/modules/namecheap_domains_dns.py
get_list
def get_list(sld, tld):
    '''
    Gets a list of DNS servers associated with the requested domain.

    returns a dictionary of information about requested domain

    sld
        SLD of the domain name

    tld
        TLD of the domain name

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains_dns.get_list sld tld
    '''
    opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.getlist')
    opts['TLD'] = tld
    opts['SLD'] = sld

    response_xml = salt.utils.namecheap.get_request(opts)

    if response_xml is None:
        return {}

    domaindnsgetlistresult = response_xml.getElementsByTagName('DomainDNSGetListResult')[0]

    return salt.utils.namecheap.xml_to_dict(domaindnsgetlistresult)
python
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/namecheap_domains_dns.py#L87-L115
3,904
Fortran-FOSS-Programmers/ford
ford/sourceform.py
FortranCodeUnit.prune
def prune(self):
    """
    Remove anything which shouldn't be displayed.
    """
    def to_include(obj):
        inc = obj.permission in self.display
        if self.settings['hide_undoc'].lower() == 'true' and not obj.doc:
            inc = False
        return inc
    if self.obj == 'proc' and self.meta['proc_internals'] == 'false':
        self.functions = []
        self.subroutines = []
        self.types = []
        self.interfaces = []
        self.absinterfaces = []
        self.variables = []
    else:
        self.functions = [obj for obj in self.functions if to_include(obj)]
        self.subroutines = [obj for obj in self.subroutines if to_include(obj)]
        self.types = [obj for obj in self.types if to_include(obj)]
        self.interfaces = [obj for obj in self.interfaces if to_include(obj)]
        self.absinterfaces = [obj for obj in self.absinterfaces if to_include(obj)]
        self.variables = [obj for obj in self.variables if to_include(obj)]
        if hasattr(self, 'modprocedures'):
            self.modprocedures = [obj for obj in self.modprocedures if to_include(obj)]
        if hasattr(self, 'modsubroutines'):
            self.modsubroutines = [obj for obj in self.modsubroutines if to_include(obj)]
        if hasattr(self, 'modfunctions'):
            self.modfunctions = [obj for obj in self.modfunctions if to_include(obj)]
    # Recurse
    for obj in self.absinterfaces:
        obj.visible = True
    for obj in self.iterator('functions', 'subroutines', 'types', 'interfaces',
                             'modprocedures', 'modfunctions', 'modsubroutines'):
        obj.visible = True
    for obj in self.iterator('functions', 'subroutines', 'types',
                             'modprocedures', 'modfunctions', 'modsubroutines'):
        obj.prune()
python
train
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L998-L1034
3,905
objectrocket/python-client
objectrocket/instances/__init__.py
Instances._concrete_instance
def _concrete_instance(self, instance_doc):
    """Concretize an instance document.

    :param dict instance_doc: A document describing an instance. Should come from the API.
    :returns: A subclass of :py:class:`bases.BaseInstance`, or None.
    :rtype: :py:class:`bases.BaseInstance`
    """
    if not isinstance(instance_doc, dict):
        return None

    # Attempt to instantiate the appropriate class for the given instance document.
    try:
        service = instance_doc['service']
        cls = self._service_class_map[service]
        return cls(instance_document=instance_doc, instances=self)

    # If construction fails, log the exception and return None.
    except Exception as ex:
        logger.exception(ex)
        logger.error(
            'Instance construction failed. You probably need to upgrade to a more '
            'recent version of the client. Instance document which generated this '
            'warning: {}'.format(instance_doc)
        )
        return None
python
train
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/instances/__init__.py#L99-L123
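The method above is a plain dispatch-map pattern: a 'service' string in the document selects the concrete class. A self-contained sketch of the same idea, with hypothetical class names rather than objectrocket's actual ones:

class BaseInstance:
    def __init__(self, instance_document):
        self.doc = instance_document

class MongodbInstance(BaseInstance):
    pass

class RedisInstance(BaseInstance):
    pass

# Map each 'service' value in an instance document to its concrete class.
_service_class_map = {'mongodb': MongodbInstance, 'redis': RedisInstance}

def concretize(instance_doc):
    if not isinstance(instance_doc, dict):
        return None
    try:
        return _service_class_map[instance_doc['service']](instance_doc)
    except (KeyError, TypeError):
        return None

print(type(concretize({'service': 'redis'})).__name__)  # RedisInstance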
3,906
napalm-automation/napalm-logs
napalm_logs/device.py
NapalmLogsDeviceProc._emit
def _emit(self, **kwargs):
    '''
    Emit an OpenConfig object given a certain combination of
    fields mappeed in the config to the corresponding hierarchy.
    '''
    oc_dict = {}
    for mapping, result_key in kwargs['mapping']['variables'].items():
        result = kwargs[result_key]
        oc_dict = napalm_logs.utils.setval(mapping.format(**kwargs), result, oc_dict)
    for mapping, result in kwargs['mapping']['static'].items():
        oc_dict = napalm_logs.utils.setval(mapping.format(**kwargs), result, oc_dict)
    return oc_dict
python
train
https://github.com/napalm-automation/napalm-logs/blob/4b89100a6e4f994aa004f3ea42a06dc803a7ccb0/napalm_logs/device.py#L188-L200
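_emit leans on napalm_logs.utils.setval to write a value at a nested path inside a dict. A self-contained sketch of that idea; the '//' path separator below is an assumption for illustration, not necessarily napalm-logs' actual syntax:

def setval(path, value, d=None, sep='//'):
    # Create nested dictionaries along `path` and set the leaf to `value`.
    d = {} if d is None else d
    node = d
    keys = path.split(sep)
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value
    return d

print(setval('interfaces//eth0//state', 'up'))
# {'interfaces': {'eth0': {'state': 'up'}}}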
3,907
razor-x/dichalcogenides
dichalcogenides/parameters/parameters.py
Parameters.parameter_list
def parameter_list(data):
    """Create a list of parameter objects from a dict.

    :param data: Dictionary to convert to parameter list.
    :type data: dict

    :return: Parameter list.
    :rtype: dict
    """
    items = []
    for item in data:
        param = Parameter(item['name'], item['value'])
        if 'meta' in item:
            param.meta = item['meta']
        items.append(param)
    return items
python
train
https://github.com/razor-x/dichalcogenides/blob/0fa1995a3a328b679c9926f73239d0ecdc6e5d3d/dichalcogenides/parameters/parameters.py#L163-L177
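A usage sketch for parameter_list above, called as a plain function for illustration (in the source it lives on the Parameters class). The input shape — dicts with 'name', 'value', and optional 'meta' — is taken from the code; the field values themselves are made up:

data = [
    {'name': 'lattice_constant', 'value': 3.19, 'meta': {'unit': 'angstrom'}},
    {'name': 'band_gap', 'value': 1.9},
]
params = parameter_list(data)
print(params[0].meta)  # {'unit': 'angstrom'}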
3,908
hobson/pug-dj
pug/dj/miner/views.py
stats
def stats(request, date_offset=0, fields=None, title_prefix=None, model='WikiItem'):
    """
    In addition to chart data in data['chart'], send statistics data to view in data['stats']
    """
    data = {}
    modified_chart_data = data['chart']['chartdata']
    if 'y2' in data['chart']['chartdata']:
        matrix = db.Columns([modified_chart_data['y1'], modified_chart_data['y2']], ddof=0, tall=True)
    else:
        fields = ['date/time'] + fields
        matrix = db.Columns([modified_chart_data['x'], modified_chart_data['y']], ddof=0, tall=True)
    if fields and len(fields) > 1:
        fields = fields[:2]
    else:
        fields = [
            data['chart']['chartdata'].get('name1') or 'time',
            data['chart']['chartdata'].get('name2') or data['chart']['chartdata'].get('name') or 'value',
            ]
    fields = util.pluralize_field_names(fields)
    data.update({
        'stats': {
            'fields': fields,
            'heading': 'Statistics',
            'cov': zip(fields, matrix.cov()),
            'R': zip(fields, matrix.rho),
            },
        })
    data['chart']['chartdata'] = modified_chart_data
    data['chart']['chart_title'] = 'Time Series'
    return render_to_response('miner/stats.html', data)
python
train
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/miner/views.py#L65-L95
3,909
paramiko/paramiko
paramiko/channel.py
Channel.set_environment_variable
def set_environment_variable(self, name, value):
    """
    Set the value of an environment variable.

    .. warning::
        The server may reject this request depending on its ``AcceptEnv``
        setting; such rejections will fail silently (which is common client
        practice for this particular request type). Make sure you
        understand your server's configuration before using!

    :param str name: name of the environment variable
    :param str value: value of the environment variable

    :raises:
        `.SSHException` -- if the request was rejected or the channel was
        closed
    """
    m = Message()
    m.add_byte(cMSG_CHANNEL_REQUEST)
    m.add_int(self.remote_chanid)
    m.add_string("env")
    m.add_boolean(False)
    m.add_string(name)
    m.add_string(value)
    self.transport._send_user_message(m)
python
train
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/channel.py#L338-L362
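A usage sketch for set_environment_variable above; hostname and credentials are placeholders, and the server must list the variable in its AcceptEnv setting or the request is silently dropped:

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('example.com', username='user', password='secret')

# Open a session channel and request the environment variable on it
# before starting the remote command.
channel = client.get_transport().open_session()
channel.set_environment_variable('LC_ALL', 'en_US.UTF-8')
channel.exec_command('env | grep LC_ALL')
print(channel.recv(1024).decode())
client.close()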
3,910
estnltk/estnltk
estnltk/np_chunker.py
NounPhraseChunker._getPhrase
def _getPhrase( self, i, sentence, NPlabels ):
    '''  Fetches the full length phrase from the position i based on the
         existing NP phrase annotations (from NPlabels);
         Returns list of sentence tokens in the phrase, and indices of
         the phrase;
    '''
    phrase = []
    indices = []
    if 0 <= i and i < len(sentence) and NPlabels[i] == 'B':
        phrase = [ sentence[i] ]
        indices = [ i ]
        j = i + 1
        while ( j < len(sentence) ):
            if NPlabels[j] in ['B', '']:
                break
            else:
                phrase.append( sentence[j] )
                indices.append( j )
            j += 1
    return phrase, indices
python
train
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L209-L229
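The labels here follow a BIO-like scheme: 'B' opens a phrase, '' marks a token outside any phrase, and any other label (e.g. 'I') extends the current phrase. A standalone rephrasing of the loop with toy tokens, not estnltk's real data structures:

def get_phrase(i, sentence, labels):
    # Collect the phrase starting at position i, mirroring _getPhrase above.
    phrase, indices = [], []
    if 0 <= i < len(sentence) and labels[i] == 'B':
        phrase, indices = [sentence[i]], [i]
        j = i + 1
        while j < len(sentence) and labels[j] not in ('B', ''):
            phrase.append(sentence[j])
            indices.append(j)
            j += 1
    return phrase, indices

tokens = ['the', 'big', 'dog', 'barked']
labels = ['B', 'I', 'I', '']
print(get_phrase(0, tokens, labels))  # (['the', 'big', 'dog'], [0, 1, 2])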
3,911
resync/resync
resync/list_base_with_index.py
ListBaseWithIndex.as_xml_part
def as_xml_part(self, basename="/tmp/sitemap.xml", part_number=0):
    """Return a string of component sitemap number part_number.

    Used in the case of a large list that is split into component
    sitemaps.

    basename is used to create "index" links to the sitemapindex

    Q - what timestamp should be used?
    """
    if (not self.requires_multifile()):
        raise ListBaseIndexError(
            "Request for component sitemap for list with only %d entries when max_sitemap_entries is set to %s" %
            (len(self), str(self.max_sitemap_entries)))
    start = part_number * self.max_sitemap_entries
    if (start > len(self)):
        raise ListBaseIndexError(
            "Request for component sitemap with part_number too high, would start at entry %d yet the list has only %d entries" %
            (start, len(self)))
    stop = start + self.max_sitemap_entries
    if (stop > len(self)):
        stop = len(self)
    part = ListBase(itertools.islice(self.resources, start, stop))
    part.capability_name = self.capability_name
    part.default_capability()
    part.index = basename
    s = self.new_sitemap()
    return(s.resources_as_xml(part))
python
train
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/list_base_with_index.py#L242-L270
3,912
mozilla/treeherder
treeherder/seta/common.py
job_priority_index
def job_priority_index(job_priorities):
    '''This structure helps with finding data from the job priorities table'''
    jp_index = {}
    # Creating this data structure which reduces how many times we iterate through the DB rows
    for jp in job_priorities:
        key = jp.unique_identifier()

        # This is guaranteed by a unique composite index for these 3 fields in models.py
        if key in jp_index:
            msg = '"{}" should be a unique job priority and that is unexpected.'.format(key)
            raise DuplicateKeyError(msg)

        # (testtype, buildtype, platform)
        jp_index[key] = {'pk': jp.id, 'build_system_type': jp.buildsystem}

    return jp_index
python
train
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/common.py#L10-L25
3,913
spyder-ide/spyder-kernels
spyder_kernels/console/kernel.py
SpyderKernel._pdb_frame
def _pdb_frame(self):
    """Return current Pdb frame if there is any"""
    if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
        return self._pdb_obj.curframe
python
train
https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L47-L50
3,914
openid/JWTConnect-Python-OidcService
src/oidcservice/oidc/pkce.py
add_code_challenge
def add_code_challenge(request_args, service, **kwargs):
    """
    PKCE RFC 7636 support
    To be added as a post_construct method to an
    :py:class:`oidcservice.oidc.service.Authorization` instance

    :param service: The service that uses this function
    :param request_args: Set of request arguments
    :param kwargs: Extra set of keyword arguments
    :return: Updated set of request arguments
    """
    try:
        cv_len = service.service_context.config['code_challenge']['length']
    except KeyError:
        cv_len = 64  # Use default

    # code_verifier: string of length cv_len
    code_verifier = unreserved(cv_len)
    _cv = code_verifier.encode()

    try:
        _method = service.service_context.config['code_challenge']['method']
    except KeyError:
        _method = 'S256'

    try:
        # Pick hash method
        _hash_method = CC_METHOD[_method]
        # Use it on the code_verifier
        _hv = _hash_method(_cv).digest()
        # base64 encode the hash value
        code_challenge = b64e(_hv).decode('ascii')
    except KeyError:
        raise Unsupported(
            'PKCE Transformation method:{}'.format(_method))

    _item = Message(code_verifier=code_verifier, code_challenge_method=_method)
    service.store_item(_item, 'pkce', request_args['state'])

    request_args.update({"code_challenge": code_challenge,
                         "code_challenge_method": _method})

    return request_args
python
train
https://github.com/openid/JWTConnect-Python-OidcService/blob/759ab7adef30a7e3b9d75475e2971433b9613788/src/oidcservice/oidc/pkce.py#L9-L50
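The S256 transform used above is defined by RFC 7636 as the unpadded base64url encoding of the SHA-256 hash of the verifier, and can be reproduced with only the standard library (unreserved and b64e in the code are the project's own helpers):

import base64
import hashlib
import secrets

# Generate a random code_verifier; token_urlsafe draws from a subset of
# the RFC 7636 unreserved alphabet (48 bytes -> 64 characters).
verifier = secrets.token_urlsafe(48)

# S256: BASE64URL(SHA256(verifier)), with '=' padding stripped.
digest = hashlib.sha256(verifier.encode('ascii')).digest()
challenge = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

print(challenge)  # sent in the authorization request as code_challenge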
3,915
google/apitools
apitools/base/py/base_api.py
BaseApiClient.ProcessHttpRequest
def ProcessHttpRequest(self, http_request):
    """Hook for pre-processing of http requests."""
    http_request.headers.update(self.additional_http_headers)
    if self.log_request:
        logging.info('Making http %s to %s',
                     http_request.http_method, http_request.url)
        logging.info('Headers: %s', pprint.pformat(http_request.headers))
        if http_request.body:
            # TODO(craigcitro): Make this safe to print in the case of
            # non-printable body characters.
            logging.info('Body:\n%s',
                         http_request.loggable_body or http_request.body)
        else:
            logging.info('Body: (none)')
    return http_request
python
train
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/base_api.py#L416-L430
3,916
pydata/xarray
xarray/convert.py
_iris_cell_methods_to_str
def _iris_cell_methods_to_str(cell_methods_obj):
    """ Converts a Iris cell methods into a string
    """
    cell_methods = []
    for cell_method in cell_methods_obj:
        names = ''.join(['{}: '.format(n) for n in cell_method.coord_names])
        intervals = ' '.join(['interval: {}'.format(interval)
                              for interval in cell_method.intervals])
        comments = ' '.join(['comment: {}'.format(comment)
                             for comment in cell_method.comments])
        extra = ' '.join([intervals, comments]).strip()
        if extra:
            extra = ' ({})'.format(extra)
        cell_methods.append(names + cell_method.method + extra)
    return ' '.join(cell_methods)
python
train
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/convert.py#L194-L208
3,917
pypa/setuptools
setuptools/dist.py
Distribution.iter_distribution_names
def iter_distribution_names(self):
    """Yield all packages, modules, and extension names in distribution"""

    for pkg in self.packages or ():
        yield pkg

    for module in self.py_modules or ():
        yield module

    for ext in self.ext_modules or ():
        if isinstance(ext, tuple):
            name, buildinfo = ext
        else:
            name = ext.name
        if name.endswith('module'):
            name = name[:-6]
        yield name
python
train
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/dist.py#L1069-L1085
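A usage sketch for iter_distribution_names above; the package, module, and extension names are hypothetical, and the example shows the trailing 'module' suffix being stripped from extension names:

from setuptools import Distribution, Extension

dist = Distribution({
    'packages': ['mypkg', 'mypkg.sub'],
    'py_modules': ['standalone'],
    'ext_modules': [Extension('fastmathmodule', sources=['fastmath.c'])],
})

print(list(dist.iter_distribution_names()))
# ['mypkg', 'mypkg.sub', 'standalone', 'fastmath']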
3,918
linode/linode_api4-python
linode_api4/linode_client.py
LinodeClient.domain_create
def domain_create(self, domain, master=True, **kwargs):
    """
    Registers a new Domain on the acting user's account.  Make sure to point
    your registrar to Linode's nameservers so that Linode's DNS manager will
    correctly serve your domain.

    :param domain: The domain to register to Linode's DNS manager.
    :type domain: str
    :param master: Whether this is a master (defaults to true)
    :type master: bool

    :returns: The new Domain object.
    :rtype: Domain
    """
    params = {
        'domain': domain,
        'type': 'master' if master else 'slave',
    }
    params.update(kwargs)

    result = self.post('/domains', data=params)

    if not 'id' in result:
        raise UnexpectedResponseError('Unexpected response when creating Domain!', json=result)

    d = Domain(self, result['id'], result)
    return d
python
train
https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/linode_client.py#L1028-L1054
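A usage sketch for domain_create above; the token is a placeholder, and soa_email is assumed here to be forwarded to the API via **kwargs (the Linode API requires it for master zones):

from linode_api4 import LinodeClient

# Placeholder token; use a real personal access token.
client = LinodeClient('my-api-token')

# Register a master zone.
domain = client.domain_create('example.org', soa_email='admin@example.org')
print(domain.id)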
3,919
MisterY/gnucash-portfolio
gnucash_portfolio/securitiesaggregate.py
SecurityAggregate.get_prices
def get_prices(self) -> List[PriceModel]: """ Returns all available prices for security """ # return self.security.prices.order_by(Price.date) from pricedb.dal import Price pricedb = PriceDbApplication() repo = pricedb.get_price_repository() query = (repo.query(Price) .filter(Price.namespace == self.security.namespace) .filter(Price.symbol == self.security.mnemonic) .orderby_desc(Price.date) ) return query.all()
python
def get_prices(self) -> List[PriceModel]: """ Returns all available prices for security """ # return self.security.prices.order_by(Price.date) from pricedb.dal import Price pricedb = PriceDbApplication() repo = pricedb.get_price_repository() query = (repo.query(Price) .filter(Price.namespace == self.security.namespace) .filter(Price.symbol == self.security.mnemonic) .orderby_desc(Price.date) ) return query.all()
['def', 'get_prices', '(', 'self', ')', '->', 'List', '[', 'PriceModel', ']', ':', '# return self.security.prices.order_by(Price.date)', 'from', 'pricedb', '.', 'dal', 'import', 'Price', 'pricedb', '=', 'PriceDbApplication', '(', ')', 'repo', '=', 'pricedb', '.', 'get_price_repository', '(', ')', 'query', '=', '(', 'repo', '.', 'query', '(', 'Price', ')', '.', 'filter', '(', 'Price', '.', 'namespace', '==', 'self', '.', 'security', '.', 'namespace', ')', '.', 'filter', '(', 'Price', '.', 'symbol', '==', 'self', '.', 'security', '.', 'mnemonic', ')', '.', 'orderby_desc', '(', 'Price', '.', 'date', ')', ')', 'return', 'query', '.', 'all', '(', ')']
Returns all available prices for security
['Returns', 'all', 'available', 'prices', 'for', 'security']
train
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L236-L248
3,920
saltstack/salt
salt/modules/oracle.py
_parse_oratab
def _parse_oratab(sid): ''' Return ORACLE_HOME for a given SID found in oratab Note: only works with Unix-like minions ''' if __grains__.get('kernel') in ('Linux', 'AIX', 'FreeBSD', 'OpenBSD', 'NetBSD'): ORATAB = '/etc/oratab' elif __grains__.get('kernel') == 'SunOS': ORATAB = '/var/opt/oracle/oratab' else: # Windows has no oratab file raise CommandExecutionError( 'No uri defined for {0} and oratab not available in this OS'.format(sid)) with fopen(ORATAB, 'r') as f: while True: line = f.readline() if not line: break if line.startswith('#'): continue if sid in line.split(':')[0]: return line.split(':')[1] return None
python
def _parse_oratab(sid): ''' Return ORACLE_HOME for a given SID found in oratab Note: only works with Unix-like minions ''' if __grains__.get('kernel') in ('Linux', 'AIX', 'FreeBSD', 'OpenBSD', 'NetBSD'): ORATAB = '/etc/oratab' elif __grains__.get('kernel') == 'SunOS': ORATAB = '/var/opt/oracle/oratab' else: # Windows has no oratab file raise CommandExecutionError( 'No uri defined for {0} and oratab not available in this OS'.format(sid)) with fopen(ORATAB, 'r') as f: while True: line = f.readline() if not line: break if line.startswith('#'): continue if sid in line.split(':')[0]: return line.split(':')[1] return None
['def', '_parse_oratab', '(', 'sid', ')', ':', 'if', '__grains__', '.', 'get', '(', "'kernel'", ')', 'in', '(', "'Linux'", ',', "'AIX'", ',', "'FreeBSD'", ',', "'OpenBSD'", ',', "'NetBSD'", ')', ':', 'ORATAB', '=', "'/etc/oratab'", 'elif', '__grains__', '.', 'get', '(', "'kernel'", ')', '==', "'SunOS'", ':', 'ORATAB', '=', "'/var/opt/oracle/oratab'", 'else', ':', '# Windows has no oratab file', 'raise', 'CommandExecutionError', '(', "'No uri defined for {0} and oratab not available in this OS'", '.', 'format', '(', 'sid', ')', ')', 'with', 'fopen', '(', 'ORATAB', ',', "'r'", ')', 'as', 'f', ':', 'while', 'True', ':', 'line', '=', 'f', '.', 'readline', '(', ')', 'if', 'not', 'line', ':', 'break', 'if', 'line', '.', 'startswith', '(', "'#'", ')', ':', 'continue', 'if', 'sid', 'in', 'line', '.', 'split', '(', "':'", ')', '[', '0', ']', ':', 'return', 'line', '.', 'split', '(', "':'", ')', '[', '1', ']', 'return', 'None']
Return ORACLE_HOME for a given SID found in oratab Note: only works with Unix-like minions
['Return', 'ORACLE_HOME', 'for', 'a', 'given', 'SID', 'found', 'in', 'oratab']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/oracle.py#L143-L166
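_parse_oratab depends on salt's __grains__ and fopen, but the core lookup is plain string splitting over the well-known SID:ORACLE_HOME:startup_flag oratab format; a standalone sketch:

    line = 'ORCL:/u01/app/oracle/product/12.1.0/dbhome_1:N'  # sample oratab entry
    sid = 'ORCL'
    if sid in line.split(':')[0]:
        print(line.split(':')[1])  # /u01/app/oracle/product/12.1.0/dbhome_1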
3,921
robotools/fontMath
Lib/fontMath/mathGlyph.py
MathGlyph.drawPoints
def drawPoints(self, pointPen, filterRedundantPoints=False): """draw self using pointPen""" if filterRedundantPoints: pointPen = FilterRedundantPointPen(pointPen) for contour in self.contours: pointPen.beginPath(identifier=contour["identifier"]) for segmentType, pt, smooth, name, identifier in contour["points"]: pointPen.addPoint(pt=pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier) pointPen.endPath() for component in self.components: pointPen.addComponent(component["baseGlyph"], component["transformation"], identifier=component["identifier"])
python
def drawPoints(self, pointPen, filterRedundantPoints=False): """draw self using pointPen""" if filterRedundantPoints: pointPen = FilterRedundantPointPen(pointPen) for contour in self.contours: pointPen.beginPath(identifier=contour["identifier"]) for segmentType, pt, smooth, name, identifier in contour["points"]: pointPen.addPoint(pt=pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier) pointPen.endPath() for component in self.components: pointPen.addComponent(component["baseGlyph"], component["transformation"], identifier=component["identifier"])
['def', 'drawPoints', '(', 'self', ',', 'pointPen', ',', 'filterRedundantPoints', '=', 'False', ')', ':', 'if', 'filterRedundantPoints', ':', 'pointPen', '=', 'FilterRedundantPointPen', '(', 'pointPen', ')', 'for', 'contour', 'in', 'self', '.', 'contours', ':', 'pointPen', '.', 'beginPath', '(', 'identifier', '=', 'contour', '[', '"identifier"', ']', ')', 'for', 'segmentType', ',', 'pt', ',', 'smooth', ',', 'name', ',', 'identifier', 'in', 'contour', '[', '"points"', ']', ':', 'pointPen', '.', 'addPoint', '(', 'pt', '=', 'pt', ',', 'segmentType', '=', 'segmentType', ',', 'smooth', '=', 'smooth', ',', 'name', '=', 'name', ',', 'identifier', '=', 'identifier', ')', 'pointPen', '.', 'endPath', '(', ')', 'for', 'component', 'in', 'self', '.', 'components', ':', 'pointPen', '.', 'addComponent', '(', 'component', '[', '"baseGlyph"', ']', ',', 'component', '[', '"transformation"', ']', ',', 'identifier', '=', 'component', '[', '"identifier"', ']', ')']
draw self using pointPen
['draw', 'self', 'using', 'pointPen']
train
https://github.com/robotools/fontMath/blob/6abcb9d5a1ca19788fbde4418d7b5630c60990d8/Lib/fontMath/mathGlyph.py#L276-L286
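drawPoints drives any object implementing the point-pen protocol; below is a minimal logging pen, sketched from the calls the method makes (the class itself is hypothetical):

    class LoggingPointPen:
        def beginPath(self, identifier=None):
            print('beginPath', identifier)
        def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None):
            print('addPoint', pt, segmentType)
        def endPath(self):
            print('endPath')
        def addComponent(self, baseGlyph, transformation, identifier=None):
            print('addComponent', baseGlyph)

    # given a MathGlyph instance `glyph` (not constructed here):
    # glyph.drawPoints(LoggingPointPen())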
3,922
dariusbakunas/rawdisk
rawdisk/util/rawstruct.py
RawStruct.get_field
def get_field(self, offset, length, format): """Returns unpacked Python struct array. Args: offset (int): offset to byte array within structure length (int): how many bytes to unpack format (str): Python struct format string for unpacking See Also: https://docs.python.org/2/library/struct.html#format-characters """ return struct.unpack(format, self.data[offset:offset + length])[0]
python
def get_field(self, offset, length, format): """Returns unpacked Python struct array. Args: offset (int): offset to byte array within structure length (int): how many bytes to unpack format (str): Python struct format string for unpacking See Also: https://docs.python.org/2/library/struct.html#format-characters """ return struct.unpack(format, self.data[offset:offset + length])[0]
['def', 'get_field', '(', 'self', ',', 'offset', ',', 'length', ',', 'format', ')', ':', 'return', 'struct', '.', 'unpack', '(', 'format', ',', 'self', '.', 'data', '[', 'offset', ':', 'offset', '+', 'length', ']', ')', '[', '0', ']']
Returns unpacked Python struct array. Args: offset (int): offset to byte array within structure length (int): how many bytes to unpack format (str): Python struct format string for unpacking See Also: https://docs.python.org/2/library/struct.html#format-characters
['Returns', 'unpacked', 'Python', 'struct', 'array', '.']
train
https://github.com/dariusbakunas/rawdisk/blob/1dc9d0b377fe5da3c406ccec4abc238c54167403/rawdisk/util/rawstruct.py#L92-L103
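The helper is a thin wrapper over struct.unpack; a self-contained illustration of the same offset/length/format slicing:

    import struct

    data = b'\x2a\x00\x00\x00\xff'
    # little-endian unsigned 32-bit int at offset 0, length 4
    value = struct.unpack('<I', data[0:0 + 4])[0]
    print(value)  # 42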
3,923
chrisspen/dtree
dtree.py
Tree.train
def train(self, record): """ Incrementally updates the tree with the given sample record. """ assert self.data.class_attribute_name in record, \ "The class attribute must be present in the record." record = record.copy() self.sample_count += 1 self.tree.train(record)
python
def train(self, record): """ Incrementally updates the tree with the given sample record. """ assert self.data.class_attribute_name in record, \ "The class attribute must be present in the record." record = record.copy() self.sample_count += 1 self.tree.train(record)
['def', 'train', '(', 'self', ',', 'record', ')', ':', 'assert', 'self', '.', 'data', '.', 'class_attribute_name', 'in', 'record', ',', '"The class attribute must be present in the record."', 'record', '=', 'record', '.', 'copy', '(', ')', 'self', '.', 'sample_count', '+=', '1', 'self', '.', 'tree', '.', 'train', '(', 'record', ')']
Incrementally updates the tree with the given sample record.
['Incrementally', 'updates', 'the', 'tree', 'with', 'the', 'given', 'sample', 'record', '.']
train
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L1415-L1423
3,924
disqus/overseer
overseer/templatetags/overseer_helpers.py
truncatechars
def truncatechars(value, arg): """ Truncates a string after a certain number of chars. Argument: Number of chars to truncate after. """ try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. if len(value) > length: return value[:length] + '...' return value
python
def truncatechars(value, arg): """ Truncates a string after a certain number of chars. Argument: Number of chars to truncate after. """ try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. if len(value) > length: return value[:length] + '...' return value
['def', 'truncatechars', '(', 'value', ',', 'arg', ')', ':', 'try', ':', 'length', '=', 'int', '(', 'arg', ')', 'except', 'ValueError', ':', '# Invalid literal for int().', 'return', 'value', '# Fail silently.', 'if', 'len', '(', 'value', ')', '>', 'length', ':', 'return', 'value', '[', ':', 'length', ']', '+', "'...'", 'return', 'value']
Truncates a string after a certain number of chars. Argument: Number of chars to truncate after.
['Truncates', 'a', 'string', 'after', 'a', 'certain', 'number', 'of', 'chars', '.']
train
https://github.com/disqus/overseer/blob/b37573aba33b20aa86f89eb0c7e6f4d9905bedef/overseer/templatetags/overseer_helpers.py#L32-L44
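The filter's behavior, checked with a standalone copy of the same logic (the local function name is just for the sketch):

    def truncate(value, arg):
        try:
            length = int(arg)
        except ValueError:
            return value  # fail silently on a bad length argument
        if len(value) > length:
            return value[:length] + '...'
        return value

    print(truncate('overseer', '4'))  # over...
    print(truncate('ok', 'oops'))     # ok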
3,925
campbellr/smashrun-client
smashrun/client.py
Smashrun.refresh_token
def refresh_token(self, **kwargs): """Refresh the authentication token. :param str refresh_token: The refresh token to use. May be empty if retrieved with ``fetch_token``. """ if 'client_secret' not in kwargs: kwargs.update(client_secret=self.client_secret) if 'client_id' not in kwargs: kwargs.update(client_id=self.client_id) return self.session.refresh_token(token_url, **kwargs)
python
def refresh_token(self, **kwargs): """Refresh the authentication token. :param str refresh_token: The refresh token to use. May be empty if retrieved with ``fetch_token``. """ if 'client_secret' not in kwargs: kwargs.update(client_secret=self.client_secret) if 'client_id' not in kwargs: kwargs.update(client_id=self.client_id) return self.session.refresh_token(token_url, **kwargs)
['def', 'refresh_token', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', "'client_secret'", 'not', 'in', 'kwargs', ':', 'kwargs', '.', 'update', '(', 'client_secret', '=', 'self', '.', 'client_secret', ')', 'if', "'client_id'", 'not', 'in', 'kwargs', ':', 'kwargs', '.', 'update', '(', 'client_id', '=', 'self', '.', 'client_id', ')', 'return', 'self', '.', 'session', '.', 'refresh_token', '(', 'token_url', ',', '*', '*', 'kwargs', ')']
Refresh the authentication token. :param str refresh_token: The refresh token to use. May be empty if retrieved with ``fetch_token``.
['Refresh', 'the', 'authentication', 'token', '.']
train
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L53-L64
3,926
splunk/splunk-sdk-python
examples/analytics/bottle.py
Bottle.install
def install(self, plugin): ''' Add a plugin to the list of plugins and prepare it for being applied to all routes of this application. A plugin may be a simple decorator or an object that implements the :class:`Plugin` API. ''' if hasattr(plugin, 'setup'): plugin.setup(self) if not callable(plugin) and not hasattr(plugin, 'apply'): raise TypeError("Plugins must be callable or implement .apply()") self.plugins.append(plugin) self.reset() return plugin
python
def install(self, plugin): ''' Add a plugin to the list of plugins and prepare it for being applied to all routes of this application. A plugin may be a simple decorator or an object that implements the :class:`Plugin` API. ''' if hasattr(plugin, 'setup'): plugin.setup(self) if not callable(plugin) and not hasattr(plugin, 'apply'): raise TypeError("Plugins must be callable or implement .apply()") self.plugins.append(plugin) self.reset() return plugin
['def', 'install', '(', 'self', ',', 'plugin', ')', ':', 'if', 'hasattr', '(', 'plugin', ',', "'setup'", ')', ':', 'plugin', '.', 'setup', '(', 'self', ')', 'if', 'not', 'callable', '(', 'plugin', ')', 'and', 'not', 'hasattr', '(', 'plugin', ',', "'apply'", ')', ':', 'raise', 'TypeError', '(', '"Plugins must be callable or implement .apply()"', ')', 'self', '.', 'plugins', '.', 'append', '(', 'plugin', ')', 'self', '.', 'reset', '(', ')', 'return', 'plugin']
Add a plugin to the list of plugins and prepare it for being applied to all routes of this application. A plugin may be a simple decorator or an object that implements the :class:`Plugin` API.
['Add', 'a', 'plugin', 'to', 'the', 'list', 'of', 'plugins', 'and', 'prepare', 'it', 'for', 'being', 'applied', 'to', 'all', 'routes', 'of', 'this', 'application', '.', 'A', 'plugin', 'may', 'be', 'a', 'simple', 'decorator', 'or', 'an', 'object', 'that', 'implements', 'the', ':', 'class', ':', 'Plugin', 'API', '.']
train
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L455-L465
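A plugin can be a plain decorator, as the callable check above allows; a sketch assuming the standalone bottle package is importable:

    import time
    from bottle import Bottle

    def timer_plugin(callback):
        def wrapper(*args, **kwargs):
            start = time.time()
            result = callback(*args, **kwargs)
            print('request took %.3fs' % (time.time() - start))
            return result
        return wrapper

    app = Bottle()
    app.install(timer_plugin)  # applied to every route from now on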
3,927
Phelimb/ga4gh-mongo
ga4ghmongo/schema/models/variants.py
is_snp
def is_snp(reference_bases, alternate_bases): """ Return whether or not the variant is a SNP """ if len(reference_bases) > 1: return False for alt in alternate_bases: if alt is None: return False if alt not in ['A', 'C', 'G', 'T', 'N', '*']: return False return True
python
def is_snp(reference_bases, alternate_bases): """ Return whether or not the variant is a SNP """ if len(reference_bases) > 1: return False for alt in alternate_bases: if alt is None: return False if alt not in ['A', 'C', 'G', 'T', 'N', '*']: return False return True
['def', 'is_snp', '(', 'reference_bases', ',', 'alternate_bases', ')', ':', 'if', 'len', '(', 'reference_bases', ')', '>', '1', ':', 'return', 'False', 'for', 'alt', 'in', 'alternate_bases', ':', 'if', 'alt', 'is', 'None', ':', 'return', 'False', 'if', 'alt', 'not', 'in', '[', "'A'", ',', "'C'", ',', "'G'", ',', "'T'", ',', "'N'", ',', "'*'", ']', ':', 'return', 'False', 'return', 'True']
Return whether or not the variant is a SNP
['Return', 'whether', 'or', 'not', 'the', 'variant', 'is', 'a', 'SNP']
train
https://github.com/Phelimb/ga4gh-mongo/blob/5f5a3e1922be0e0d13af1874fad6eed5418ee761/ga4ghmongo/schema/models/variants.py#L280-L289
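A quick usage sketch, assuming the import path shown in this record:

    from ga4ghmongo.schema.models.variants import is_snp

    print(is_snp('A', ['T']))    # True: one reference base, valid alternate
    print(is_snp('AT', ['T']))   # False: multi-base reference
    print(is_snp('A', ['AT']))   # False: alternate is not a single allowed symbol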
3,928
saltstack/salt
salt/modules/mac_group.py
_list_gids
def _list_gids(): ''' Return a list of gids in use ''' output = __salt__['cmd.run']( ['dscacheutil', '-q', 'group'], output_loglevel='quiet', python_shell=False ) ret = set() for line in salt.utils.itertools.split(output, '\n'): if line.startswith('gid:'): ret.update(line.split()[1:]) return sorted(ret)
python
def _list_gids(): ''' Return a list of gids in use ''' output = __salt__['cmd.run']( ['dscacheutil', '-q', 'group'], output_loglevel='quiet', python_shell=False ) ret = set() for line in salt.utils.itertools.split(output, '\n'): if line.startswith('gid:'): ret.update(line.split()[1:]) return sorted(ret)
['def', '_list_gids', '(', ')', ':', 'output', '=', '__salt__', '[', "'cmd.run'", ']', '(', '[', "'dscacheutil'", ',', "'-q'", ',', "'group'", ']', ',', 'output_loglevel', '=', "'quiet'", ',', 'python_shell', '=', 'False', ')', 'ret', '=', 'set', '(', ')', 'for', 'line', 'in', 'salt', '.', 'utils', '.', 'itertools', '.', 'split', '(', 'output', ',', "'\\n'", ')', ':', 'if', 'line', '.', 'startswith', '(', "'gid:'", ')', ':', 'ret', '.', 'update', '(', 'line', '.', 'split', '(', ')', '[', '1', ':', ']', ')', 'return', 'sorted', '(', 'ret', ')']
Return a list of gids in use
['Return', 'a', 'list', 'of', 'gids', 'in', 'use']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_group.py#L75-L88
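dscacheutil emits records of 'name:'/'gid:' lines; the parsing reduces to the standalone sketch below (the sample output is invented for illustration):

    output = 'name: admin\ngid: 80\n\nname: staff\ngid: 20 80'
    gids = set()
    for line in output.split('\n'):
        if line.startswith('gid:'):
            gids.update(line.split()[1:])
    print(sorted(gids))  # ['20', '80'] (string sort, as in the original)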
3,929
juju/charm-helpers
charmhelpers/coordinator.py
BaseCoordinator.granted
def granted(self, lock): '''Return True if a previously requested lock has been granted''' unit = hookenv.local_unit() ts = self.requests[unit].get(lock) if ts and self.grants.get(unit, {}).get(lock) == ts: return True return False
python
def granted(self, lock): '''Return True if a previously requested lock has been granted''' unit = hookenv.local_unit() ts = self.requests[unit].get(lock) if ts and self.grants.get(unit, {}).get(lock) == ts: return True return False
['def', 'granted', '(', 'self', ',', 'lock', ')', ':', 'unit', '=', 'hookenv', '.', 'local_unit', '(', ')', 'ts', '=', 'self', '.', 'requests', '[', 'unit', ']', '.', 'get', '(', 'lock', ')', 'if', 'ts', 'and', 'self', '.', 'grants', '.', 'get', '(', 'unit', ',', '{', '}', ')', '.', 'get', '(', 'lock', ')', '==', 'ts', ':', 'return', 'True', 'return', 'False']
Return True if a previously requested lock has been granted
['Return', 'True', 'if', 'a', 'previously', 'requested', 'lock', 'has', 'been', 'granted']
train
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/coordinator.py#L338-L344
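The grant check is a timestamp comparison between two nested dicts; a standalone sketch with inferred (not confirmed) shapes for requests and grants:

    requests = {'unit/0': {'restart': '2019-01-01T00:00:00Z'}}
    grants = {'unit/0': {'restart': '2019-01-01T00:00:00Z'}}

    ts = requests['unit/0'].get('restart')
    print(bool(ts and grants.get('unit/0', {}).get('restart') == ts))  # True: granted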
3,930
SpikeInterface/spikeextractors
spikeextractors/tools.py
load_probe_file
def load_probe_file(recording, probe_file, channel_map=None, channel_groups=None): '''Loads channel information into recording extractor. If a .prb file is given, then 'location' and 'group' information for each channel is stored. If a .csv file is given, then it will only store 'location' Parameters ---------- recording: RecordingExtractor The recording extractor to load channel information into probe_file: str Path to probe file. Either .prb or .csv Returns --------- subRecordingExtractor ''' probe_file = Path(probe_file) if probe_file.suffix == '.prb': probe_dict = read_python(probe_file) if 'channel_groups' in probe_dict.keys(): ordered_channels = np.array([], dtype=int) groups = sorted(probe_dict['channel_groups'].keys()) for cgroup_id in groups: cgroup = probe_dict['channel_groups'][cgroup_id] for key_prop, prop_val in cgroup.items(): if key_prop == 'channels': ordered_channels = np.concatenate((ordered_channels, prop_val)) if list(ordered_channels) == recording.get_channel_ids(): subrecording = recording else: if not np.all([chan in recording.get_channel_ids() for chan in ordered_channels]): print('Some channels in the PRB file are not in the original recording') present_ordered_channels = [chan for chan in ordered_channels if chan in recording.get_channel_ids()] subrecording = SubRecordingExtractor(recording, channel_ids=present_ordered_channels) for cgroup_id in groups: cgroup = probe_dict['channel_groups'][cgroup_id] if 'channels' not in cgroup.keys() and len(groups) > 1: raise Exception("If more than one 'channel_group' is in the probe file, the 'channels' field" "for each channel group is required") elif 'channels' not in cgroup.keys(): channels_in_group = subrecording.get_num_channels() else: channels_in_group = len(cgroup['channels']) for key_prop, prop_val in cgroup.items(): if key_prop == 'channels': for i_ch, prop in enumerate(prop_val): if prop in subrecording.get_channel_ids(): subrecording.set_channel_property(prop, 'group', int(cgroup_id)) elif key_prop == 'geometry' or key_prop == 'location': if isinstance(prop_val, dict): if len(prop_val.keys()) != channels_in_group: print('geometry in PRB does not have the same length as channels in group') for (i_ch, prop) in prop_val.items(): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, 'location', prop) elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group: for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, 'location', prop) else: if isinstance(prop_val, dict) and len(prop_val.keys()) == channels_in_group: for (i_ch, prop) in prop_val.items(): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, key_prop, prop) elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group: for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, key_prop, prop) # create dummy locations if 'geometry' not in cgroup.keys() and 'location' not in cgroup.keys(): for i, chan in enumerate(subrecording.get_channel_ids()): subrecording.set_channel_property(chan, 'location', [i, 0]) else: raise AttributeError("'.prb' file should contain the 'channel_groups' field") elif probe_file.suffix == '.csv': if channel_map is not None: assert np.all([chan in channel_map for chan in recording.get_channel_ids()]), \ "all channel_ids in 'channel_map' must be in the original recording channel ids" subrecording = SubRecordingExtractor(recording, channel_ids=channel_map) else: subrecording = recording with probe_file.open() as csvfile: posreader = csv.reader(csvfile) row_count = 0 loaded_pos = [] for pos in (posreader): row_count += 1 loaded_pos.append(pos) assert len(subrecording.get_channel_ids()) == row_count, "The .csv file must contain as many " \ "rows as the number of channels in the recordings" for i_ch, pos in zip(subrecording.get_channel_ids(), loaded_pos): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, 'location', list(np.array(pos).astype(float))) if channel_groups is not None and len(channel_groups) == len(subrecording.get_channel_ids()): for i_ch, chg in zip(subrecording.get_channel_ids(), channel_groups): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, 'group', chg) else: raise NotImplementedError("Only .csv and .prb probe files can be loaded.") return subrecording
python
def load_probe_file(recording, probe_file, channel_map=None, channel_groups=None): '''Loads channel information into recording extractor. If a .prb file is given, then 'location' and 'group' information for each channel is stored. If a .csv file is given, then it will only store 'location' Parameters ---------- recording: RecordingExtractor The recording extractor to load channel information into probe_file: str Path to probe file. Either .prb or .csv Returns --------- subRecordingExtractor ''' probe_file = Path(probe_file) if probe_file.suffix == '.prb': probe_dict = read_python(probe_file) if 'channel_groups' in probe_dict.keys(): ordered_channels = np.array([], dtype=int) groups = sorted(probe_dict['channel_groups'].keys()) for cgroup_id in groups: cgroup = probe_dict['channel_groups'][cgroup_id] for key_prop, prop_val in cgroup.items(): if key_prop == 'channels': ordered_channels = np.concatenate((ordered_channels, prop_val)) if list(ordered_channels) == recording.get_channel_ids(): subrecording = recording else: if not np.all([chan in recording.get_channel_ids() for chan in ordered_channels]): print('Some channels in the PRB file are not in the original recording') present_ordered_channels = [chan for chan in ordered_channels if chan in recording.get_channel_ids()] subrecording = SubRecordingExtractor(recording, channel_ids=present_ordered_channels) for cgroup_id in groups: cgroup = probe_dict['channel_groups'][cgroup_id] if 'channels' not in cgroup.keys() and len(groups) > 1: raise Exception("If more than one 'channel_group' is in the probe file, the 'channels' field" "for each channel group is required") elif 'channels' not in cgroup.keys(): channels_in_group = subrecording.get_num_channels() else: channels_in_group = len(cgroup['channels']) for key_prop, prop_val in cgroup.items(): if key_prop == 'channels': for i_ch, prop in enumerate(prop_val): if prop in subrecording.get_channel_ids(): subrecording.set_channel_property(prop, 'group', int(cgroup_id)) elif key_prop == 'geometry' or key_prop == 'location': if isinstance(prop_val, dict): if len(prop_val.keys()) != channels_in_group: print('geometry in PRB does not have the same length as channels in group') for (i_ch, prop) in prop_val.items(): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, 'location', prop) elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group: for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, 'location', prop) else: if isinstance(prop_val, dict) and len(prop_val.keys()) == channels_in_group: for (i_ch, prop) in prop_val.items(): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, key_prop, prop) elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group: for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, key_prop, prop) # create dummy locations if 'geometry' not in cgroup.keys() and 'location' not in cgroup.keys(): for i, chan in enumerate(subrecording.get_channel_ids()): subrecording.set_channel_property(chan, 'location', [i, 0]) else: raise AttributeError("'.prb' file should contain the 'channel_groups' field") elif probe_file.suffix == '.csv': if channel_map is not None: assert np.all([chan in channel_map for chan in recording.get_channel_ids()]), \ "all channel_ids in 'channel_map' must be in the original recording channel ids" subrecording = SubRecordingExtractor(recording, channel_ids=channel_map) else: subrecording = recording with probe_file.open() as csvfile: posreader = csv.reader(csvfile) row_count = 0 loaded_pos = [] for pos in (posreader): row_count += 1 loaded_pos.append(pos) assert len(subrecording.get_channel_ids()) == row_count, "The .csv file must contain as many " \ "rows as the number of channels in the recordings" for i_ch, pos in zip(subrecording.get_channel_ids(), loaded_pos): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, 'location', list(np.array(pos).astype(float))) if channel_groups is not None and len(channel_groups) == len(subrecording.get_channel_ids()): for i_ch, chg in zip(subrecording.get_channel_ids(), channel_groups): if i_ch in subrecording.get_channel_ids(): subrecording.set_channel_property(i_ch, 'group', chg) else: raise NotImplementedError("Only .csv and .prb probe files can be loaded.") return subrecording
['def', 'load_probe_file', '(', 'recording', ',', 'probe_file', ',', 'channel_map', '=', 'None', ',', 'channel_groups', '=', 'None', ')', ':', 'probe_file', '=', 'Path', '(', 'probe_file', ')', 'if', 'probe_file', '.', 'suffix', '==', "'.prb'", ':', 'probe_dict', '=', 'read_python', '(', 'probe_file', ')', 'if', "'channel_groups'", 'in', 'probe_dict', '.', 'keys', '(', ')', ':', 'ordered_channels', '=', 'np', '.', 'array', '(', '[', ']', ',', 'dtype', '=', 'int', ')', 'groups', '=', 'sorted', '(', 'probe_dict', '[', "'channel_groups'", ']', '.', 'keys', '(', ')', ')', 'for', 'cgroup_id', 'in', 'groups', ':', 'cgroup', '=', 'probe_dict', '[', "'channel_groups'", ']', '[', 'cgroup_id', ']', 'for', 'key_prop', ',', 'prop_val', 'in', 'cgroup', '.', 'items', '(', ')', ':', 'if', 'key_prop', '==', "'channels'", ':', 'ordered_channels', '=', 'np', '.', 'concatenate', '(', '(', 'ordered_channels', ',', 'prop_val', ')', ')', 'if', 'list', '(', 'ordered_channels', ')', '==', 'recording', '.', 'get_channel_ids', '(', ')', ':', 'subrecording', '=', 'recording', 'else', ':', 'if', 'not', 'np', '.', 'all', '(', '[', 'chan', 'in', 'recording', '.', 'get_channel_ids', '(', ')', 'for', 'chan', 'in', 'ordered_channels', ']', ')', ':', 'print', '(', "'Some channels in the PRB file are not in the original recording'", ')', 'present_ordered_channels', '=', '[', 'chan', 'for', 'chan', 'in', 'ordered_channels', 'if', 'chan', 'in', 'recording', '.', 'get_channel_ids', '(', ')', ']', 'subrecording', '=', 'SubRecordingExtractor', '(', 'recording', ',', 'channel_ids', '=', 'present_ordered_channels', ')', 'for', 'cgroup_id', 'in', 'groups', ':', 'cgroup', '=', 'probe_dict', '[', "'channel_groups'", ']', '[', 'cgroup_id', ']', 'if', "'channels'", 'not', 'in', 'cgroup', '.', 'keys', '(', ')', 'and', 'len', '(', 'groups', ')', '>', '1', ':', 'raise', 'Exception', '(', '"If more than one \'channel_group\' is in the probe file, the \'channels\' field"', '"for each channel group is required"', ')', 'elif', "'channels'", 'not', 'in', 'cgroup', '.', 'keys', '(', ')', ':', 'channels_in_group', '=', 'subrecording', '.', 'get_num_channels', '(', ')', 'else', ':', 'channels_in_group', '=', 'len', '(', 'cgroup', '[', "'channels'", ']', ')', 'for', 'key_prop', ',', 'prop_val', 'in', 'cgroup', '.', 'items', '(', ')', ':', 'if', 'key_prop', '==', "'channels'", ':', 'for', 'i_ch', ',', 'prop', 'in', 'enumerate', '(', 'prop_val', ')', ':', 'if', 'prop', 'in', 'subrecording', '.', 'get_channel_ids', '(', ')', ':', 'subrecording', '.', 'set_channel_property', '(', 'prop', ',', "'group'", ',', 'int', '(', 'cgroup_id', ')', ')', 'elif', 'key_prop', '==', "'geometry'", 'or', 'key_prop', '==', "'location'", ':', 'if', 'isinstance', '(', 'prop_val', ',', 'dict', ')', ':', 'if', 'len', '(', 'prop_val', '.', 'keys', '(', ')', ')', '!=', 'channels_in_group', ':', 'print', '(', "'geometry in PRB does not have the same length as channels in group'", ')', 'for', '(', 'i_ch', ',', 'prop', ')', 'in', 'prop_val', '.', 'items', '(', ')', ':', 'if', 'i_ch', 'in', 'subrecording', '.', 'get_channel_ids', '(', ')', ':', 'subrecording', '.', 'set_channel_property', '(', 'i_ch', ',', "'location'", ',', 'prop', ')', 'elif', 'isinstance', '(', 'prop_val', ',', '(', 'list', ',', 'np', '.', 'ndarray', ')', ')', 'and', 'len', '(', 'prop_val', ')', '==', 'channels_in_group', ':', 'for', '(', 'i_ch', ',', 'prop', ')', 'in', 'zip', '(', 'subrecording', '.', 'get_channel_ids', '(', ')', ',', 'prop_val', ')', ':', 'if', 'i_ch', 'in', 'subrecording', '.', 'get_channel_ids', '(', ')', ':', 'subrecording', '.', 'set_channel_property', '(', 'i_ch', ',', "'location'", ',', 'prop', ')', 'else', ':', 'if', 'isinstance', '(', 'prop_val', ',', 'dict', ')', 'and', 'len', '(', 'prop_val', '.', 'keys', '(', ')', ')', '==', 'channels_in_group', ':', 'for', '(', 'i_ch', ',', 'prop', ')', 'in', 'prop_val', '.', 'items', '(', ')', ':', 'if', 'i_ch', 'in', 'subrecording', '.', 'get_channel_ids', '(', ')', ':', 'subrecording', '.', 'set_channel_property', '(', 'i_ch', ',', 'key_prop', ',', 'prop', ')', 'elif', 'isinstance', '(', 'prop_val', ',', '(', 'list', ',', 'np', '.', 'ndarray', ')', ')', 'and', 'len', '(', 'prop_val', ')', '==', 'channels_in_group', ':', 'for', '(', 'i_ch', ',', 'prop', ')', 'in', 'zip', '(', 'subrecording', '.', 'get_channel_ids', '(', ')', ',', 'prop_val', ')', ':', 'if', 'i_ch', 'in', 'subrecording', '.', 'get_channel_ids', '(', ')', ':', 'subrecording', '.', 'set_channel_property', '(', 'i_ch', ',', 'key_prop', ',', 'prop', ')', '# create dummy locations', 'if', "'geometry'", 'not', 'in', 'cgroup', '.', 'keys', '(', ')', 'and', "'location'", 'not', 'in', 'cgroup', '.', 'keys', '(', ')', ':', 'for', 'i', ',', 'chan', 'in', 'enumerate', '(', 'subrecording', '.', 'get_channel_ids', '(', ')', ')', ':', 'subrecording', '.', 'set_channel_property', '(', 'chan', ',', "'location'", ',', '[', 'i', ',', '0', ']', ')', 'else', ':', 'raise', 'AttributeError', '(', '"\'.prb\' file should contain the \'channel_groups\' field"', ')', 'elif', 'probe_file', '.', 'suffix', '==', "'.csv'", ':', 'if', 'channel_map', 'is', 'not', 'None', ':', 'assert', 'np', '.', 'all', '(', '[', 'chan', 'in', 'channel_map', 'for', 'chan', 'in', 'recording', '.', 'get_channel_ids', '(', ')', ']', ')', ',', '"all channel_ids in \'channel_map\' must be in the original recording channel ids"', 'subrecording', '=', 'SubRecordingExtractor', '(', 'recording', ',', 'channel_ids', '=', 'channel_map', ')', 'else', ':', 'subrecording', '=', 'recording', 'with', 'probe_file', '.', 'open', '(', ')', 'as', 'csvfile', ':', 'posreader', '=', 'csv', '.', 'reader', '(', 'csvfile', ')', 'row_count', '=', '0', 'loaded_pos', '=', '[', ']', 'for', 'pos', 'in', '(', 'posreader', ')', ':', 'row_count', '+=', '1', 'loaded_pos', '.', 'append', '(', 'pos', ')', 'assert', 'len', '(', 'subrecording', '.', 'get_channel_ids', '(', ')', ')', '==', 'row_count', ',', '"The .csv file must contain as many "', '"rows as the number of channels in the recordings"', 'for', 'i_ch', ',', 'pos', 'in', 'zip', '(', 'subrecording', '.', 'get_channel_ids', '(', ')', ',', 'loaded_pos', ')', ':', 'if', 'i_ch', 'in', 'subrecording', '.', 'get_channel_ids', '(', ')', ':', 'subrecording', '.', 'set_channel_property', '(', 'i_ch', ',', "'location'", ',', 'list', '(', 'np', '.', 'array', '(', 'pos', ')', '.', 'astype', '(', 'float', ')', ')', ')', 'if', 'channel_groups', 'is', 'not', 'None', 'and', 'len', '(', 'channel_groups', ')', '==', 'len', '(', 'subrecording', '.', 'get_channel_ids', '(', ')', ')', ':', 'for', 'i_ch', ',', 'chg', 'in', 'zip', '(', 'subrecording', '.', 'get_channel_ids', '(', ')', ',', 'channel_groups', ')', ':', 'if', 'i_ch', 'in', 'subrecording', '.', 'get_channel_ids', '(', ')', ':', 'subrecording', '.', 'set_channel_property', '(', 'i_ch', ',', "'group'", ',', 'chg', ')', 'else', ':', 'raise', 'NotImplementedError', '(', '"Only .csv and .prb probe files can be loaded."', ')', 'return', 'subrecording']
Loads channel information into recording extractor. If a .prb file is given, then 'location' and 'group' information for each channel is stored. If a .csv file is given, then it will only store 'location' Parameters ---------- recording: RecordingExtractor The recording extractor to load channel information into probe_file: str Path to probe file. Either .prb or .csv Returns --------- subRecordingExtractor
['Loads', 'channel', 'information', 'into', 'recording', 'extractor', '.', 'If', 'a', '.', 'prb', 'file', 'is', 'given', 'then', 'location', 'and', 'group', 'information', 'for', 'each', 'channel', 'is', 'stored', '.', 'If', 'a', '.', 'csv', 'file', 'is', 'given', 'then', 'it', 'will', 'only', 'store', 'location']
train
https://github.com/SpikeInterface/spikeextractors/blob/cbe3b8778a215f0bbd743af8b306856a87e438e1/spikeextractors/tools.py#L35-L136
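For reference, a minimal .prb body of the kind the '.prb' branch above parses via read_python; the tetrode layout is invented for illustration:

    channel_groups = {
        0: {
            'channels': [0, 1, 2, 3],
            'geometry': {0: [0.0, 0.0], 1: [0.0, 20.0],
                         2: [0.0, 40.0], 3: [0.0, 60.0]},
        },
    }

With such a file saved as tetrode.prb, load_probe_file(recording, 'tetrode.prb') would return a SubRecordingExtractor carrying 'group' and 'location' properties.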
3,931
twoolie/NBT
nbt/region.py
RegionFile.get_nbt
def get_nbt(self, x, z): """ Return a NBTFile of the specified chunk. Raise InconceivedChunk if the chunk is not included in the file. """ # TODO: cache results? data = self.get_blockdata(x, z) # This may raise a RegionFileFormatError. data = BytesIO(data) err = None try: nbt = NBTFile(buffer=data) if self.loc.x != None: x += self.loc.x*32 if self.loc.z != None: z += self.loc.z*32 nbt.loc = Location(x=x, z=z) return nbt # this may raise a MalformedFileError. Convert to ChunkDataError. except MalformedFileError as e: err = '%s' % e # avoid str(e) due to Unicode issues in Python 2. if err: raise ChunkDataError(err)
python
def get_nbt(self, x, z): """ Return a NBTFile of the specified chunk. Raise InconceivedChunk if the chunk is not included in the file. """ # TODO: cache results? data = self.get_blockdata(x, z) # This may raise a RegionFileFormatError. data = BytesIO(data) err = None try: nbt = NBTFile(buffer=data) if self.loc.x != None: x += self.loc.x*32 if self.loc.z != None: z += self.loc.z*32 nbt.loc = Location(x=x, z=z) return nbt # this may raise a MalformedFileError. Convert to ChunkDataError. except MalformedFileError as e: err = '%s' % e # avoid str(e) due to Unicode issues in Python 2. if err: raise ChunkDataError(err)
['def', 'get_nbt', '(', 'self', ',', 'x', ',', 'z', ')', ':', '# TODO: cache results?', 'data', '=', 'self', '.', 'get_blockdata', '(', 'x', ',', 'z', ')', '# This may raise a RegionFileFormatError.', 'data', '=', 'BytesIO', '(', 'data', ')', 'err', '=', 'None', 'try', ':', 'nbt', '=', 'NBTFile', '(', 'buffer', '=', 'data', ')', 'if', 'self', '.', 'loc', '.', 'x', '!=', 'None', ':', 'x', '+=', 'self', '.', 'loc', '.', 'x', '*', '32', 'if', 'self', '.', 'loc', '.', 'z', '!=', 'None', ':', 'z', '+=', 'self', '.', 'loc', '.', 'z', '*', '32', 'nbt', '.', 'loc', '=', 'Location', '(', 'x', '=', 'x', ',', 'z', '=', 'z', ')', 'return', 'nbt', '# this may raise a MalformedFileError. Convert to ChunkDataError.', 'except', 'MalformedFileError', 'as', 'e', ':', 'err', '=', "'%s'", '%', 'e', '# avoid str(e) due to Unicode issues in Python 2.', 'if', 'err', ':', 'raise', 'ChunkDataError', '(', 'err', ')']
Return a NBTFile of the specified chunk. Raise InconceivedChunk if the chunk is not included in the file.
['Return', 'a', 'NBTFile', 'of', 'the', 'specified', 'chunk', '.', 'Raise', 'InconceivedChunk', 'if', 'the', 'chunk', 'is', 'not', 'included', 'in', 'the', 'file', '.']
train
https://github.com/twoolie/NBT/blob/b06dd6cc8117d2788da1d8416e642d58bad45762/nbt/region.py#L585-L606
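A usage sketch, assuming an existing Anvil region file on disk (the path is a placeholder):

    from nbt.region import RegionFile

    region = RegionFile('r.0.0.mca')  # placeholder region file
    chunk = region.get_nbt(3, 5)      # chunk coordinates within the region (0-31)
    print(chunk.loc)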
3,932
heroku-python/django-postgrespool
django_postgrespool/base.py
DatabaseWrapper._dispose
def _dispose(self): """Dispose of the pool for this instance, closing all connections.""" self.close() # _DBProxy.dispose doesn't actually call dispose on the pool conn_params = self.get_connection_params() key = db_pool._serialize(**conn_params) try: pool = db_pool.pools[key] except KeyError: pass else: pool.dispose() del db_pool.pools[key]
python
def _dispose(self): """Dispose of the pool for this instance, closing all connections.""" self.close() # _DBProxy.dispose doesn't actually call dispose on the pool conn_params = self.get_connection_params() key = db_pool._serialize(**conn_params) try: pool = db_pool.pools[key] except KeyError: pass else: pool.dispose() del db_pool.pools[key]
['def', '_dispose', '(', 'self', ')', ':', 'self', '.', 'close', '(', ')', "# _DBProxy.dispose doesn't actually call dispose on the pool", 'conn_params', '=', 'self', '.', 'get_connection_params', '(', ')', 'key', '=', 'db_pool', '.', '_serialize', '(', '*', '*', 'conn_params', ')', 'try', ':', 'pool', '=', 'db_pool', '.', 'pools', '[', 'key', ']', 'except', 'KeyError', ':', 'pass', 'else', ':', 'pool', '.', 'dispose', '(', ')', 'del', 'db_pool', '.', 'pools', '[', 'key', ']']
Dispose of the pool for this instance, closing all connections.
['Dispose', 'of', 'the', 'pool', 'for', 'this', 'instance', 'closing', 'all', 'connections', '.']
train
https://github.com/heroku-python/django-postgrespool/blob/ce83a4d49c19eded86d86d5fcfa8daaeea5ef662/django_postgrespool/base.py#L91-L103
3,933
jobovy/galpy
galpy/orbit/OrbitTop.py
OrbitTop.plotJacobi
def plotJacobi(self,*args,**kwargs): """ NAME: plotJacobi PURPOSE: plot Jacobi(.) along the orbit INPUT: bovy_plot.bovy_plot inputs OUTPUT: figure to output device HISTORY: 2014-06-16 - Written - Bovy (IAS) """ if kwargs.pop('normed',False): kwargs['d2']= 'Jacobinorm' else: kwargs['d2']= 'Jacobi' return self.plot(*args,**kwargs)
python
def plotJacobi(self,*args,**kwargs): """ NAME: plotJacobi PURPOSE: plot Jacobi(.) along the orbit INPUT: bovy_plot.bovy_plot inputs OUTPUT: figure to output device HISTORY: 2014-06-16 - Written - Bovy (IAS) """ if kwargs.pop('normed',False): kwargs['d2']= 'Jacobinorm' else: kwargs['d2']= 'Jacobi' return self.plot(*args,**kwargs)
['def', 'plotJacobi', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'kwargs', '.', 'pop', '(', "'normed'", ',', 'False', ')', ':', 'kwargs', '[', "'d2'", ']', '=', "'Jacobinorm'", 'else', ':', 'kwargs', '[', "'d2'", ']', '=', "'Jacobi'", 'return', 'self', '.', 'plot', '(', '*', 'args', ',', '*', '*', 'kwargs', ')']
NAME: plotJacobi PURPOSE: plot Jacobi(.) along the orbit INPUT: bovy_plot.bovy_plot inputs OUTPUT: figure to output device HISTORY: 2014-06-16 - Written - Bovy (IAS)
['NAME', ':', 'plotJacobi', 'PURPOSE', ':', 'plot', 'Jacobi', '(', '.', ')', 'along', 'the', 'orbit', 'INPUT', ':', 'bovy_plot', '.', 'bovy_plot', 'inputs', 'OUTPUT', ':', 'figure', 'to', 'output', 'device', 'HISTORY', ':', '2014', '-', '06', '-', '16', '-', 'Written', '-', 'Bovy', '(', 'IAS', ')']
train
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L1848-L1865
3,934
ivanprjcts/sdklib
sdklib/html/base.py
HTML5libMixin.find_element_by_xpath
def find_element_by_xpath(self, xpath): """ Finds an element by xpath. :param xpath: The xpath locator of the element to find. :return: See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_ """ from sdklib.html.elem import Elem5lib return Elem5lib(self.html_obj.find(self._convert_xpath(xpath)))
python
def find_element_by_xpath(self, xpath): """ Finds an element by xpath. :param xpath: The xpath locator of the element to find. :return: See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_ """ from sdklib.html.elem import Elem5lib return Elem5lib(self.html_obj.find(self._convert_xpath(xpath)))
['def', 'find_element_by_xpath', '(', 'self', ',', 'xpath', ')', ':', 'from', 'sdklib', '.', 'html', '.', 'elem', 'import', 'Elem5lib', 'return', 'Elem5lib', '(', 'self', '.', 'html_obj', '.', 'find', '(', 'self', '.', '_convert_xpath', '(', 'xpath', ')', ')', ')']
Finds an element by xpath. :param xpath: The xpath locator of the element to find. :return: See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_
['Finds', 'an', 'element', 'by', 'xpath', '.']
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/html/base.py#L121-L132
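html5lib parses into ElementTree objects, so .find() only accepts ElementTree's limited XPath subset (see the linked docs); a standalone illustration of that syntax:

    import xml.etree.ElementTree as ET

    root = ET.fromstring('<html><body><p class="msg">hi</p></body></html>')
    print(root.find("./body/p[@class='msg']").text)  # hi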
3,935
HttpRunner/HttpRunner
httprunner/utils.py
dump_json_file
def dump_json_file(json_data, pwd_dir_path, dump_file_name): """ dump json data to file """ class PythonObjectEncoder(json.JSONEncoder): def default(self, obj): try: return super(PythonObjectEncoder, self).default(obj) except TypeError: return str(obj) logs_dir_path = os.path.join(pwd_dir_path, "logs") if not os.path.isdir(logs_dir_path): os.makedirs(logs_dir_path) dump_file_path = os.path.join(logs_dir_path, dump_file_name) try: with io.open(dump_file_path, 'w', encoding='utf-8') as outfile: if is_py2: outfile.write( unicode(json.dumps( json_data, indent=4, separators=(',', ':'), ensure_ascii=False, cls=PythonObjectEncoder )) ) else: json.dump( json_data, outfile, indent=4, separators=(',', ':'), ensure_ascii=False, cls=PythonObjectEncoder ) msg = "dump file: {}".format(dump_file_path) logger.color_print(msg, "BLUE") except TypeError as ex: msg = "Failed to dump json file: {}\nReason: {}".format(dump_file_path, ex) logger.color_print(msg, "RED")
python
def dump_json_file(json_data, pwd_dir_path, dump_file_name): """ dump json data to file """ class PythonObjectEncoder(json.JSONEncoder): def default(self, obj): try: return super(PythonObjectEncoder, self).default(obj) except TypeError: return str(obj) logs_dir_path = os.path.join(pwd_dir_path, "logs") if not os.path.isdir(logs_dir_path): os.makedirs(logs_dir_path) dump_file_path = os.path.join(logs_dir_path, dump_file_name) try: with io.open(dump_file_path, 'w', encoding='utf-8') as outfile: if is_py2: outfile.write( unicode(json.dumps( json_data, indent=4, separators=(',', ':'), ensure_ascii=False, cls=PythonObjectEncoder )) ) else: json.dump( json_data, outfile, indent=4, separators=(',', ':'), ensure_ascii=False, cls=PythonObjectEncoder ) msg = "dump file: {}".format(dump_file_path) logger.color_print(msg, "BLUE") except TypeError as ex: msg = "Failed to dump json file: {}\nReason: {}".format(dump_file_path, ex) logger.color_print(msg, "RED")
['def', 'dump_json_file', '(', 'json_data', ',', 'pwd_dir_path', ',', 'dump_file_name', ')', ':', 'class', 'PythonObjectEncoder', '(', 'json', '.', 'JSONEncoder', ')', ':', 'def', 'default', '(', 'self', ',', 'obj', ')', ':', 'try', ':', 'return', 'super', '(', 'PythonObjectEncoder', ',', 'self', ')', '.', 'default', '(', 'obj', ')', 'except', 'TypeError', ':', 'return', 'str', '(', 'obj', ')', 'logs_dir_path', '=', 'os', '.', 'path', '.', 'join', '(', 'pwd_dir_path', ',', '"logs"', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'logs_dir_path', ')', ':', 'os', '.', 'makedirs', '(', 'logs_dir_path', ')', 'dump_file_path', '=', 'os', '.', 'path', '.', 'join', '(', 'logs_dir_path', ',', 'dump_file_name', ')', 'try', ':', 'with', 'io', '.', 'open', '(', 'dump_file_path', ',', "'w'", ',', 'encoding', '=', "'utf-8'", ')', 'as', 'outfile', ':', 'if', 'is_py2', ':', 'outfile', '.', 'write', '(', 'unicode', '(', 'json', '.', 'dumps', '(', 'json_data', ',', 'indent', '=', '4', ',', 'separators', '=', '(', "','", ',', "':'", ')', ',', 'ensure_ascii', '=', 'False', ',', 'cls', '=', 'PythonObjectEncoder', ')', ')', ')', 'else', ':', 'json', '.', 'dump', '(', 'json_data', ',', 'outfile', ',', 'indent', '=', '4', ',', 'separators', '=', '(', "','", ',', "':'", ')', ',', 'ensure_ascii', '=', 'False', ',', 'cls', '=', 'PythonObjectEncoder', ')', 'msg', '=', '"dump file: {}"', '.', 'format', '(', 'dump_file_path', ')', 'logger', '.', 'color_print', '(', 'msg', ',', '"BLUE"', ')', 'except', 'TypeError', 'as', 'ex', ':', 'msg', '=', '"Failed to dump json file: {}\\nReason: {}"', '.', 'format', '(', 'dump_file_path', ',', 'ex', ')', 'logger', '.', 'color_print', '(', 'msg', ',', '"RED"', ')']
dump json data to file
['dump', 'json', 'data', 'to', 'file']
train
https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/utils.py#L527-L570
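A usage sketch with the import path shown in this record; the set value exercises PythonObjectEncoder's str() fallback:

    from httprunner.utils import dump_json_file

    # writes ./logs/demo.summary.json, creating logs/ if needed
    dump_json_file({'status': 'ok', 'tags': {1, 2}}, '.', 'demo.summary.json')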
3,936
CalebBell/fpi
fpi/drag.py
Flemmer_Banks
def Flemmer_Banks(Re): r'''Calculates drag coefficient of a smooth sphere using the method in [1]_ as described in [2]_. .. math:: C_D = \frac{24}{Re}10^E E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2} Parameters ---------- Re : float Reynolds number of the sphere, [-] Returns ------- Cd : float Drag coefficient [-] Notes ----- Range is Re <= 2E5 Examples -------- >>> Flemmer_Banks(200.) 0.7849169609270039 References ---------- .. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a Sphere." Powder Technology 48, no. 3 (November 1986): 217-21. doi:10.1016/0032-5910(86)80044-4. .. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz Ahmadi. "Development of Empirical Models with High Accuracy for Estimation of Drag Coefficient of Flow around a Smooth Sphere: An Evolutionary Approach." Powder Technology 257 (May 2014): 11-19. doi:10.1016/j.powtec.2014.02.045. ''' E = 0.383*Re**0.356 - 0.207*Re**0.396 - 0.143/(1 + (log10(Re))**2) Cd = 24./Re*10**E return Cd
python
def Flemmer_Banks(Re): r'''Calculates drag coefficient of a smooth sphere using the method in [1]_ as described in [2]_. .. math:: C_D = \frac{24}{Re}10^E E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2} Parameters ---------- Re : float Reynolds number of the sphere, [-] Returns ------- Cd : float Drag coefficient [-] Notes ----- Range is Re <= 2E5 Examples -------- >>> Flemmer_Banks(200.) 0.7849169609270039 References ---------- .. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a Sphere." Powder Technology 48, no. 3 (November 1986): 217-21. doi:10.1016/0032-5910(86)80044-4. .. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz Ahmadi. "Development of Empirical Models with High Accuracy for Estimation of Drag Coefficient of Flow around a Smooth Sphere: An Evolutionary Approach." Powder Technology 257 (May 2014): 11-19. doi:10.1016/j.powtec.2014.02.045. ''' E = 0.383*Re**0.356 - 0.207*Re**0.396 - 0.143/(1 + (log10(Re))**2) Cd = 24./Re*10**E return Cd
['def', 'Flemmer_Banks', '(', 'Re', ')', ':', 'E', '=', '0.383', '*', 'Re', '**', '0.356', '-', '0.207', '*', 'Re', '**', '0.396', '-', '0.143', '/', '(', '1', '+', '(', 'log10', '(', 'Re', ')', ')', '**', '2', ')', 'Cd', '=', '24.', '/', 'Re', '*', '10', '**', 'E', 'return', 'Cd']
r'''Calculates drag coefficient of a smooth sphere using the method in [1]_ as described in [2]_. .. math:: C_D = \frac{24}{Re}10^E E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2} Parameters ---------- Re : float Reynolds number of the sphere, [-] Returns ------- Cd : float Drag coefficient [-] Notes ----- Range is Re <= 2E5 Examples -------- >>> Flemmer_Banks(200.) 0.7849169609270039 References ---------- .. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a Sphere." Powder Technology 48, no. 3 (November 1986): 217-21. doi:10.1016/0032-5910(86)80044-4. .. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz Ahmadi. "Development of Empirical Models with High Accuracy for Estimation of Drag Coefficient of Flow around a Smooth Sphere: An Evolutionary Approach." Powder Technology 257 (May 2014): 11-19. doi:10.1016/j.powtec.2014.02.045.
['r', 'Calculates', 'drag', 'coefficient', 'of', 'a', 'smooth', 'sphere', 'using', 'the', 'method', 'in', '[', '1', ']', '_', 'as', 'described', 'in', '[', '2', ']', '_', '.']
train
https://github.com/CalebBell/fpi/blob/6e6da3b9d0c17e10cc0886c97bc1bb8aeba2cca5/fpi/drag.py#L383-L424
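The docstring example, reproduced as a runnable check with the import path from this record:

    from fpi.drag import Flemmer_Banks

    print(Flemmer_Banks(200.))  # 0.7849169609270039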
3,937
hishnash/djangochannelsrestframework
djangochannelsrestframework/consumers.py
DjangoViewAsConsumer.handle_action
async def handle_action(self, action: str, request_id: str, **kwargs): """ run the action. """ try: await self.check_permissions(action, **kwargs) if action not in self.actions: raise MethodNotAllowed(method=action) content, status = await self.call_view( action=action, **kwargs ) await self.reply( action=action, request_id=request_id, data=content, status=status ) except Exception as exc: await self.handle_exception( exc, action=action, request_id=request_id )
python
async def handle_action(self, action: str, request_id: str, **kwargs): """ run the action. """ try: await self.check_permissions(action, **kwargs) if action not in self.actions: raise MethodNotAllowed(method=action) content, status = await self.call_view( action=action, **kwargs ) await self.reply( action=action, request_id=request_id, data=content, status=status ) except Exception as exc: await self.handle_exception( exc, action=action, request_id=request_id )
['async', 'def', 'handle_action', '(', 'self', ',', 'action', ':', 'str', ',', 'request_id', ':', 'str', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'await', 'self', '.', 'check_permissions', '(', 'action', ',', '*', '*', 'kwargs', ')', 'if', 'action', 'not', 'in', 'self', '.', 'actions', ':', 'raise', 'MethodNotAllowed', '(', 'method', '=', 'action', ')', 'content', ',', 'status', '=', 'await', 'self', '.', 'call_view', '(', 'action', '=', 'action', ',', '*', '*', 'kwargs', ')', 'await', 'self', '.', 'reply', '(', 'action', '=', 'action', ',', 'request_id', '=', 'request_id', ',', 'data', '=', 'content', ',', 'status', '=', 'status', ')', 'except', 'Exception', 'as', 'exc', ':', 'await', 'self', '.', 'handle_exception', '(', 'exc', ',', 'action', '=', 'action', ',', 'request_id', '=', 'request_id', ')']
run the action.
['run', 'the', 'action', '.']
train
https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/consumers.py#L222-L249
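The request/reply shapes implied by the keyword arguments above, assuming reply() forwards them onto the outgoing JSON payload (not confirmed by this record; field values are placeholders):

    request = {'action': 'retrieve', 'request_id': 'req-1', 'pk': 42}
    reply = {'action': 'retrieve', 'request_id': 'req-1',
             'data': {'id': 42, 'name': 'example'}, 'status': 200}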
3,938
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py
brocade_aaa.service_password_encryption
def service_password_encryption(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") service = ET.SubElement(config, "service", xmlns="urn:brocade.com:mgmt:brocade-aaa") password_encryption = ET.SubElement(service, "password-encryption") callback = kwargs.pop('callback', self._callback) return callback(config)
python
def service_password_encryption(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") service = ET.SubElement(config, "service", xmlns="urn:brocade.com:mgmt:brocade-aaa") password_encryption = ET.SubElement(service, "password-encryption") callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'service_password_encryption', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'service', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"service"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-aaa"', ')', 'password_encryption', '=', 'ET', '.', 'SubElement', '(', 'service', ',', '"password-encryption"', ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py#L161-L169
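The generated payload, reproduced standalone with the standard library (same element names and namespace as above):

    import xml.etree.ElementTree as ET

    config = ET.Element('config')
    service = ET.SubElement(config, 'service', xmlns='urn:brocade.com:mgmt:brocade-aaa')
    ET.SubElement(service, 'password-encryption')
    print(ET.tostring(config).decode())
    # <config><service xmlns="urn:brocade.com:mgmt:brocade-aaa"><password-encryption /></service></config>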
3,939
bulkan/robotframework-requests
src/RequestsLibrary/RequestsKeywords.py
RequestsKeywords.update_session
def update_session(self, alias, headers=None, cookies=None): """Update Session Headers: update a HTTP Session Headers ``alias`` Robot Framework alias to identify the session ``headers`` Dictionary of headers merge into session """ session = self._cache.switch(alias) session.headers = merge_setting(headers, session.headers) session.cookies = merge_cookies(session.cookies, cookies)
python
def update_session(self, alias, headers=None, cookies=None): """Update Session Headers: update a HTTP Session Headers ``alias`` Robot Framework alias to identify the session ``headers`` Dictionary of headers merge into session """ session = self._cache.switch(alias) session.headers = merge_setting(headers, session.headers) session.cookies = merge_cookies(session.cookies, cookies)
['def', 'update_session', '(', 'self', ',', 'alias', ',', 'headers', '=', 'None', ',', 'cookies', '=', 'None', ')', ':', 'session', '=', 'self', '.', '_cache', '.', 'switch', '(', 'alias', ')', 'session', '.', 'headers', '=', 'merge_setting', '(', 'headers', ',', 'session', '.', 'headers', ')', 'session', '.', 'cookies', '=', 'merge_cookies', '(', 'session', '.', 'cookies', ',', 'cookies', ')']
Update Session Headers: update a HTTP Session Headers ``alias`` Robot Framework alias to identify the session ``headers`` Dictionary of headers merge into session
['Update', 'Session', 'Headers', ':', 'update', 'a', 'HTTP', 'Session', 'Headers']
train
https://github.com/bulkan/robotframework-requests/blob/11baa3277f1cb728712e26d996200703c15254a8/src/RequestsLibrary/RequestsKeywords.py#L449-L458
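merge_setting gives the incoming headers precedence over the session's; a standalone check against requests' real helper:

    from requests.sessions import merge_setting

    merged = merge_setting({'X-New': '1'}, {'X-Old': '2', 'X-New': '0'})
    print(dict(merged))  # {'X-Old': '2', 'X-New': '1'}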
3,940
lrq3000/pyFileFixity
pyFileFixity/lib/brownanrs/polynomial.py
Polynomial.derive
def derive(self): '''Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))''' #res = [0] * (len(self)-1) # pre-allocate the list, it will be one item shorter because the constant coefficient (x^0) will be removed #for i in _range(2, len(self)+1): # start at 2 to skip the first coeff which is useless since it's a constant (x^0) so we +1, and because we work in reverse (lower coefficients are on the right) so +1 again #res[-(i-1)] = (i-1) * self[-i] # self[-i] == coeff[i] and i-1 is the x exponent (eg: x^1, x^2, x^3, etc.) #return Polynomial(res) # One liner way to do it (also a bit faster too) #return Polynomial( [(i-1) * self[-i] for i in _range(2, len(self)+1)][::-1] ) # Another faster version L = len(self)-1 return Polynomial( [(L-i) * self[i] for i in _range(0, len(self)-1)] )
python
def derive(self): '''Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))''' #res = [0] * (len(self)-1) # pre-allocate the list, it will be one item shorter because the constant coefficient (x^0) will be removed #for i in _range(2, len(self)+1): # start at 2 to skip the first coeff which is useless since it's a constant (x^0) so we +1, and because we work in reverse (lower coefficients are on the right) so +1 again #res[-(i-1)] = (i-1) * self[-i] # self[-i] == coeff[i] and i-1 is the x exponent (eg: x^1, x^2, x^3, etc.) #return Polynomial(res) # One liner way to do it (also a bit faster too) #return Polynomial( [(i-1) * self[-i] for i in _range(2, len(self)+1)][::-1] ) # Another faster version L = len(self)-1 return Polynomial( [(L-i) * self[i] for i in _range(0, len(self)-1)] )
['def', 'derive', '(', 'self', ')', ':', '#res = [0] * (len(self)-1) # pre-allocate the list, it will be one item shorter because the constant coefficient (x^0) will be removed', "#for i in _range(2, len(self)+1): # start at 2 to skip the first coeff which is useless since it's a constant (x^0) so we +1, and because we work in reverse (lower coefficients are on the right) so +1 again", '#res[-(i-1)] = (i-1) * self[-i] # self[-i] == coeff[i] and i-1 is the x exponent (eg: x^1, x^2, x^3, etc.)', '#return Polynomial(res)', '# One liner way to do it (also a bit faster too)', '#return Polynomial( [(i-1) * self[-i] for i in _range(2, len(self)+1)][::-1] )', '# Another faster version', 'L', '=', 'len', '(', 'self', ')', '-', '1', 'return', 'Polynomial', '(', '[', '(', 'L', '-', 'i', ')', '*', 'self', '[', 'i', ']', 'for', 'i', 'in', '_range', '(', '0', ',', 'len', '(', 'self', ')', '-', '1', ')', ']', ')']
Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))
['Compute', 'the', 'formal', 'derivative', 'of', 'the', 'polynomial', ':', 'sum', '(', 'i', '*', 'coeff', '[', 'i', ']', 'x^', '(', 'i', '-', '1', '))']
train
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/polynomial.py#L358-L369
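A standalone sketch of the same formal-derivative rule on a plain coefficient list (highest-degree coefficient first, as above; `derive_coeffs` is an illustrative name):

def derive_coeffs(coeffs):
    # coeffs[0] is the highest-degree coefficient, coeffs[-1] the constant term.
    n = len(coeffs) - 1  # degree of the polynomial
    return [(n - i) * c for i, c in enumerate(coeffs[:-1])]

# d/dx (3x^2 + 2x + 5) = 6x + 2
assert derive_coeffs([3, 2, 5]) == [6, 2]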
3,941
minhhoit/yacms
yacms/generic/fields.py
KeywordsField.contribute_to_class
def contribute_to_class(self, cls, name):
    """
    Swap out any reference to ``KeywordsField`` with the
    ``KEYWORDS_FIELD_string`` field in ``search_fields``.
    """
    super(KeywordsField, self).contribute_to_class(cls, name)
    string_field_name = list(self.fields.keys())[0] % \
        self.related_field_name
    if hasattr(cls, "search_fields") and name in cls.search_fields:
        try:
            weight = cls.search_fields[name]
        except TypeError:
            # search_fields is a sequence.
            index = cls.search_fields.index(name)
            search_fields_type = type(cls.search_fields)
            cls.search_fields = list(cls.search_fields)
            cls.search_fields[index] = string_field_name
            cls.search_fields = search_fields_type(cls.search_fields)
        else:
            del cls.search_fields[name]
            cls.search_fields[string_field_name] = weight
python
def contribute_to_class(self, cls, name):
    """
    Swap out any reference to ``KeywordsField`` with the
    ``KEYWORDS_FIELD_string`` field in ``search_fields``.
    """
    super(KeywordsField, self).contribute_to_class(cls, name)
    string_field_name = list(self.fields.keys())[0] % \
        self.related_field_name
    if hasattr(cls, "search_fields") and name in cls.search_fields:
        try:
            weight = cls.search_fields[name]
        except TypeError:
            # search_fields is a sequence.
            index = cls.search_fields.index(name)
            search_fields_type = type(cls.search_fields)
            cls.search_fields = list(cls.search_fields)
            cls.search_fields[index] = string_field_name
            cls.search_fields = search_fields_type(cls.search_fields)
        else:
            del cls.search_fields[name]
            cls.search_fields[string_field_name] = weight
['def', 'contribute_to_class', '(', 'self', ',', 'cls', ',', 'name', ')', ':', 'super', '(', 'KeywordsField', ',', 'self', ')', '.', 'contribute_to_class', '(', 'cls', ',', 'name', ')', 'string_field_name', '=', 'list', '(', 'self', '.', 'fields', '.', 'keys', '(', ')', ')', '[', '0', ']', '%', 'self', '.', 'related_field_name', 'if', 'hasattr', '(', 'cls', ',', '"search_fields"', ')', 'and', 'name', 'in', 'cls', '.', 'search_fields', ':', 'try', ':', 'weight', '=', 'cls', '.', 'search_fields', '[', 'name', ']', 'except', 'TypeError', ':', '# search_fields is a sequence.', 'index', '=', 'cls', '.', 'search_fields', '.', 'index', '(', 'name', ')', 'search_fields_type', '=', 'type', '(', 'cls', '.', 'search_fields', ')', 'cls', '.', 'search_fields', '=', 'list', '(', 'cls', '.', 'search_fields', ')', 'cls', '.', 'search_fields', '[', 'index', ']', '=', 'string_field_name', 'cls', '.', 'search_fields', '=', 'search_fields_type', '(', 'cls', '.', 'search_fields', ')', 'else', ':', 'del', 'cls', '.', 'search_fields', '[', 'name', ']', 'cls', '.', 'search_fields', '[', 'string_field_name', ']', '=', 'weight']
Swap out any reference to ``KeywordsField`` with the ``KEYWORDS_FIELD_string`` field in ``search_fields``.
['Swap', 'out', 'any', 'reference', 'to', 'KeywordsField', 'with', 'the', 'KEYWORDS_FIELD_string', 'field', 'in', 'search_fields', '.']
train
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/fields.py#L206-L226
3,942
HEPData/hepdata-converter
hepdata_converter/parsers/oldhepdata_parser.py
OldHEPData._parse_header
def _parse_header(self, data):
    """Parse header (xheader or yheader)

    :param data: data to be parsed
    :type data: str
    :return: list with header's data
    :rtype: list
    """
    return_list = []

    headers = data.split(':')
    for header in headers:
        header = re.split(' IN ', header, flags=re.I)  # ignore case
        xheader = {'name': header[0].strip()}
        if len(header) > 1:
            xheader['units'] = header[1].strip()
        return_list.append(xheader)

    return return_list
python
def _parse_header(self, data):
    """Parse header (xheader or yheader)

    :param data: data to be parsed
    :type data: str
    :return: list with header's data
    :rtype: list
    """
    return_list = []

    headers = data.split(':')
    for header in headers:
        header = re.split(' IN ', header, flags=re.I)  # ignore case
        xheader = {'name': header[0].strip()}
        if len(header) > 1:
            xheader['units'] = header[1].strip()
        return_list.append(xheader)

    return return_list
['def', '_parse_header', '(', 'self', ',', 'data', ')', ':', 'return_list', '=', '[', ']', 'headers', '=', 'data', '.', 'split', '(', "':'", ')', 'for', 'header', 'in', 'headers', ':', 'header', '=', 're', '.', 'split', '(', "' IN '", ',', 'header', ',', 'flags', '=', 're', '.', 'I', ')', '# ignore case', 'xheader', '=', '{', "'name'", ':', 'header', '[', '0', ']', '.', 'strip', '(', ')', '}', 'if', 'len', '(', 'header', ')', '>', '1', ':', 'xheader', '[', "'units'", ']', '=', 'header', '[', '1', ']', '.', 'strip', '(', ')', 'return_list', '.', 'append', '(', 'xheader', ')', 'return', 'return_list']
Parse header (xheader or yheader)

:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
['Parse', 'header', '(', 'xheader', 'or', 'yheader', ')']
train
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L552-L571
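A self-contained sketch of the same ' IN '-separated header parsing, runnable without the parser class (the sample header string is illustrative):

import re

def parse_header(data):
    # "NAME IN UNITS : NAME IN UNITS : ..." -> [{'name': ..., 'units': ...}, ...]
    parsed = []
    for part in data.split(':'):
        pieces = re.split(' IN ', part, flags=re.I)  # case-insensitive separator
        entry = {'name': pieces[0].strip()}
        if len(pieces) > 1:
            entry['units'] = pieces[1].strip()
        parsed.append(entry)
    return parsed

print(parse_header('PT IN GEV : ABS(YRAP)'))
# [{'name': 'PT', 'units': 'GEV'}, {'name': 'ABS(YRAP)'}]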
3,943
6809/MC6809
MC6809/components/mc6809_base.py
CPUBase.burst_run
def burst_run(self):
    """
    Run CPU as fast as Python can...
    """
    # https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Avoiding_dots...
    get_and_call_next_op = self.get_and_call_next_op

    for __ in range(self.outer_burst_op_count):
        for __ in range(self.inner_burst_op_count):
            get_and_call_next_op()
        self.call_sync_callbacks()
python
def burst_run(self):
    """
    Run CPU as fast as Python can...
    """
    # https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Avoiding_dots...
    get_and_call_next_op = self.get_and_call_next_op

    for __ in range(self.outer_burst_op_count):
        for __ in range(self.inner_burst_op_count):
            get_and_call_next_op()
        self.call_sync_callbacks()
['def', 'burst_run', '(', 'self', ')', ':', '# https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Avoiding_dots...', 'get_and_call_next_op', '=', 'self', '.', 'get_and_call_next_op', 'for', '__', 'in', 'range', '(', 'self', '.', 'outer_burst_op_count', ')', ':', 'for', '__', 'in', 'range', '(', 'self', '.', 'inner_burst_op_count', ')', ':', 'get_and_call_next_op', '(', ')', 'self', '.', 'call_sync_callbacks', '(', ')']
Run CPU as fast as Python can...
['Run', 'CPU', 'as', 'fast', 'as', 'Python', 'can', '...']
train
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_base.py#L285-L294
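The "avoiding dots" trick referenced in the comment can be measured in isolation; a minimal sketch (the class and loop counts are illustrative):

import timeit

class Demo:
    def op(self):
        pass

    def run_with_dots(self, n):
        for _ in range(n):
            self.op()          # attribute lookup on every iteration

    def run_with_local(self, n):
        op = self.op           # bind the bound method once, outside the loop
        for _ in range(n):
            op()

d = Demo()
print(timeit.timeit(lambda: d.run_with_dots(10_000), number=100))
print(timeit.timeit(lambda: d.run_with_local(10_000), number=100))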
3,944
nickjj/ansigenome
ansigenome/export.py
Export.graph_dot
def graph_dot(self):
    """
    Export a graph of the data in dot format.
    """
    default_graphviz_template = """
digraph role_dependencies {
  size="%size"
  dpi=%dpi
  ratio="fill"
  landscape=false
  rankdir="BT";

  node [shape = "box",
        style = "rounded,filled",
        fillcolor = "lightgrey",
        fontsize = 20];

  edge [style = "dashed",
        dir = "forward",
        penwidth = 1.5];

%roles_list

%dependencies
}
"""

    roles_list = ""
    edges = ""

    # remove the darkest and brightest colors, still have 100+ colors
    adjusted_colors = c.X11_COLORS[125:-325]
    random.shuffle(adjusted_colors)
    backup_colors = adjusted_colors[:]

    for role, fields in sorted(self.report["roles"].iteritems()):
        name = utils.normalize_role(role, self.config)

        color_length = len(adjusted_colors) - 1

        # reset the colors if we run out
        if color_length == 0:
            adjusted_colors = backup_colors[:]
            color_length = len(adjusted_colors) - 1

        random_index = random.randint(1, color_length)

        roles_list += "  role_{0} [label = \"{1}\"]\n" \
                      .format(re.sub(r'[.-/]', '_', name), name)

        edge = '\n  edge [color = "{0}"];\n' \
               .format(adjusted_colors[random_index])

        del adjusted_colors[random_index]

        if fields["dependencies"]:
            dependencies = ""
            for dependency in sorted(fields["dependencies"]):
                dependency_name = utils.role_name(dependency)
                dependencies += "  role_{0} -> role_{1}\n".format(
                    re.sub(r'[.-/]', '_', name),
                    re.sub(r'[.-/]', '_',
                           utils.normalize_role(dependency_name,
                                                self.config)))

            edges += "{0}{1}\n".format(edge, dependencies)

    graphviz_template = default_graphviz_template.replace("%roles_list",
                                                          roles_list)
    graphviz_template = graphviz_template.replace("%dependencies", edges)
    graphviz_template = graphviz_template.replace("%size", self.size)
    graphviz_template = graphviz_template.replace("%dpi", str(self.dpi))

    if self.out_file:
        utils.string_to_file(self.out_file, graphviz_template)
    else:
        print graphviz_template
python
def graph_dot(self):
    """
    Export a graph of the data in dot format.
    """
    default_graphviz_template = """
digraph role_dependencies {
  size="%size"
  dpi=%dpi
  ratio="fill"
  landscape=false
  rankdir="BT";

  node [shape = "box",
        style = "rounded,filled",
        fillcolor = "lightgrey",
        fontsize = 20];

  edge [style = "dashed",
        dir = "forward",
        penwidth = 1.5];

%roles_list

%dependencies
}
"""

    roles_list = ""
    edges = ""

    # remove the darkest and brightest colors, still have 100+ colors
    adjusted_colors = c.X11_COLORS[125:-325]
    random.shuffle(adjusted_colors)
    backup_colors = adjusted_colors[:]

    for role, fields in sorted(self.report["roles"].iteritems()):
        name = utils.normalize_role(role, self.config)

        color_length = len(adjusted_colors) - 1

        # reset the colors if we run out
        if color_length == 0:
            adjusted_colors = backup_colors[:]
            color_length = len(adjusted_colors) - 1

        random_index = random.randint(1, color_length)

        roles_list += "  role_{0} [label = \"{1}\"]\n" \
                      .format(re.sub(r'[.-/]', '_', name), name)

        edge = '\n  edge [color = "{0}"];\n' \
               .format(adjusted_colors[random_index])

        del adjusted_colors[random_index]

        if fields["dependencies"]:
            dependencies = ""
            for dependency in sorted(fields["dependencies"]):
                dependency_name = utils.role_name(dependency)
                dependencies += "  role_{0} -> role_{1}\n".format(
                    re.sub(r'[.-/]', '_', name),
                    re.sub(r'[.-/]', '_',
                           utils.normalize_role(dependency_name,
                                                self.config)))

            edges += "{0}{1}\n".format(edge, dependencies)

    graphviz_template = default_graphviz_template.replace("%roles_list",
                                                          roles_list)
    graphviz_template = graphviz_template.replace("%dependencies", edges)
    graphviz_template = graphviz_template.replace("%size", self.size)
    graphviz_template = graphviz_template.replace("%dpi", str(self.dpi))

    if self.out_file:
        utils.string_to_file(self.out_file, graphviz_template)
    else:
        print graphviz_template
['def', 'graph_dot', '(', 'self', ')', ':', 'default_graphviz_template', '=', '"""\ndigraph role_dependencies {\n size="%size"\n dpi=%dpi\n ratio="fill"\n landscape=false\n rankdir="BT";\n\n node [shape = "box",\n style = "rounded,filled",\n fillcolor = "lightgrey",\n fontsize = 20];\n\n edge [style = "dashed",\n dir = "forward",\n penwidth = 1.5];\n\n%roles_list\n\n%dependencies\n}\n"""', 'roles_list', '=', '""', 'edges', '=', '""', '# remove the darkest and brightest colors, still have 100+ colors', 'adjusted_colors', '=', 'c', '.', 'X11_COLORS', '[', '125', ':', '-', '325', ']', 'random', '.', 'shuffle', '(', 'adjusted_colors', ')', 'backup_colors', '=', 'adjusted_colors', '[', ':', ']', 'for', 'role', ',', 'fields', 'in', 'sorted', '(', 'self', '.', 'report', '[', '"roles"', ']', '.', 'iteritems', '(', ')', ')', ':', 'name', '=', 'utils', '.', 'normalize_role', '(', 'role', ',', 'self', '.', 'config', ')', 'color_length', '=', 'len', '(', 'adjusted_colors', ')', '-', '1', '# reset the colors if we run out', 'if', 'color_length', '==', '0', ':', 'adjusted_colors', '=', 'backup_colors', '[', ':', ']', 'color_length', '=', 'len', '(', 'adjusted_colors', ')', '-', '1', 'random_index', '=', 'random', '.', 'randint', '(', '1', ',', 'color_length', ')', 'roles_list', '+=', '" role_{0} [label = \\"{1}\\"]\\n"', '.', 'format', '(', 're', '.', 'sub', '(', "r'[.-/]'", ',', "'_'", ',', 'name', ')', ',', 'name', ')', 'edge', '=', '\'\\n edge [color = "{0}"];\\n\'', '.', 'format', '(', 'adjusted_colors', '[', 'random_index', ']', ')', 'del', 'adjusted_colors', '[', 'random_index', ']', 'if', 'fields', '[', '"dependencies"', ']', ':', 'dependencies', '=', '""', 'for', 'dependency', 'in', 'sorted', '(', 'fields', '[', '"dependencies"', ']', ')', ':', 'dependency_name', '=', 'utils', '.', 'role_name', '(', 'dependency', ')', 'dependencies', '+=', '" role_{0} -> role_{1}\\n"', '.', 'format', '(', 're', '.', 'sub', '(', "r'[.-/]'", ',', "'_'", ',', 'name', ')', ',', 're', '.', 'sub', '(', "r'[.-/]'", ',', "'_'", ',', 'utils', '.', 'normalize_role', '(', 'dependency_name', ',', 'self', '.', 'config', ')', ')', ')', 'edges', '+=', '"{0}{1}\\n"', '.', 'format', '(', 'edge', ',', 'dependencies', ')', 'graphviz_template', '=', 'default_graphviz_template', '.', 'replace', '(', '"%roles_list"', ',', 'roles_list', ')', 'graphviz_template', '=', 'graphviz_template', '.', 'replace', '(', '"%dependencies"', ',', 'edges', ')', 'graphviz_template', '=', 'graphviz_template', '.', 'replace', '(', '"%size"', ',', 'self', '.', 'size', ')', 'graphviz_template', '=', 'graphviz_template', '.', 'replace', '(', '"%dpi"', ',', 'str', '(', 'self', '.', 'dpi', ')', ')', 'if', 'self', '.', 'out_file', ':', 'utils', '.', 'string_to_file', '(', 'self', '.', 'out_file', ',', 'graphviz_template', ')', 'else', ':', 'print', 'graphviz_template']
Export a graph of the data in dot format.
['Export', 'a', 'graph', 'of', 'the', 'data', 'in', 'dot', 'format', '.']
train
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L82-L161
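A minimal sketch of generating a Graphviz dot document from a dependency mapping, the core of the method above (the `deps` mapping is illustrative):

# Build a minimal Graphviz dot document from a dependency mapping.
deps = {"web": ["db", "cache"], "db": [], "cache": []}

lines = ["digraph role_dependencies {"]
for role in sorted(deps):
    lines.append('  role_{0} [label = "{0}"]'.format(role.replace(".", "_")))
for role, targets in sorted(deps.items()):
    for target in sorted(targets):
        lines.append("  role_{0} -> role_{1}".format(role, target))
lines.append("}")

print("\n".join(lines))  # paste into `dot -Tpng` to render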
3,945
KrzyHonk/bpmn-python
bpmn_python/graph/classes/events/catch_event_type.py
CatchEvent.set_parallel_multiple
def set_parallel_multiple(self, value):
    """
    Setter for 'parallel_multiple' field.
    :param value - a new value of 'parallel_multiple' field. Must be a boolean type. Does not accept None value.
    """
    if value is None or not isinstance(value, bool):
        raise TypeError("ParallelMultiple must be set to a bool")
    else:
        self.__parallel_multiple = value
python
def set_parallel_multiple(self, value):
    """
    Setter for 'parallel_multiple' field.
    :param value - a new value of 'parallel_multiple' field. Must be a boolean type. Does not accept None value.
    """
    if value is None or not isinstance(value, bool):
        raise TypeError("ParallelMultiple must be set to a bool")
    else:
        self.__parallel_multiple = value
['def', 'set_parallel_multiple', '(', 'self', ',', 'value', ')', ':', 'if', 'value', 'is', 'None', 'or', 'not', 'isinstance', '(', 'value', ',', 'bool', ')', ':', 'raise', 'TypeError', '(', '"ParallelMultiple must be set to a bool"', ')', 'else', ':', 'self', '.', '__parallel_multiple', '=', 'value']
Setter for 'parallel_multiple' field.
:param value - a new value of 'parallel_multiple' field. Must be a boolean type. Does not accept None value.
['Setter', 'for', 'parallel_multiple', 'field', '.', ':', 'param', 'value', '-', 'a', 'new', 'value', 'of', 'parallel_multiple', 'field', '.', 'Must', 'be', 'a', 'boolean', 'type', '.', 'Does', 'not', 'accept', 'None', 'value', '.']
train
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/graph/classes/events/catch_event_type.py#L32-L40
3,946
numenta/htmresearch
htmresearch/frameworks/sp_paper/sp_metrics.py
binaryEntropyVectorized
def binaryEntropyVectorized(x):
    """
    Calculate entropy for a list of binary random variables
    :param x: (numpy array) the probability of the variable to be 1.
    :return: entropy: (numpy array) entropy
    """
    entropy = - x*np.log2(x) - (1-x)*np.log2(1-x)
    entropy[x*(1 - x) == 0] = 0
    return entropy
python
def binaryEntropyVectorized(x):
    """
    Calculate entropy for a list of binary random variables
    :param x: (numpy array) the probability of the variable to be 1.
    :return: entropy: (numpy array) entropy
    """
    entropy = - x*np.log2(x) - (1-x)*np.log2(1-x)
    entropy[x*(1 - x) == 0] = 0
    return entropy
['def', 'binaryEntropyVectorized', '(', 'x', ')', ':', 'entropy', '=', '-', 'x', '*', 'np', '.', 'log2', '(', 'x', ')', '-', '(', '1', '-', 'x', ')', '*', 'np', '.', 'log2', '(', '1', '-', 'x', ')', 'entropy', '[', 'x', '*', '(', '1', '-', 'x', ')', '==', '0', ']', '=', '0', 'return', 'entropy']
Calculate entropy for a list of binary random variables
:param x: (numpy array) the probability of the variable to be 1.
:return: entropy: (numpy array) entropy
['Calculate', 'entropy', 'for', 'a', 'list', 'of', 'binary', 'random', 'variables', ':', 'param', 'x', ':', '(', 'numpy', 'array', ')', 'the', 'probability', 'of', 'the', 'variable', 'to', 'be', '1', '.', ':', 'return', ':', 'entropy', ':', '(', 'numpy', 'array', ')', 'entropy']
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L725-L733
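A standalone variant of the same masked binary-entropy computation (assuming only numpy; `binary_entropy` is an illustrative name):

import numpy as np

def binary_entropy(p):
    p = np.asarray(p, dtype=float)
    with np.errstate(divide='ignore', invalid='ignore'):  # silence log2(0)
        h = -p * np.log2(p) - (1 - p) * np.log2(1 - p)
    h[p * (1 - p) == 0] = 0  # define H(0) = H(1) = 0, as the row above does
    return h

print(binary_entropy([0.0, 0.25, 0.5, 1.0]))
# [0.         0.81127812 1.         0.        ]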
3,947
chukysoria/pyspotify-connect
spotifyconnect/__init__.py
_setup_logging
def _setup_logging():
    """Setup logging to log to nowhere by default.

    For details, see:
    http://docs.python.org/3/howto/logging.html#library-config

    Internal function.
    """
    import logging

    logger = logging.getLogger('spotify-connect')
    handler = logging.NullHandler()
    logger.addHandler(handler)
python
def _setup_logging():
    """Setup logging to log to nowhere by default.

    For details, see:
    http://docs.python.org/3/howto/logging.html#library-config

    Internal function.
    """
    import logging

    logger = logging.getLogger('spotify-connect')
    handler = logging.NullHandler()
    logger.addHandler(handler)
['def', '_setup_logging', '(', ')', ':', 'import', 'logging', 'logger', '=', 'logging', '.', 'getLogger', '(', "'spotify-connect'", ')', 'handler', '=', 'logging', '.', 'NullHandler', '(', ')', 'logger', '.', 'addHandler', '(', 'handler', ')']
Setup logging to log to nowhere by default.

For details, see:
http://docs.python.org/3/howto/logging.html#library-config

Internal function.
['Setup', 'logging', 'to', 'log', 'to', 'nowhere', 'by', 'default', '.']
train
https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/__init__.py#L19-L31
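A minimal sketch of the library-logging convention used above (standard library only; the logger name `mylib` is illustrative):

import logging

# Library code: attach a NullHandler so importing the library never
# complains about missing handlers and emits nothing by default.
logging.getLogger('mylib').addHandler(logging.NullHandler())

# Application code opts in to seeing the library's logs:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('mylib').debug('now visible')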
3,948
sibirrer/lenstronomy
lenstronomy/Util/prob_density.py
SkewGaussian.pdf_new
def pdf_new(self, x, mu, sigma, skw):
    """
    function with different parameterisation
    :param x:
    :param mu: mean
    :param sigma: sigma
    :param skw: skewness
    :return:
    """
    if skw > 1 or skw < -1:
        print("skewness %s out of range" % skw)
        skw = 1.
    e, w, a = self.map_mu_sigma_skw(mu, sigma, skw)
    pdf = self.pdf(x, e, w, a)
    return pdf
python
def pdf_new(self, x, mu, sigma, skw):
    """
    function with different parameterisation
    :param x:
    :param mu: mean
    :param sigma: sigma
    :param skw: skewness
    :return:
    """
    if skw > 1 or skw < -1:
        print("skewness %s out of range" % skw)
        skw = 1.
    e, w, a = self.map_mu_sigma_skw(mu, sigma, skw)
    pdf = self.pdf(x, e, w, a)
    return pdf
['def', 'pdf_new', '(', 'self', ',', 'x', ',', 'mu', ',', 'sigma', ',', 'skw', ')', ':', 'if', 'skw', '>', '1', 'or', 'skw', '<', '-', '1', ':', 'print', '(', '"skewness %s out of range"', '%', 'skw', ')', 'skw', '=', '1.', 'e', ',', 'w', ',', 'a', '=', 'self', '.', 'map_mu_sigma_skw', '(', 'mu', ',', 'sigma', ',', 'skw', ')', 'pdf', '=', 'self', '.', 'pdf', '(', 'x', ',', 'e', ',', 'w', ',', 'a', ')', 'return', 'pdf']
function with different parameterisation
:param x:
:param mu: mean
:param sigma: sigma
:param skw: skewness
:return:
['function', 'with', 'different', 'parameterisation', ':', 'param', 'x', ':', ':', 'param', 'mu', ':', 'mean', ':', 'param', 'sigma', ':', 'sigma', ':', 'param', 'skw', ':', 'skewness', ':', 'return', ':']
train
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/prob_density.py#L25-L39
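For comparison, scipy ships a skew-normal in the (shape, loc, scale) parameterisation rather than the (mu, sigma, skw) mapping above; a minimal sketch, assuming scipy is available:

import numpy as np
from scipy import stats

# scipy's skewnorm uses (shape a, loc, scale); positive a skews right.
x = np.linspace(-4, 4, 9)
print(stats.skewnorm.pdf(x, a=4, loc=0, scale=1))
print(stats.skewnorm.mean(a=4, loc=0, scale=1))  # mean shifts right for a > 0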
3,949
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py
brocade_ras_ext.show_support_save_status_output_show_support_save_status_message
def show_support_save_status_output_show_support_save_status_message(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    show_support_save_status = ET.Element("show_support_save_status")
    config = show_support_save_status
    output = ET.SubElement(show_support_save_status, "output")
    show_support_save_status = ET.SubElement(output, "show-support-save-status")
    message = ET.SubElement(show_support_save_status, "message")
    message.text = kwargs.pop('message')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def show_support_save_status_output_show_support_save_status_message(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    show_support_save_status = ET.Element("show_support_save_status")
    config = show_support_save_status
    output = ET.SubElement(show_support_save_status, "output")
    show_support_save_status = ET.SubElement(output, "show-support-save-status")
    message = ET.SubElement(show_support_save_status, "message")
    message.text = kwargs.pop('message')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
['def', 'show_support_save_status_output_show_support_save_status_message', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'show_support_save_status', '=', 'ET', '.', 'Element', '(', '"show_support_save_status"', ')', 'config', '=', 'show_support_save_status', 'output', '=', 'ET', '.', 'SubElement', '(', 'show_support_save_status', ',', '"output"', ')', 'show_support_save_status', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"show-support-save-status"', ')', 'message', '=', 'ET', '.', 'SubElement', '(', 'show_support_save_status', ',', '"message"', ')', 'message', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'message'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py#L255-L267
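A self-contained sketch of the same ElementTree nesting pattern (standard library only; the message text is illustrative):

import xml.etree.ElementTree as ET

root = ET.Element("show_support_save_status")
output = ET.SubElement(root, "output")           # child of root
status = ET.SubElement(output, "show-support-save-status")
message = ET.SubElement(status, "message")
message.text = "SS operation in progress"

print(ET.tostring(root, encoding="unicode"))
# <show_support_save_status><output><show-support-save-status>
#   <message>SS operation in progress</message>... (one line, abbreviated here)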
3,950
Faylixe/pygame_vkeyboard
pygame_vkeyboard/vkeyboard.py
VKeyboardLayout.set_uppercase
def set_uppercase(self, uppercase):
    """Sets layout uppercase state.

    :param uppercase: True if uppercase, False otherwise.
    """
    for row in self.rows:
        for key in row.keys:
            if type(key) == VKey:
                if uppercase:
                    key.value = key.value.upper()
                else:
                    key.value = key.value.lower()
python
def set_uppercase(self, uppercase):
    """Sets layout uppercase state.

    :param uppercase: True if uppercase, False otherwise.
    """
    for row in self.rows:
        for key in row.keys:
            if type(key) == VKey:
                if uppercase:
                    key.value = key.value.upper()
                else:
                    key.value = key.value.lower()
['def', 'set_uppercase', '(', 'self', ',', 'uppercase', ')', ':', 'for', 'row', 'in', 'self', '.', 'rows', ':', 'for', 'key', 'in', 'row', '.', 'keys', ':', 'if', 'type', '(', 'key', ')', '==', 'VKey', ':', 'if', 'uppercase', ':', 'key', '.', 'value', '=', 'key', '.', 'value', '.', 'upper', '(', ')', 'else', ':', 'key', '.', 'value', '=', 'key', '.', 'value', '.', 'lower', '(', ')']
Sets layout uppercase state.

:param uppercase: True if uppercase, False otherwise.
['Sets', 'layout', 'uppercase', 'state', '.']
train
https://github.com/Faylixe/pygame_vkeyboard/blob/72753a47b4d1d8bf22c9c51ca877aef742481d2a/pygame_vkeyboard/vkeyboard.py#L500-L511
3,951
wmayner/pyphi
pyphi/actual.py
Transition.potential_purviews
def potential_purviews(self, direction, mechanism, purviews=False):
    """Return all purviews that could belong to the |MIC|/|MIE|.

    Filters out trivially-reducible purviews.

    Args:
        direction (str): Either |CAUSE| or |EFFECT|.
        mechanism (tuple[int]): The mechanism of interest.

    Keyword Args:
        purviews (tuple[int]): Optional subset of purviews of interest.
    """
    system = self.system[direction]
    return [
        purview for purview in system.potential_purviews(
            direction, mechanism, purviews)
        if set(purview).issubset(self.purview_indices(direction))
    ]
python
def potential_purviews(self, direction, mechanism, purviews=False):
    """Return all purviews that could belong to the |MIC|/|MIE|.

    Filters out trivially-reducible purviews.

    Args:
        direction (str): Either |CAUSE| or |EFFECT|.
        mechanism (tuple[int]): The mechanism of interest.

    Keyword Args:
        purviews (tuple[int]): Optional subset of purviews of interest.
    """
    system = self.system[direction]
    return [
        purview for purview in system.potential_purviews(
            direction, mechanism, purviews)
        if set(purview).issubset(self.purview_indices(direction))
    ]
['def', 'potential_purviews', '(', 'self', ',', 'direction', ',', 'mechanism', ',', 'purviews', '=', 'False', ')', ':', 'system', '=', 'self', '.', 'system', '[', 'direction', ']', 'return', '[', 'purview', 'for', 'purview', 'in', 'system', '.', 'potential_purviews', '(', 'direction', ',', 'mechanism', ',', 'purviews', ')', 'if', 'set', '(', 'purview', ')', '.', 'issubset', '(', 'self', '.', 'purview_indices', '(', 'direction', ')', ')', ']']
Return all purviews that could belong to the |MIC|/|MIE|.

Filters out trivially-reducible purviews.

Args:
    direction (str): Either |CAUSE| or |EFFECT|.
    mechanism (tuple[int]): The mechanism of interest.

Keyword Args:
    purviews (tuple[int]): Optional subset of purviews of interest.
['Return', 'all', 'purviews', 'that', 'could', 'belong', 'to', 'the', '|MIC|', '/', '|MIE|', '.']
train
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/actual.py#L366-L383
3,952
smarie/python-valid8
valid8/entry_points.py
get_none_policy_text
def get_none_policy_text(none_policy,  # type: int
                         verbose=False  # type: bool
                         ):
    """
    Returns a user-friendly description of a NonePolicy taking into account NoneArgPolicy

    :param none_policy:
    :param verbose:
    :return:
    """
    if none_policy is NonePolicy.SKIP:
        return "accept None without performing validation" if verbose else 'SKIP'
    elif none_policy is NonePolicy.FAIL:
        return "fail on None without performing validation" if verbose else 'FAIL'
    elif none_policy is NonePolicy.VALIDATE:
        return "validate None as any other values" if verbose else 'VALIDATE'
    elif none_policy is NoneArgPolicy.SKIP_IF_NONABLE_ELSE_FAIL:
        return "accept None without validation if the argument is optional, otherwise fail on None" if verbose \
            else 'SKIP_IF_NONABLE_ELSE_FAIL'
    elif none_policy is NoneArgPolicy.SKIP_IF_NONABLE_ELSE_VALIDATE:
        return "accept None without validation if the argument is optional, otherwise validate None as any other " \
               "values" if verbose else 'SKIP_IF_NONABLE_ELSE_VALIDATE'
    else:
        raise ValueError('Invalid none_policy ' + str(none_policy))
python
def get_none_policy_text(none_policy,  # type: int
                         verbose=False  # type: bool
                         ):
    """
    Returns a user-friendly description of a NonePolicy taking into account NoneArgPolicy

    :param none_policy:
    :param verbose:
    :return:
    """
    if none_policy is NonePolicy.SKIP:
        return "accept None without performing validation" if verbose else 'SKIP'
    elif none_policy is NonePolicy.FAIL:
        return "fail on None without performing validation" if verbose else 'FAIL'
    elif none_policy is NonePolicy.VALIDATE:
        return "validate None as any other values" if verbose else 'VALIDATE'
    elif none_policy is NoneArgPolicy.SKIP_IF_NONABLE_ELSE_FAIL:
        return "accept None without validation if the argument is optional, otherwise fail on None" if verbose \
            else 'SKIP_IF_NONABLE_ELSE_FAIL'
    elif none_policy is NoneArgPolicy.SKIP_IF_NONABLE_ELSE_VALIDATE:
        return "accept None without validation if the argument is optional, otherwise validate None as any other " \
               "values" if verbose else 'SKIP_IF_NONABLE_ELSE_VALIDATE'
    else:
        raise ValueError('Invalid none_policy ' + str(none_policy))
['def', 'get_none_policy_text', '(', 'none_policy', ',', '# type: int', 'verbose', '=', 'False', '# type: bool', ')', ':', 'if', 'none_policy', 'is', 'NonePolicy', '.', 'SKIP', ':', 'return', '"accept None without performing validation"', 'if', 'verbose', 'else', "'SKIP'", 'elif', 'none_policy', 'is', 'NonePolicy', '.', 'FAIL', ':', 'return', '"fail on None without performing validation"', 'if', 'verbose', 'else', "'FAIL'", 'elif', 'none_policy', 'is', 'NonePolicy', '.', 'VALIDATE', ':', 'return', '"validate None as any other values"', 'if', 'verbose', 'else', "'VALIDATE'", 'elif', 'none_policy', 'is', 'NoneArgPolicy', '.', 'SKIP_IF_NONABLE_ELSE_FAIL', ':', 'return', '"accept None without validation if the argument is optional, otherwise fail on None"', 'if', 'verbose', 'else', "'SKIP_IF_NONABLE_ELSE_FAIL'", 'elif', 'none_policy', 'is', 'NoneArgPolicy', '.', 'SKIP_IF_NONABLE_ELSE_VALIDATE', ':', 'return', '"accept None without validation if the argument is optional, otherwise validate None as any other "', '"values"', 'if', 'verbose', 'else', "'SKIP_IF_NONABLE_ELSE_VALIDATE'", 'else', ':', 'raise', 'ValueError', '(', "'Invalid none_policy '", '+', 'str', '(', 'none_policy', ')', ')']
Returns a user-friendly description of a NonePolicy taking into account NoneArgPolicy

:param none_policy:
:param verbose:
:return:
['Returns', 'a', 'user', '-', 'friendly', 'description', 'of', 'a', 'NonePolicy', 'taking', 'into', 'account', 'NoneArgPolicy']
train
https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/entry_points.py#L53-L76
3,953
quantumlib/Cirq
cirq/circuits/circuit.py
Circuit.findall_operations_between
def findall_operations_between(self,
                               start_frontier: Dict[ops.Qid, int],
                               end_frontier: Dict[ops.Qid, int],
                               omit_crossing_operations: bool = False
                               ) -> List[Tuple[int, ops.Operation]]:
    """Finds operations between the two given frontiers.

    If a qubit is in `start_frontier` but not `end_frontier`, its end index
    defaults to the end of the circuit. If a qubit is in `end_frontier` but
    not `start_frontier`, its start index defaults to the start of the
    circuit. Operations on qubits not mentioned in either frontier are not
    included in the results.

    Args:
        start_frontier: Just before where to start searching for
            operations, for each qubit of interest. Start frontier indices
            are inclusive.
        end_frontier: Just before where to stop searching for operations,
            for each qubit of interest. End frontier indices are exclusive.
        omit_crossing_operations: Determines whether or not operations that
            cross from a location between the two frontiers to a location
            outside the two frontiers are included or excluded. (Operations
            completely inside are always included, and operations
            completely outside are always excluded.)

    Returns:
        A list of tuples. Each tuple describes an operation found between
        the two frontiers. The first item of each tuple is the index of the
        moment containing the operation, and the second item is the
        operation itself. The list is sorted so that the moment index
        increases monotonically.
    """
    result = BucketPriorityQueue[ops.Operation](
        drop_duplicate_entries=True)

    involved_qubits = set(start_frontier.keys()) | set(end_frontier.keys())
    # Note: only sorted to ensure a deterministic result ordering.
    for q in sorted(involved_qubits):
        for i in range(start_frontier.get(q, 0),
                       end_frontier.get(q, len(self))):
            op = self.operation_at(q, i)
            if op is None:
                continue
            if (omit_crossing_operations and
                    not involved_qubits.issuperset(op.qubits)):
                continue
            result.enqueue(i, op)

    return list(result)
python
def findall_operations_between(self,
                               start_frontier: Dict[ops.Qid, int],
                               end_frontier: Dict[ops.Qid, int],
                               omit_crossing_operations: bool = False
                               ) -> List[Tuple[int, ops.Operation]]:
    """Finds operations between the two given frontiers.

    If a qubit is in `start_frontier` but not `end_frontier`, its end index
    defaults to the end of the circuit. If a qubit is in `end_frontier` but
    not `start_frontier`, its start index defaults to the start of the
    circuit. Operations on qubits not mentioned in either frontier are not
    included in the results.

    Args:
        start_frontier: Just before where to start searching for
            operations, for each qubit of interest. Start frontier indices
            are inclusive.
        end_frontier: Just before where to stop searching for operations,
            for each qubit of interest. End frontier indices are exclusive.
        omit_crossing_operations: Determines whether or not operations that
            cross from a location between the two frontiers to a location
            outside the two frontiers are included or excluded. (Operations
            completely inside are always included, and operations
            completely outside are always excluded.)

    Returns:
        A list of tuples. Each tuple describes an operation found between
        the two frontiers. The first item of each tuple is the index of the
        moment containing the operation, and the second item is the
        operation itself. The list is sorted so that the moment index
        increases monotonically.
    """
    result = BucketPriorityQueue[ops.Operation](
        drop_duplicate_entries=True)

    involved_qubits = set(start_frontier.keys()) | set(end_frontier.keys())
    # Note: only sorted to ensure a deterministic result ordering.
    for q in sorted(involved_qubits):
        for i in range(start_frontier.get(q, 0),
                       end_frontier.get(q, len(self))):
            op = self.operation_at(q, i)
            if op is None:
                continue
            if (omit_crossing_operations and
                    not involved_qubits.issuperset(op.qubits)):
                continue
            result.enqueue(i, op)

    return list(result)
['def', 'findall_operations_between', '(', 'self', ',', 'start_frontier', ':', 'Dict', '[', 'ops', '.', 'Qid', ',', 'int', ']', ',', 'end_frontier', ':', 'Dict', '[', 'ops', '.', 'Qid', ',', 'int', ']', ',', 'omit_crossing_operations', ':', 'bool', '=', 'False', ')', '->', 'List', '[', 'Tuple', '[', 'int', ',', 'ops', '.', 'Operation', ']', ']', ':', 'result', '=', 'BucketPriorityQueue', '[', 'ops', '.', 'Operation', ']', '(', 'drop_duplicate_entries', '=', 'True', ')', 'involved_qubits', '=', 'set', '(', 'start_frontier', '.', 'keys', '(', ')', ')', '|', 'set', '(', 'end_frontier', '.', 'keys', '(', ')', ')', '# Note: only sorted to ensure a deterministic result ordering.', 'for', 'q', 'in', 'sorted', '(', 'involved_qubits', ')', ':', 'for', 'i', 'in', 'range', '(', 'start_frontier', '.', 'get', '(', 'q', ',', '0', ')', ',', 'end_frontier', '.', 'get', '(', 'q', ',', 'len', '(', 'self', ')', ')', ')', ':', 'op', '=', 'self', '.', 'operation_at', '(', 'q', ',', 'i', ')', 'if', 'op', 'is', 'None', ':', 'continue', 'if', '(', 'omit_crossing_operations', 'and', 'not', 'involved_qubits', '.', 'issuperset', '(', 'op', '.', 'qubits', ')', ')', ':', 'continue', 'result', '.', 'enqueue', '(', 'i', ',', 'op', ')', 'return', 'list', '(', 'result', ')']
Finds operations between the two given frontiers.

If a qubit is in `start_frontier` but not `end_frontier`, its end index
defaults to the end of the circuit. If a qubit is in `end_frontier` but not
`start_frontier`, its start index defaults to the start of the circuit.
Operations on qubits not mentioned in either frontier are not included in
the results.

Args:
    start_frontier: Just before where to start searching for operations,
        for each qubit of interest. Start frontier indices are inclusive.
    end_frontier: Just before where to stop searching for operations,
        for each qubit of interest. End frontier indices are exclusive.
    omit_crossing_operations: Determines whether or not operations that
        cross from a location between the two frontiers to a location
        outside the two frontiers are included or excluded. (Operations
        completely inside are always included, and operations completely
        outside are always excluded.)

Returns:
    A list of tuples. Each tuple describes an operation found between the
    two frontiers. The first item of each tuple is the index of the moment
    containing the operation, and the second item is the operation itself.
    The list is sorted so that the moment index increases monotonically.
['Finds', 'operations', 'between', 'the', 'two', 'given', 'frontiers', '.']
train
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/circuit.py#L623-L671
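A minimal usage sketch for the method above, assuming a reasonably recent cirq release (the qubits and gates are illustrative):

import cirq

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.X(a), cirq.CNOT(a, b), cirq.X(b)])

# Everything on qubit `a` from moment 0 (inclusive) up to moment 2 (exclusive);
# the CNOT is returned too because omit_crossing_operations defaults to False.
for moment_index, op in circuit.findall_operations_between({a: 0}, {a: 2}):
    print(moment_index, op)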
3,954
csaez/wishlib
wishlib/qt/helpers.py
wrapinstance
def wrapinstance(ptr, base=None):
    """convert a pointer to a Qt class instance (PySide/PyQt compatible)"""
    if ptr is None:
        return None
    ptr = long(ptr)  # Ensure type
    from wishlib.qt import active, QtCore, QtGui
    if active == "PySide":
        import shiboken
        if base is None:
            qObj = shiboken.wrapInstance(ptr, QtCore.QObject)
            metaObj = qObj.metaObject()
            cls = metaObj.className()
            superCls = metaObj.superClass().className()
            if hasattr(QtGui, cls):
                base = getattr(QtGui, cls)
            elif hasattr(QtGui, superCls):
                base = getattr(QtGui, superCls)
            else:
                base = QtGui.QWidget
        return shiboken.wrapInstance(ptr, base)
    elif active == "PyQt4":
        import sip
        return sip.wrapinstance(ptr, QtGui.QWidget)
    return None
python
def wrapinstance(ptr, base=None):
    """convert a pointer to a Qt class instance (PySide/PyQt compatible)"""
    if ptr is None:
        return None
    ptr = long(ptr)  # Ensure type
    from wishlib.qt import active, QtCore, QtGui
    if active == "PySide":
        import shiboken
        if base is None:
            qObj = shiboken.wrapInstance(ptr, QtCore.QObject)
            metaObj = qObj.metaObject()
            cls = metaObj.className()
            superCls = metaObj.superClass().className()
            if hasattr(QtGui, cls):
                base = getattr(QtGui, cls)
            elif hasattr(QtGui, superCls):
                base = getattr(QtGui, superCls)
            else:
                base = QtGui.QWidget
        return shiboken.wrapInstance(ptr, base)
    elif active == "PyQt4":
        import sip
        return sip.wrapinstance(ptr, QtGui.QWidget)
    return None
['def', 'wrapinstance', '(', 'ptr', ',', 'base', '=', 'None', ')', ':', 'if', 'ptr', 'is', 'None', ':', 'return', 'None', 'ptr', '=', 'long', '(', 'ptr', ')', '# Ensure type', 'from', 'wishlib', '.', 'qt', 'import', 'active', ',', 'QtCore', ',', 'QtGui', 'if', 'active', '==', '"PySide"', ':', 'import', 'shiboken', 'if', 'base', 'is', 'None', ':', 'qObj', '=', 'shiboken', '.', 'wrapInstance', '(', 'ptr', ',', 'QtCore', '.', 'QObject', ')', 'metaObj', '=', 'qObj', '.', 'metaObject', '(', ')', 'cls', '=', 'metaObj', '.', 'className', '(', ')', 'superCls', '=', 'metaObj', '.', 'superClass', '(', ')', '.', 'className', '(', ')', 'if', 'hasattr', '(', 'QtGui', ',', 'cls', ')', ':', 'base', '=', 'getattr', '(', 'QtGui', ',', 'cls', ')', 'elif', 'hasattr', '(', 'QtGui', ',', 'superCls', ')', ':', 'base', '=', 'getattr', '(', 'QtGui', ',', 'superCls', ')', 'else', ':', 'base', '=', 'QtGui', '.', 'QWidget', 'return', 'shiboken', '.', 'wrapInstance', '(', 'ptr', ',', 'base', ')', 'elif', 'active', '==', '"PyQt4"', ':', 'import', 'sip', 'return', 'sip', '.', 'wrapinstance', '(', 'ptr', ',', 'QtGui', '.', 'QWidget', ')', 'return', 'None']
convert a pointer to a Qt class instance (PySide/PyQt compatible)
['convert', 'a', 'pointer', 'to', 'a', 'Qt', 'class', 'instance', '(', 'PySide', '/', 'PyQt', 'compatible', ')']
train
https://github.com/csaez/wishlib/blob/c212fa7875006a332a4cefbf69885ced9647bc2f/wishlib/qt/helpers.py#L98-L121
3,955
tensorflow/tensor2tensor
tensor2tensor/layers/modalities.py
image_top
def image_top(body_output, targets, model_hparams, vocab_size):
    """Top transformation for images."""
    del targets  # unused arg
    # TODO(lukaszkaiser): is this a universal enough way to get channels?
    num_channels = model_hparams.problem.num_channels
    with tf.variable_scope("rgb_softmax"):
        body_output_shape = common_layers.shape_list(body_output)
        reshape_shape = body_output_shape[:3]
        reshape_shape.extend([num_channels, vocab_size])
        res = tf.layers.dense(body_output, vocab_size * num_channels)
        res = tf.reshape(res, reshape_shape)
        if not tf.get_variable_scope().reuse:
            res_argmax = tf.argmax(res, axis=-1)
            tf.summary.image(
                "result",
                common_layers.tpu_safe_image_summary(res_argmax),
                max_outputs=1)
        return res
python
def image_top(body_output, targets, model_hparams, vocab_size):
    """Top transformation for images."""
    del targets  # unused arg
    # TODO(lukaszkaiser): is this a universal enough way to get channels?
    num_channels = model_hparams.problem.num_channels
    with tf.variable_scope("rgb_softmax"):
        body_output_shape = common_layers.shape_list(body_output)
        reshape_shape = body_output_shape[:3]
        reshape_shape.extend([num_channels, vocab_size])
        res = tf.layers.dense(body_output, vocab_size * num_channels)
        res = tf.reshape(res, reshape_shape)
        if not tf.get_variable_scope().reuse:
            res_argmax = tf.argmax(res, axis=-1)
            tf.summary.image(
                "result",
                common_layers.tpu_safe_image_summary(res_argmax),
                max_outputs=1)
        return res
['def', 'image_top', '(', 'body_output', ',', 'targets', ',', 'model_hparams', ',', 'vocab_size', ')', ':', 'del', 'targets', '# unused arg', '# TODO(lukaszkaiser): is this a universal enough way to get channels?', 'num_channels', '=', 'model_hparams', '.', 'problem', '.', 'num_channels', 'with', 'tf', '.', 'variable_scope', '(', '"rgb_softmax"', ')', ':', 'body_output_shape', '=', 'common_layers', '.', 'shape_list', '(', 'body_output', ')', 'reshape_shape', '=', 'body_output_shape', '[', ':', '3', ']', 'reshape_shape', '.', 'extend', '(', '[', 'num_channels', ',', 'vocab_size', ']', ')', 'res', '=', 'tf', '.', 'layers', '.', 'dense', '(', 'body_output', ',', 'vocab_size', '*', 'num_channels', ')', 'res', '=', 'tf', '.', 'reshape', '(', 'res', ',', 'reshape_shape', ')', 'if', 'not', 'tf', '.', 'get_variable_scope', '(', ')', '.', 'reuse', ':', 'res_argmax', '=', 'tf', '.', 'argmax', '(', 'res', ',', 'axis', '=', '-', '1', ')', 'tf', '.', 'summary', '.', 'image', '(', '"result"', ',', 'common_layers', '.', 'tpu_safe_image_summary', '(', 'res_argmax', ')', ',', 'max_outputs', '=', '1', ')', 'return', 'res']
Top transformation for images.
['Top', 'transformation', 'for', 'images', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/modalities.py#L955-L972
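The reshape at the heart of the method can be shown with plain numpy; a minimal sketch with illustrative shapes:

import numpy as np

batch, h, w, channels, vocab = 2, 4, 4, 3, 256
# A dense layer projects features to channels*vocab logits per pixel...
logits_flat = np.zeros((batch, h, w, channels * vocab))
# ...and a reshape splits the last axis into (channels, vocab) so an
# argmax over the final axis picks one of 256 values per channel.
logits = logits_flat.reshape(batch, h, w, channels, vocab)
print(logits.shape)             # (2, 4, 4, 3, 256)
print(logits.argmax(-1).shape)  # (2, 4, 4, 3) -- an image again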
3,956
twisted/mantissa
xmantissa/people.py
PhoneNumberContactType.createContactItem
def createContactItem(self, person, label, number):
    """
    Create a L{PhoneNumber} item for C{number}, associated with C{person}.

    @type person: L{Person}

    @param label: The value to use for the I{label} attribute of the new
        L{PhoneNumber} item.
    @type label: C{unicode}

    @param number: The value to use for the I{number} attribute of the new
        L{PhoneNumber} item. If C{''}, no item will be created.
    @type number: C{unicode}

    @rtype: L{PhoneNumber} or C{NoneType}
    """
    if number:
        return PhoneNumber(
            store=person.store, person=person, label=label, number=number)
python
def createContactItem(self, person, label, number):
    """
    Create a L{PhoneNumber} item for C{number}, associated with C{person}.

    @type person: L{Person}

    @param label: The value to use for the I{label} attribute of the new
        L{PhoneNumber} item.
    @type label: C{unicode}

    @param number: The value to use for the I{number} attribute of the new
        L{PhoneNumber} item. If C{''}, no item will be created.
    @type number: C{unicode}

    @rtype: L{PhoneNumber} or C{NoneType}
    """
    if number:
        return PhoneNumber(
            store=person.store, person=person, label=label, number=number)
['def', 'createContactItem', '(', 'self', ',', 'person', ',', 'label', ',', 'number', ')', ':', 'if', 'number', ':', 'return', 'PhoneNumber', '(', 'store', '=', 'person', '.', 'store', ',', 'person', '=', 'person', ',', 'label', '=', 'label', ',', 'number', '=', 'number', ')']
Create a L{PhoneNumber} item for C{number}, associated with C{person}.

@type person: L{Person}

@param label: The value to use for the I{label} attribute of the new
    L{PhoneNumber} item.
@type label: C{unicode}

@param number: The value to use for the I{number} attribute of the new
    L{PhoneNumber} item. If C{''}, no item will be created.
@type number: C{unicode}

@rtype: L{PhoneNumber} or C{NoneType}
['Create', 'a', 'L', '{', 'PhoneNumber', '}', 'item', 'for', 'C', '{', 'number', '}', 'associated', 'with', 'C', '{', 'person', '}', '.']
train
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L2016-L2034
3,957
oceanprotocol/squid-py
squid_py/agreements/register_service_agreement.py
execute_pending_service_agreements
def execute_pending_service_agreements(storage_path, account, actor_type, did_resolver_fn):
    """
    Iterates over pending service agreements recorded in the local storage,
    fetches their service definitions, and subscribes to service agreement events.

    :param storage_path: storage path for the internal db, str
    :param account:
    :param actor_type:
    :param did_resolver_fn:
    :return:
    """
    keeper = Keeper.get_instance()
    # service_agreement_id, did, service_definition_id, price, files, start_time, status
    for (agreement_id, did, _, price, files, start_time, _) in get_service_agreements(storage_path):
        ddo = did_resolver_fn(did)
        for service in ddo.services:
            if service.type != 'Access':
                continue

            consumer_provider_tuple = keeper.escrow_access_secretstore_template.get_agreement_data(
                agreement_id)
            if not consumer_provider_tuple:
                continue

            consumer, provider = consumer_provider_tuple
            did = ddo.did
            service_agreement = ServiceAgreement.from_service_dict(service.as_dictionary())
            condition_ids = service_agreement.generate_agreement_condition_ids(
                agreement_id, did, consumer, provider, keeper)

            if actor_type == 'consumer':
                assert account.address == consumer
                process_agreement_events_consumer(
                    provider, agreement_id, did, service_agreement,
                    price, account, condition_ids, None)
            else:
                assert account.address == provider
                process_agreement_events_publisher(
                    account, agreement_id, did, service_agreement, price,
                    consumer, condition_ids)
python
def execute_pending_service_agreements(storage_path, account, actor_type, did_resolver_fn):
    """
    Iterates over pending service agreements recorded in the local storage,
    fetches their service definitions, and subscribes to service agreement events.

    :param storage_path: storage path for the internal db, str
    :param account:
    :param actor_type:
    :param did_resolver_fn:
    :return:
    """
    keeper = Keeper.get_instance()
    # service_agreement_id, did, service_definition_id, price, files, start_time, status
    for (agreement_id, did, _, price, files, start_time, _) in get_service_agreements(storage_path):
        ddo = did_resolver_fn(did)
        for service in ddo.services:
            if service.type != 'Access':
                continue

            consumer_provider_tuple = keeper.escrow_access_secretstore_template.get_agreement_data(
                agreement_id)
            if not consumer_provider_tuple:
                continue

            consumer, provider = consumer_provider_tuple
            did = ddo.did
            service_agreement = ServiceAgreement.from_service_dict(service.as_dictionary())
            condition_ids = service_agreement.generate_agreement_condition_ids(
                agreement_id, did, consumer, provider, keeper)

            if actor_type == 'consumer':
                assert account.address == consumer
                process_agreement_events_consumer(
                    provider, agreement_id, did, service_agreement,
                    price, account, condition_ids, None)
            else:
                assert account.address == provider
                process_agreement_events_publisher(
                    account, agreement_id, did, service_agreement, price,
                    consumer, condition_ids)
['def', 'execute_pending_service_agreements', '(', 'storage_path', ',', 'account', ',', 'actor_type', ',', 'did_resolver_fn', ')', ':', 'keeper', '=', 'Keeper', '.', 'get_instance', '(', ')', '# service_agreement_id, did, service_definition_id, price, files, start_time, status', 'for', '(', 'agreement_id', ',', 'did', ',', '_', ',', 'price', ',', 'files', ',', 'start_time', ',', '_', ')', 'in', 'get_service_agreements', '(', 'storage_path', ')', ':', 'ddo', '=', 'did_resolver_fn', '(', 'did', ')', 'for', 'service', 'in', 'ddo', '.', 'services', ':', 'if', 'service', '.', 'type', '!=', "'Access'", ':', 'continue', 'consumer_provider_tuple', '=', 'keeper', '.', 'escrow_access_secretstore_template', '.', 'get_agreement_data', '(', 'agreement_id', ')', 'if', 'not', 'consumer_provider_tuple', ':', 'continue', 'consumer', ',', 'provider', '=', 'consumer_provider_tuple', 'did', '=', 'ddo', '.', 'did', 'service_agreement', '=', 'ServiceAgreement', '.', 'from_service_dict', '(', 'service', '.', 'as_dictionary', '(', ')', ')', 'condition_ids', '=', 'service_agreement', '.', 'generate_agreement_condition_ids', '(', 'agreement_id', ',', 'did', ',', 'consumer', ',', 'provider', ',', 'keeper', ')', 'if', 'actor_type', '==', "'consumer'", ':', 'assert', 'account', '.', 'address', '==', 'consumer', 'process_agreement_events_consumer', '(', 'provider', ',', 'agreement_id', ',', 'did', ',', 'service_agreement', ',', 'price', ',', 'account', ',', 'condition_ids', ',', 'None', ')', 'else', ':', 'assert', 'account', '.', 'address', '==', 'provider', 'process_agreement_events_publisher', '(', 'account', ',', 'agreement_id', ',', 'did', ',', 'service_agreement', ',', 'price', ',', 'consumer', ',', 'condition_ids', ')']
Iterates over pending service agreements recorded in the local storage,
fetches their service definitions, and subscribes to service agreement events.

:param storage_path: storage path for the internal db, str
:param account:
:param actor_type:
:param did_resolver_fn:
:return:
['Iterates', 'over', 'pending', 'service', 'agreements', 'recorded', 'in', 'the', 'local', 'storage', 'fetches', 'their', 'service', 'definitions', 'and', 'subscribes', 'to', 'service', 'agreement', 'events', '.']
train
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/register_service_agreement.py#L174-L215
3,958
sarugaku/requirementslib
tasks/__init__.py
clean
def clean(ctx):
    """Clean previously built package artifacts.
    """
    ctx.run(f"python setup.py clean")
    dist = ROOT.joinpath("dist")
    build = ROOT.joinpath("build")
    print(f"[clean] Removing {dist} and {build}")
    if dist.exists():
        shutil.rmtree(str(dist))
    if build.exists():
        shutil.rmtree(str(build))
python
def clean(ctx):
    """Clean previously built package artifacts.
    """
    ctx.run(f"python setup.py clean")
    dist = ROOT.joinpath("dist")
    build = ROOT.joinpath("build")
    print(f"[clean] Removing {dist} and {build}")
    if dist.exists():
        shutil.rmtree(str(dist))
    if build.exists():
        shutil.rmtree(str(build))
['def', 'clean', '(', 'ctx', ')', ':', 'ctx', '.', 'run', '(', 'f"python setup.py clean"', ')', 'dist', '=', 'ROOT', '.', 'joinpath', '(', '"dist"', ')', 'build', '=', 'ROOT', '.', 'joinpath', '(', '"build"', ')', 'print', '(', 'f"[clean] Removing {dist} and {build}"', ')', 'if', 'dist', '.', 'exists', '(', ')', ':', 'shutil', '.', 'rmtree', '(', 'str', '(', 'dist', ')', ')', 'if', 'build', '.', 'exists', '(', ')', ':', 'shutil', '.', 'rmtree', '(', 'str', '(', 'build', ')', ')']
Clean previously built package artifacts.
['Clean', 'previously', 'built', 'package', 'artifacts', '.']
train
https://github.com/sarugaku/requirementslib/blob/de78a01e8abc1fc47155516a96008d97035e8063/tasks/__init__.py#L56-L66
3,959
SiLab-Bonn/basil
basil/HL/GPAC.py
GPAC.set_current
def set_current(self, channel, value, unit='A'):
    '''Setting current of current source
    '''
    dac_offset = self._ch_cal[channel]['DAC']['offset']
    dac_gain = self._ch_cal[channel]['DAC']['gain']

    if unit == 'raw':
        value = value
    elif unit == 'A':
        value = int((-value * 1000000 - dac_offset) / dac_gain)  # fix sign of output
    elif unit == 'mA':
        value = int((-value * 1000 - dac_offset) / dac_gain)  # fix sign of output
    elif unit == 'uA':
        value = int((-value - dac_offset) / dac_gain)  # fix sign of output
    else:
        raise TypeError("Invalid unit type.")

    self._set_dac_value(channel=channel, value=value)
python
def set_current(self, channel, value, unit='A'):
    '''Setting current of current source
    '''
    dac_offset = self._ch_cal[channel]['DAC']['offset']
    dac_gain = self._ch_cal[channel]['DAC']['gain']

    if unit == 'raw':
        value = value
    elif unit == 'A':
        value = int((-value * 1000000 - dac_offset) / dac_gain)  # fix sign of output
    elif unit == 'mA':
        value = int((-value * 1000 - dac_offset) / dac_gain)  # fix sign of output
    elif unit == 'uA':
        value = int((-value - dac_offset) / dac_gain)  # fix sign of output
    else:
        raise TypeError("Invalid unit type.")

    self._set_dac_value(channel=channel, value=value)
['def', 'set_current', '(', 'self', ',', 'channel', ',', 'value', ',', 'unit', '=', "'A'", ')', ':', 'dac_offset', '=', 'self', '.', '_ch_cal', '[', 'channel', ']', '[', "'DAC'", ']', '[', "'offset'", ']', 'dac_gain', '=', 'self', '.', '_ch_cal', '[', 'channel', ']', '[', "'DAC'", ']', '[', "'gain'", ']', 'if', 'unit', '==', "'raw'", ':', 'value', '=', 'value', 'elif', 'unit', '==', "'A'", ':', 'value', '=', 'int', '(', '(', '-', 'value', '*', '1000000', '-', 'dac_offset', ')', '/', 'dac_gain', ')', '# fix sign of output', 'elif', 'unit', '==', "'mA'", ':', 'value', '=', 'int', '(', '(', '-', 'value', '*', '1000', '-', 'dac_offset', ')', '/', 'dac_gain', ')', '# fix sign of output', 'elif', 'unit', '==', "'uA'", ':', 'value', '=', 'int', '(', '(', '-', 'value', '-', 'dac_offset', ')', '/', 'dac_gain', ')', '# fix sign of output', 'else', ':', 'raise', 'TypeError', '(', '"Invalid unit type."', ')', 'self', '.', '_set_dac_value', '(', 'channel', '=', 'channel', ',', 'value', '=', 'value', ')']
Setting current of current source
['Setting', 'current', 'of', 'current', 'source']
train
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L858-L874
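A table-driven sketch of the same unit-to-raw conversion, replacing the if/elif chain (the function and constant names are illustrative and the calibration values are made up):

# Convert a current to raw DAC counts for a channel's calibration.
SCALE_TO_UA = {'A': 1e6, 'mA': 1e3, 'uA': 1.0}

def current_to_raw(value, unit, offset, gain):
    if unit == 'raw':
        return value
    try:
        microamps = value * SCALE_TO_UA[unit]
    except KeyError:
        raise TypeError("Invalid unit type.")
    return int((-microamps - offset) / gain)  # output polarity is inverted

print(current_to_raw(0.001, 'A', offset=0.0, gain=2.0))  # -500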
3,960
CamDavidsonPilon/lifelines
lifelines/generate_datasets.py
right_censor_lifetimes
def right_censor_lifetimes(lifetimes, max_, min_=0):
    """
    Right censor the deaths, uniformly
      lifetimes: (n,) array of positive random variables
      max_: the max time a censorship can occur
      min_: the min time a censorship can occur

    Returns
      The actual observations including uniform right censoring, and
      D_i (observed death or did not)

    I think this is deprecated
    """
    n = lifetimes.shape[0]
    u = min_ + (max_ - min_) * random.rand(n)
    observations = np.minimum(u, lifetimes)
    return observations, lifetimes == observations
python
def right_censor_lifetimes(lifetimes, max_, min_=0):
    """
    Right censor the deaths, uniformly
      lifetimes: (n,) array of positive random variables
      max_: the max time a censorship can occur
      min_: the min time a censorship can occur

    Returns
      The actual observations including uniform right censoring, and
      D_i (observed death or did not)

    I think this is deprecated
    """
    n = lifetimes.shape[0]
    u = min_ + (max_ - min_) * random.rand(n)
    observations = np.minimum(u, lifetimes)
    return observations, lifetimes == observations
['def', 'right_censor_lifetimes', '(', 'lifetimes', ',', 'max_', ',', 'min_', '=', '0', ')', ':', 'n', '=', 'lifetimes', '.', 'shape', '[', '0', ']', 'u', '=', 'min_', '+', '(', 'max_', '-', 'min_', ')', '*', 'random', '.', 'rand', '(', 'n', ')', 'observations', '=', 'np', '.', 'minimum', '(', 'u', ',', 'lifetimes', ')', 'return', 'observations', ',', 'lifetimes', '==', 'observations']
Right censor the deaths, uniformly
  lifetimes: (n,) array of positive random variables
  max_: the max time a censorship can occur
  min_: the min time a censorship can occur

Returns
  The actual observations including uniform right censoring, and
  D_i (observed death or did not)

I think this is deprecated
['Right', 'censor', 'the', 'deaths', 'uniformly', 'lifetimes', ':', '(', 'n', ')', 'array', 'of', 'positive', 'random', 'variables', 'max_', ':', 'the', 'max', 'time', 'a', 'censorship', 'can', 'occur', 'min_', ':', 'the', 'min', 'time', 'a', 'censorship', 'can', 'occur']
train
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/generate_datasets.py#L130-L146
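A self-contained sketch of uniform right censoring with modern numpy (the distributions and sizes are illustrative):

import numpy as np

rng = np.random.default_rng(0)
lifetimes = rng.exponential(scale=10.0, size=5)
censor_times = rng.uniform(0.0, 15.0, size=5)

observed = np.minimum(lifetimes, censor_times)
event_observed = lifetimes <= censor_times  # True = death seen, False = censored

print(np.column_stack([lifetimes, observed, event_observed]))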
3,961
gccxml/pygccxml
pygccxml/declarations/type_traits.py
is_array
def is_array(type_):
    """returns True, if type represents C++ array type, False otherwise"""
    nake_type = remove_alias(type_)
    nake_type = remove_reference(nake_type)
    nake_type = remove_cv(nake_type)
    return isinstance(nake_type, cpptypes.array_t)
python
def is_array(type_):
    """returns True, if type represents C++ array type, False otherwise"""
    nake_type = remove_alias(type_)
    nake_type = remove_reference(nake_type)
    nake_type = remove_cv(nake_type)
    return isinstance(nake_type, cpptypes.array_t)
['def', 'is_array', '(', 'type_', ')', ':', 'nake_type', '=', 'remove_alias', '(', 'type_', ')', 'nake_type', '=', 'remove_reference', '(', 'nake_type', ')', 'nake_type', '=', 'remove_cv', '(', 'nake_type', ')', 'return', 'isinstance', '(', 'nake_type', ',', 'cpptypes', '.', 'array_t', ')']
returns True, if type represents C++ array type, False otherwise
['returns', 'True', 'if', 'type', 'represents', 'C', '++', 'array', 'type', 'False', 'otherwise']
train
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/type_traits.py#L280-L285
3,962
matrix-org/matrix-python-sdk
matrix_client/client.py
MatrixClient.stop_listener_thread
def stop_listener_thread(self): """ Stop listener thread running in the background """ if self.sync_thread: self.should_listen = False self.sync_thread.join() self.sync_thread = None
python
def stop_listener_thread(self): """ Stop listener thread running in the background """ if self.sync_thread: self.should_listen = False self.sync_thread.join() self.sync_thread = None
['def', 'stop_listener_thread', '(', 'self', ')', ':', 'if', 'self', '.', 'sync_thread', ':', 'self', '.', 'should_listen', '=', 'False', 'self', '.', 'sync_thread', '.', 'join', '(', ')', 'self', '.', 'sync_thread', '=', 'None']
Stop listener thread running in the background
['Stop', 'listener', 'thread', 'running', 'in', 'the', 'background']
train
https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/client.py#L533-L539
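The flag-then-join shutdown idiom used above, in a generic runnable form; the worker loop below is a stand-in for the Matrix /sync poll, not the SDK's real listener:

    import threading
    import time

    class Listener:
        def __init__(self):
            self.should_listen = False
            self.sync_thread = None

        def start_listener_thread(self):
            self.should_listen = True
            self.sync_thread = threading.Thread(target=self._loop)
            self.sync_thread.start()

        def _loop(self):
            while self.should_listen:   # flag is re-checked every iteration
                time.sleep(0.05)        # stand-in for one sync round-trip

        def stop_listener_thread(self):
            if self.sync_thread:
                self.should_listen = False   # ask the loop to exit...
                self.sync_thread.join()      # ...and wait until it has
                self.sync_thread = None

    l = Listener()
    l.start_listener_thread()
    l.stop_listener_thread()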
3,963
ecell/ecell4
ecell4/util/viz.py
plot_world_with_plotly
def plot_world_with_plotly(world, species_list=None, max_count=1000): """ Plot a World on IPython Notebook """ if isinstance(world, str): from .simulation import load_world world = load_world(world) if species_list is None: species_list = [sp.serial() for sp in world.list_species()] species_list.sort() import random from ecell4_base.core import Species positions = {} for serial in species_list: x, y, z = [], [], [] particles = world.list_particles_exact(Species(serial)) if max_count is not None and len(particles) > max_count: particles = random.sample(particles, max_count) for pid, p in particles: pos = p.position() x.append(pos[0]) y.append(pos[1]) z.append(pos[2]) positions[serial] = (x, y, z) import plotly import plotly.graph_objs as go plotly.offline.init_notebook_mode() marker = dict(size=6, line=dict(color='rgb(204, 204, 204)', width=1), opacity=0.9, symbol='circle') data = [] for serial, (x, y, z) in positions.items(): trace = go.Scatter3d( x=x, y=y, z=z, mode='markers', marker=marker, name=serial) data.append(trace) layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0)) fig = go.Figure(data=data, layout=layout) plotly.offline.iplot(fig)
python
def plot_world_with_plotly(world, species_list=None, max_count=1000): """ Plot a World on IPython Notebook """ if isinstance(world, str): from .simulation import load_world world = load_world(world) if species_list is None: species_list = [sp.serial() for sp in world.list_species()] species_list.sort() import random from ecell4_base.core import Species positions = {} for serial in species_list: x, y, z = [], [], [] particles = world.list_particles_exact(Species(serial)) if max_count is not None and len(particles) > max_count: particles = random.sample(particles, max_count) for pid, p in particles: pos = p.position() x.append(pos[0]) y.append(pos[1]) z.append(pos[2]) positions[serial] = (x, y, z) import plotly import plotly.graph_objs as go plotly.offline.init_notebook_mode() marker = dict(size=6, line=dict(color='rgb(204, 204, 204)', width=1), opacity=0.9, symbol='circle') data = [] for serial, (x, y, z) in positions.items(): trace = go.Scatter3d( x=x, y=y, z=z, mode='markers', marker=marker, name=serial) data.append(trace) layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0)) fig = go.Figure(data=data, layout=layout) plotly.offline.iplot(fig)
['def', 'plot_world_with_plotly', '(', 'world', ',', 'species_list', '=', 'None', ',', 'max_count', '=', '1000', ')', ':', 'if', 'isinstance', '(', 'world', ',', 'str', ')', ':', 'from', '.', 'simulation', 'import', 'load_world', 'world', '=', 'load_world', '(', 'world', ')', 'if', 'species_list', 'is', 'None', ':', 'species_list', '=', '[', 'sp', '.', 'serial', '(', ')', 'for', 'sp', 'in', 'world', '.', 'list_species', '(', ')', ']', 'species_list', '.', 'sort', '(', ')', 'import', 'random', 'from', 'ecell4_base', '.', 'core', 'import', 'Species', 'positions', '=', '{', '}', 'for', 'serial', 'in', 'species_list', ':', 'x', ',', 'y', ',', 'z', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'particles', '=', 'world', '.', 'list_particles_exact', '(', 'Species', '(', 'serial', ')', ')', 'if', 'max_count', 'is', 'not', 'None', 'and', 'len', '(', 'particles', ')', '>', 'max_count', ':', 'particles', '=', 'random', '.', 'sample', '(', 'particles', ',', 'max_count', ')', 'for', 'pid', ',', 'p', 'in', 'particles', ':', 'pos', '=', 'p', '.', 'position', '(', ')', 'x', '.', 'append', '(', 'pos', '[', '0', ']', ')', 'y', '.', 'append', '(', 'pos', '[', '1', ']', ')', 'z', '.', 'append', '(', 'pos', '[', '2', ']', ')', 'positions', '[', 'serial', ']', '=', '(', 'x', ',', 'y', ',', 'z', ')', 'import', 'plotly', 'import', 'plotly', '.', 'graph_objs', 'as', 'go', 'plotly', '.', 'offline', '.', 'init_notebook_mode', '(', ')', 'marker', '=', 'dict', '(', 'size', '=', '6', ',', 'line', '=', 'dict', '(', 'color', '=', "'rgb(204, 204, 204)'", ',', 'width', '=', '1', ')', ',', 'opacity', '=', '0.9', ',', 'symbol', '=', "'circle'", ')', 'data', '=', '[', ']', 'for', 'serial', ',', '(', 'x', ',', 'y', ',', 'z', ')', 'in', 'positions', '.', 'items', '(', ')', ':', 'trace', '=', 'go', '.', 'Scatter3d', '(', 'x', '=', 'x', ',', 'y', '=', 'y', ',', 'z', '=', 'z', ',', 'mode', '=', "'markers'", ',', 'marker', '=', 'marker', ',', 'name', '=', 'serial', ')', 'data', '.', 'append', '(', 'trace', ')', 'layout', '=', 'go', '.', 'Layout', '(', 'margin', '=', 'dict', '(', 'l', '=', '0', ',', 'r', '=', '0', ',', 'b', '=', '0', ',', 't', '=', '0', ')', ')', 'fig', '=', 'go', '.', 'Figure', '(', 'data', '=', 'data', ',', 'layout', '=', 'layout', ')', 'plotly', '.', 'offline', '.', 'iplot', '(', 'fig', ')']
Plot a World on IPython Notebook
['Plot', 'a', 'World', 'on', 'IPython', 'Notebook']
train
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L2136-L2182
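A hedged usage sketch for a Jupyter notebook; it assumes a world file saved by an earlier ecell4 run (the filename is made up) and relies on the string branch above that routes through load_world:

    from ecell4.util.viz import plot_world_with_plotly

    # Capping at 500 particles per species keeps the 3-D scatter responsive;
    # pass max_count=None to plot every particle.
    plot_world_with_plotly('world.h5', max_count=500)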
3,964
saltstack/salt
salt/modules/boto_kms.py
get_key_policy
def get_key_policy(key_id, policy_name, region=None, key=None, keyid=None, profile=None): ''' Get the policy for the specified key. CLI example:: salt myminion boto_kms.get_key_policy 'alias/mykey' mypolicy ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) r = {} try: key_policy = conn.get_key_policy(key_id, policy_name) r['key_policy'] = salt.serializers.json.deserialize( key_policy['Policy'], object_pairs_hook=odict.OrderedDict ) except boto.exception.BotoServerError as e: r['error'] = __utils__['boto.get_error'](e) return r
python
def get_key_policy(key_id, policy_name, region=None, key=None, keyid=None, profile=None): ''' Get the policy for the specified key. CLI example:: salt myminion boto_kms.get_key_policy 'alias/mykey' mypolicy ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) r = {} try: key_policy = conn.get_key_policy(key_id, policy_name) r['key_policy'] = salt.serializers.json.deserialize( key_policy['Policy'], object_pairs_hook=odict.OrderedDict ) except boto.exception.BotoServerError as e: r['error'] = __utils__['boto.get_error'](e) return r
['def', 'get_key_policy', '(', 'key_id', ',', 'policy_name', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'r', '=', '{', '}', 'try', ':', 'key_policy', '=', 'conn', '.', 'get_key_policy', '(', 'key_id', ',', 'policy_name', ')', 'r', '[', "'key_policy'", ']', '=', 'salt', '.', 'serializers', '.', 'json', '.', 'deserialize', '(', 'key_policy', '[', "'Policy'", ']', ',', 'object_pairs_hook', '=', 'odict', '.', 'OrderedDict', ')', 'except', 'boto', '.', 'exception', '.', 'BotoServerError', 'as', 'e', ':', 'r', '[', "'error'", ']', '=', '__utils__', '[', "'boto.get_error'", ']', '(', 'e', ')', 'return', 'r']
Get the policy for the specified key. CLI example:: salt myminion boto_kms.get_key_policy 'alias/mykey' mypolicy
['Get', 'the', 'policy', 'for', 'the', 'specified', 'key', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kms.py#L415-L435
3,965
liip/taxi
taxi/timesheet/parser.py
TimesheetParser.duration_to_text
def duration_to_text(self, duration): """ Return the textual representation of the given `duration`. The duration can either be a tuple of :class:`datetime.time` objects, or a simple number. The returned text will be either a hhmm-hhmm string (if the given `duration` is a tuple) or a number. """ if isinstance(duration, tuple): start = (duration[0].strftime(self.ENTRY_DURATION_FORMAT) if duration[0] is not None else '') end = (duration[1].strftime(self.ENTRY_DURATION_FORMAT) if duration[1] is not None else '?') duration = '%s-%s' % (start, end) else: duration = six.text_type(duration) return duration
python
def duration_to_text(self, duration): """ Return the textual representation of the given `duration`. The duration can either be a tuple of :class:`datetime.time` objects, or a simple number. The returned text will be either a hhmm-hhmm string (if the given `duration` is a tuple) or a number. """ if isinstance(duration, tuple): start = (duration[0].strftime(self.ENTRY_DURATION_FORMAT) if duration[0] is not None else '') end = (duration[1].strftime(self.ENTRY_DURATION_FORMAT) if duration[1] is not None else '?') duration = '%s-%s' % (start, end) else: duration = six.text_type(duration) return duration
['def', 'duration_to_text', '(', 'self', ',', 'duration', ')', ':', 'if', 'isinstance', '(', 'duration', ',', 'tuple', ')', ':', 'start', '=', '(', 'duration', '[', '0', ']', '.', 'strftime', '(', 'self', '.', 'ENTRY_DURATION_FORMAT', ')', 'if', 'duration', '[', '0', ']', 'is', 'not', 'None', 'else', "''", ')', 'end', '=', '(', 'duration', '[', '1', ']', '.', 'strftime', '(', 'self', '.', 'ENTRY_DURATION_FORMAT', ')', 'if', 'duration', '[', '1', ']', 'is', 'not', 'None', 'else', "'?'", ')', 'duration', '=', "'%s-%s'", '%', '(', 'start', ',', 'end', ')', 'else', ':', 'duration', '=', 'six', '.', 'text_type', '(', 'duration', ')', 'return', 'duration']
Return the textual representation of the given `duration`. The duration can either be a tuple of :class:`datetime.time` objects, or a simple number. The returned text will be either a hhmm-hhmm string (if the given `duration` is a tuple) or a number.
['Return', 'the', 'textual', 'representation', 'of', 'the', 'given', 'duration', '.', 'The', 'duration', 'can', 'either', 'be', 'a', 'tuple', 'of', ':', 'class', ':', 'datetime', '.', 'time', 'objects', 'or', 'a', 'simple', 'number', '.', 'The', 'returned', 'text', 'will', 'be', 'either', 'a', 'hhmm', '-', 'hhmm', 'string', '(', 'if', 'the', 'given', 'duration', 'is', 'a', 'tuple', ')', 'or', 'a', 'number', '.']
train
https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/timesheet/parser.py#L101-L120
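How the two branches render, assuming ENTRY_DURATION_FORMAT is an hhmm strftime pattern such as '%H%M' (an assumption — the real constant is defined elsewhere on TimesheetParser), with six.text_type simplified to str:

    import datetime

    ENTRY_DURATION_FORMAT = '%H%M'  # assumed hhmm pattern

    def duration_to_text(duration):
        if isinstance(duration, tuple):
            start = (duration[0].strftime(ENTRY_DURATION_FORMAT)
                     if duration[0] is not None else '')
            end = (duration[1].strftime(ENTRY_DURATION_FORMAT)
                   if duration[1] is not None else '?')
            return '%s-%s' % (start, end)
        return str(duration)

    print(duration_to_text((datetime.time(9, 0), datetime.time(12, 30))))  # 0900-1230
    print(duration_to_text((datetime.time(9, 0), None)))                   # 0900-?
    print(duration_to_text(2.5))                                           # 2.5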
3,966
sporteasy/python-poeditor
poeditor/client.py
POEditorAPI._run
def _run(self, url_path, headers=None, **kwargs): """ Requests API """ url = self._construct_url(url_path) payload = kwargs payload.update({'api_token': self.api_token}) return self._make_request(url, payload, headers)
python
def _run(self, url_path, headers=None, **kwargs): """ Requests API """ url = self._construct_url(url_path) payload = kwargs payload.update({'api_token': self.api_token}) return self._make_request(url, payload, headers)
['def', '_run', '(', 'self', ',', 'url_path', ',', 'headers', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'url', '=', 'self', '.', '_construct_url', '(', 'url_path', ')', 'payload', '=', 'kwargs', 'payload', '.', 'update', '(', '{', "'api_token'", ':', 'self', '.', 'api_token', '}', ')', 'return', 'self', '.', '_make_request', '(', 'url', ',', 'payload', ',', 'headers', ')']
Requests API
['Requests', 'API']
train
https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L129-L138
3,967
lightning-viz/lightning-python
lightning/main.py
Lightning.disable_ipython
def disable_ipython(self): """ Disable plotting in the iPython notebook. After disabling, lightning plots will be produced in your lightning server, but will not appear in the notebook. """ from IPython.core.getipython import get_ipython self.ipython_enabled = False ip = get_ipython() formatter = ip.display_formatter.formatters['text/html'] formatter.type_printers.pop(Visualization, None) formatter.type_printers.pop(VisualizationLocal, None)
python
def disable_ipython(self): """ Disable plotting in the iPython notebook. After disabling, lightning plots will be produced in your lightning server, but will not appear in the notebook. """ from IPython.core.getipython import get_ipython self.ipython_enabled = False ip = get_ipython() formatter = ip.display_formatter.formatters['text/html'] formatter.type_printers.pop(Visualization, None) formatter.type_printers.pop(VisualizationLocal, None)
['def', 'disable_ipython', '(', 'self', ')', ':', 'from', 'IPython', '.', 'core', '.', 'getipython', 'import', 'get_ipython', 'self', '.', 'ipython_enabled', '=', 'False', 'ip', '=', 'get_ipython', '(', ')', 'formatter', '=', 'ip', '.', 'display_formatter', '.', 'formatters', '[', "'text/html'", ']', 'formatter', '.', 'type_printers', '.', 'pop', '(', 'Visualization', ',', 'None', ')', 'formatter', '.', 'type_printers', '.', 'pop', '(', 'VisualizationLocal', ',', 'None', ')']
Disable plotting in the iPython notebook. After disabling, lightning plots will be produced in your lightning server, but will not appear in the notebook.
['Disable', 'plotting', 'in', 'the', 'iPython', 'notebook', '.']
train
https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/main.py#L85-L98
3,968
PolicyStat/jobtastic
jobtastic/task.py
JobtasticTask._get_cache
def _get_cache(self): """ Return the cache to use for thundering herd protection, etc. """ if not self._cache: self._cache = get_cache(self.app) return self._cache
python
def _get_cache(self): """ Return the cache to use for thundering herd protection, etc. """ if not self._cache: self._cache = get_cache(self.app) return self._cache
['def', '_get_cache', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_cache', ':', 'self', '.', '_cache', '=', 'get_cache', '(', 'self', '.', 'app', ')', 'return', 'self', '.', '_cache']
Return the cache to use for thundering herd protection, etc.
['Return', 'the', 'cache', 'to', 'use', 'for', 'thundering', 'herd', 'protection', 'etc', '.']
train
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L405-L411
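The lazy-initialization idiom in miniature; a bare object() stands in for whatever get_cache(self.app) returns, an assumption made only so the snippet runs:

    class Task:
        def __init__(self):
            self._cache = None

        def _get_cache(self):
            # Build the cache on first use, then hand back the same object.
            if not self._cache:
                self._cache = object()  # stand-in for get_cache(self.app)
            return self._cache

    t = Task()
    assert t._get_cache() is t._get_cache()  # one shared cache per task

Note the truthiness check mirrors the original: a falsy cache object would be rebuilt on every call, which is why the stand-in here is a plain object() rather than an empty dict.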
3,969
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.probabilities
def probabilities(self, choosers, alternatives): """ Returns alternative probabilties for each chooser segment as a dictionary keyed by segment name. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probabilties : dict of pandas.Series """ logger.debug( 'start: calculate probabilities in LCM group {}'.format(self.name)) probs = {} for name, df in self._iter_groups(choosers): probs[name] = self.models[name].probabilities(df, alternatives) logger.debug( 'finish: calculate probabilities in LCM group {}'.format( self.name)) return probs
python
def probabilities(self, choosers, alternatives): """ Returns alternative probabilties for each chooser segment as a dictionary keyed by segment name. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probabilties : dict of pandas.Series """ logger.debug( 'start: calculate probabilities in LCM group {}'.format(self.name)) probs = {} for name, df in self._iter_groups(choosers): probs[name] = self.models[name].probabilities(df, alternatives) logger.debug( 'finish: calculate probabilities in LCM group {}'.format( self.name)) return probs
['def', 'probabilities', '(', 'self', ',', 'choosers', ',', 'alternatives', ')', ':', 'logger', '.', 'debug', '(', "'start: calculate probabilities in LCM group {}'", '.', 'format', '(', 'self', '.', 'name', ')', ')', 'probs', '=', '{', '}', 'for', 'name', ',', 'df', 'in', 'self', '.', '_iter_groups', '(', 'choosers', ')', ':', 'probs', '[', 'name', ']', '=', 'self', '.', 'models', '[', 'name', ']', '.', 'probabilities', '(', 'df', ',', 'alternatives', ')', 'logger', '.', 'debug', '(', "'finish: calculate probabilities in LCM group {}'", '.', 'format', '(', 'self', '.', 'name', ')', ')', 'return', 'probs']
Returns alternative probabilties for each chooser segment as a dictionary keyed by segment name. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probabilties : dict of pandas.Series
['Returns', 'alternative', 'probabilties', 'for', 'each', 'chooser', 'segment', 'as', 'a', 'dictionary', 'keyed', 'by', 'segment', 'name', '.']
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1089-L1117
3,970
ClericPy/torequests
torequests/crawlers.py
CleanRequest.reset_new_request
def reset_new_request(self): """Remove the non-sense args from the self.ignore, return self.new_request""" raw_url = self.new_request['url'] parsed_url = urlparse(raw_url) qsl = parse_qsl(parsed_url.query) new_url = self._join_url( parsed_url, [i for i in qsl if i not in self.ignore['qsl']]) self.new_request['url'] = new_url self.logger_function('ignore: %s' % self.ignore) for key in self.ignore['headers']: self.new_request['headers'].pop(key) if not self.new_request.get('headers'): self.new_request.pop('headers', None) if self.ignore['Cookie'] and 'Cookie' not in self.ignore['headers']: headers = self.new_request['headers'] headers = {key.title(): headers[key] for key in headers} if 'Cookie' in headers: cookies = SimpleCookie(headers['Cookie']) new_cookie = '; '.join([ i[1].OutputString() for i in cookies.items() if i[0] not in self.ignore['Cookie'] ]) self.new_request['headers']['Cookie'] = new_cookie if self.new_request['method'] == 'post': data = self.new_request.get('data') if data: if isinstance(data, dict): for key in self.ignore['form_data']: data.pop(key) if (not data) or self.ignore['total_data']: # not need data any more self.new_request.pop('data', None) if self.has_json_data and 'data' in self.new_request: json_data = json.loads(data.decode(self.encoding)) for key in self.ignore['json_data']: json_data.pop(key) self.new_request['data'] = json.dumps(json_data).encode( self.encoding) return self.new_request
python
def reset_new_request(self): """Remove the non-sense args from the self.ignore, return self.new_request""" raw_url = self.new_request['url'] parsed_url = urlparse(raw_url) qsl = parse_qsl(parsed_url.query) new_url = self._join_url( parsed_url, [i for i in qsl if i not in self.ignore['qsl']]) self.new_request['url'] = new_url self.logger_function('ignore: %s' % self.ignore) for key in self.ignore['headers']: self.new_request['headers'].pop(key) if not self.new_request.get('headers'): self.new_request.pop('headers', None) if self.ignore['Cookie'] and 'Cookie' not in self.ignore['headers']: headers = self.new_request['headers'] headers = {key.title(): headers[key] for key in headers} if 'Cookie' in headers: cookies = SimpleCookie(headers['Cookie']) new_cookie = '; '.join([ i[1].OutputString() for i in cookies.items() if i[0] not in self.ignore['Cookie'] ]) self.new_request['headers']['Cookie'] = new_cookie if self.new_request['method'] == 'post': data = self.new_request.get('data') if data: if isinstance(data, dict): for key in self.ignore['form_data']: data.pop(key) if (not data) or self.ignore['total_data']: # not need data any more self.new_request.pop('data', None) if self.has_json_data and 'data' in self.new_request: json_data = json.loads(data.decode(self.encoding)) for key in self.ignore['json_data']: json_data.pop(key) self.new_request['data'] = json.dumps(json_data).encode( self.encoding) return self.new_request
['def', 'reset_new_request', '(', 'self', ')', ':', 'raw_url', '=', 'self', '.', 'new_request', '[', "'url'", ']', 'parsed_url', '=', 'urlparse', '(', 'raw_url', ')', 'qsl', '=', 'parse_qsl', '(', 'parsed_url', '.', 'query', ')', 'new_url', '=', 'self', '.', '_join_url', '(', 'parsed_url', ',', '[', 'i', 'for', 'i', 'in', 'qsl', 'if', 'i', 'not', 'in', 'self', '.', 'ignore', '[', "'qsl'", ']', ']', ')', 'self', '.', 'new_request', '[', "'url'", ']', '=', 'new_url', 'self', '.', 'logger_function', '(', "'ignore: %s'", '%', 'self', '.', 'ignore', ')', 'for', 'key', 'in', 'self', '.', 'ignore', '[', "'headers'", ']', ':', 'self', '.', 'new_request', '[', "'headers'", ']', '.', 'pop', '(', 'key', ')', 'if', 'not', 'self', '.', 'new_request', '.', 'get', '(', "'headers'", ')', ':', 'self', '.', 'new_request', '.', 'pop', '(', "'headers'", ',', 'None', ')', 'if', 'self', '.', 'ignore', '[', "'Cookie'", ']', 'and', "'Cookie'", 'not', 'in', 'self', '.', 'ignore', '[', "'headers'", ']', ':', 'headers', '=', 'self', '.', 'new_request', '[', "'headers'", ']', 'headers', '=', '{', 'key', '.', 'title', '(', ')', ':', 'headers', '[', 'key', ']', 'for', 'key', 'in', 'headers', '}', 'if', "'Cookie'", 'in', 'headers', ':', 'cookies', '=', 'SimpleCookie', '(', 'headers', '[', "'Cookie'", ']', ')', 'new_cookie', '=', "'; '", '.', 'join', '(', '[', 'i', '[', '1', ']', '.', 'OutputString', '(', ')', 'for', 'i', 'in', 'cookies', '.', 'items', '(', ')', 'if', 'i', '[', '0', ']', 'not', 'in', 'self', '.', 'ignore', '[', "'Cookie'", ']', ']', ')', 'self', '.', 'new_request', '[', "'headers'", ']', '[', "'Cookie'", ']', '=', 'new_cookie', 'if', 'self', '.', 'new_request', '[', "'method'", ']', '==', "'post'", ':', 'data', '=', 'self', '.', 'new_request', '.', 'get', '(', "'data'", ')', 'if', 'data', ':', 'if', 'isinstance', '(', 'data', ',', 'dict', ')', ':', 'for', 'key', 'in', 'self', '.', 'ignore', '[', "'form_data'", ']', ':', 'data', '.', 'pop', '(', 'key', ')', 'if', '(', 'not', 'data', ')', 'or', 'self', '.', 'ignore', '[', "'total_data'", ']', ':', '# not need data any more', 'self', '.', 'new_request', '.', 'pop', '(', "'data'", ',', 'None', ')', 'if', 'self', '.', 'has_json_data', 'and', "'data'", 'in', 'self', '.', 'new_request', ':', 'json_data', '=', 'json', '.', 'loads', '(', 'data', '.', 'decode', '(', 'self', '.', 'encoding', ')', ')', 'for', 'key', 'in', 'self', '.', 'ignore', '[', "'json_data'", ']', ':', 'json_data', '.', 'pop', '(', 'key', ')', 'self', '.', 'new_request', '[', "'data'", ']', '=', 'json', '.', 'dumps', '(', 'json_data', ')', '.', 'encode', '(', 'self', '.', 'encoding', ')', 'return', 'self', '.', 'new_request']
Remove the non-sense args from the self.ignore, return self.new_request
['Remove', 'the', 'non', '-', 'sense', 'args', 'from', 'the', 'self', '.', 'ignore', 'return', 'self', '.', 'new_request']
train
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L282-L323
3,971
codelv/enaml-native
src/enamlnative/android/android_picker.py
AndroidPicker.create_widget
def create_widget(self): """ Create the underlying widget. """ d = self.declaration self.widget = Picker(self.get_context(), None, d.style or '@attr/numberPickerStyle')
python
def create_widget(self): """ Create the underlying widget. """ d = self.declaration self.widget = Picker(self.get_context(), None, d.style or '@attr/numberPickerStyle')
['def', 'create_widget', '(', 'self', ')', ':', 'd', '=', 'self', '.', 'declaration', 'self', '.', 'widget', '=', 'Picker', '(', 'self', '.', 'get_context', '(', ')', ',', 'None', ',', 'd', '.', 'style', 'or', "'@attr/numberPickerStyle'", ')']
Create the underlying widget.
['Create', 'the', 'underlying', 'widget', '.']
train
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_picker.py#L45-L51
3,972
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewpanel.py
XViewPanel.setLocked
def setLocked(self, state, force=False): """ Sets the locked state for this panel to the inputed state. :param state | <bool> """ if not force and state == self._locked: return self._locked = state tabbar = self.tabBar() tabbar.setLocked(state) if self.hideTabsWhenLocked(): tabbar.setVisible(self.count() > 1 or not state) else: tabbar.setVisible(True) if tabbar.isVisible(): self.setContentsMargins(6, tabbar.height(), 6, 6) else: self.setContentsMargins(1, 1, 1, 1) self.adjustSizeConstraint()
python
def setLocked(self, state, force=False): """ Sets the locked state for this panel to the inputed state. :param state | <bool> """ if not force and state == self._locked: return self._locked = state tabbar = self.tabBar() tabbar.setLocked(state) if self.hideTabsWhenLocked(): tabbar.setVisible(self.count() > 1 or not state) else: tabbar.setVisible(True) if tabbar.isVisible(): self.setContentsMargins(6, tabbar.height(), 6, 6) else: self.setContentsMargins(1, 1, 1, 1) self.adjustSizeConstraint()
['def', 'setLocked', '(', 'self', ',', 'state', ',', 'force', '=', 'False', ')', ':', 'if', 'not', 'force', 'and', 'state', '==', 'self', '.', '_locked', ':', 'return', 'self', '.', '_locked', '=', 'state', 'tabbar', '=', 'self', '.', 'tabBar', '(', ')', 'tabbar', '.', 'setLocked', '(', 'state', ')', 'if', 'self', '.', 'hideTabsWhenLocked', '(', ')', ':', 'tabbar', '.', 'setVisible', '(', 'self', '.', 'count', '(', ')', '>', '1', 'or', 'not', 'state', ')', 'else', ':', 'tabbar', '.', 'setVisible', '(', 'True', ')', 'if', 'tabbar', '.', 'isVisible', '(', ')', ':', 'self', '.', 'setContentsMargins', '(', '6', ',', 'tabbar', '.', 'height', '(', ')', ',', '6', ',', '6', ')', 'else', ':', 'self', '.', 'setContentsMargins', '(', '1', ',', '1', ',', '1', ',', '1', ')', 'self', '.', 'adjustSizeConstraint', '(', ')']
Sets the locked state for this panel to the inputed state. :param state | <bool>
['Sets', 'the', 'locked', 'state', 'for', 'this', 'panel', 'to', 'the', 'inputed', 'state', '.', ':', 'param', 'state', '|', '<bool', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewpanel.py#L1470-L1492
3,973
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
Credentials._to_dict
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'credential_id') and self.credential_id is not None: _dict['credential_id'] = self.credential_id if hasattr(self, 'source_type') and self.source_type is not None: _dict['source_type'] = self.source_type if hasattr( self, 'credential_details') and self.credential_details is not None: _dict['credential_details'] = self.credential_details._to_dict() return _dict
python
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'credential_id') and self.credential_id is not None: _dict['credential_id'] = self.credential_id if hasattr(self, 'source_type') and self.source_type is not None: _dict['source_type'] = self.source_type if hasattr( self, 'credential_details') and self.credential_details is not None: _dict['credential_details'] = self.credential_details._to_dict() return _dict
['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'credential_id'", ')', 'and', 'self', '.', 'credential_id', 'is', 'not', 'None', ':', '_dict', '[', "'credential_id'", ']', '=', 'self', '.', 'credential_id', 'if', 'hasattr', '(', 'self', ',', "'source_type'", ')', 'and', 'self', '.', 'source_type', 'is', 'not', 'None', ':', '_dict', '[', "'source_type'", ']', '=', 'self', '.', 'source_type', 'if', 'hasattr', '(', 'self', ',', "'credential_details'", ')', 'and', 'self', '.', 'credential_details', 'is', 'not', 'None', ':', '_dict', '[', "'credential_details'", ']', '=', 'self', '.', 'credential_details', '.', '_to_dict', '(', ')', 'return', '_dict']
Return a json dictionary representing this model.
['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.']
train
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L4549-L4560
3,974
mattbierner/blotre-py
blotre.py
Blotre._add_auth_headers
def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base
python
def _add_auth_headers(self, base): """Attach the acces_token to a request.""" if 'access_token' in self.creds: return _extend(base, { 'authorization': 'Bearer ' + self.creds['access_token'] }) return base
['def', '_add_auth_headers', '(', 'self', ',', 'base', ')', ':', 'if', "'access_token'", 'in', 'self', '.', 'creds', ':', 'return', '_extend', '(', 'base', ',', '{', "'authorization'", ':', "'Bearer '", '+', 'self', '.', 'creds', '[', "'access_token'", ']', '}', ')', 'return', 'base']
Attach the acces_token to a request.
['Attach', 'the', 'acces_token', 'to', 'a', 'request', '.']
train
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L200-L206
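The bearer-token merge in isolation; _extend is blotre's dict-merge helper, re-created here under the assumption that it copies the base dict and overlays the extra keys:

    def _extend(base, extra):
        merged = dict(base)   # assumed copy-and-update semantics
        merged.update(extra)
        return merged

    creds = {'access_token': 'abc123'}  # invented token

    def add_auth_headers(base):
        if 'access_token' in creds:
            return _extend(base, {'authorization': 'Bearer ' + creds['access_token']})
        return base

    print(add_auth_headers({'accept': 'application/json'}))
    # {'accept': 'application/json', 'authorization': 'Bearer abc123'}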
3,975
PMBio/limix-backup
limix/deprecated/archive/varianceDecompositionOld.py
VarianceDecomposition.estimateHeritabilities
def estimateHeritabilities(self, K, verbose=False): """ estimate variance components and fixed effects from a single trait model having only two terms """ # Fit single trait model varg = SP.zeros(self.P) varn = SP.zeros(self.P) fixed = SP.zeros((1,self.P)) for p in range(self.P): y = self.Y[:,p:p+1] lmm = limix.CLMM() lmm.setK(K) lmm.setSNPs(SP.ones((K.shape[0],1))) lmm.setPheno(y) lmm.setCovs(SP.zeros((K.shape[0],1))) lmm.setVarcompApprox0(-20, 20, 1000) lmm.process() delta = SP.exp(lmm.getLdelta0()[0,0]) Vtot = SP.exp(lmm.getLSigma()[0,0]) varg[p] = Vtot varn[p] = delta*Vtot fixed[:,p] = lmm.getBetaSNP() if verbose: print(p) sth = {} sth['varg'] = varg sth['varn'] = varn sth['fixed'] = fixed return sth
python
def estimateHeritabilities(self, K, verbose=False): """ estimate variance components and fixed effects from a single trait model having only two terms """ # Fit single trait model varg = SP.zeros(self.P) varn = SP.zeros(self.P) fixed = SP.zeros((1,self.P)) for p in range(self.P): y = self.Y[:,p:p+1] lmm = limix.CLMM() lmm.setK(K) lmm.setSNPs(SP.ones((K.shape[0],1))) lmm.setPheno(y) lmm.setCovs(SP.zeros((K.shape[0],1))) lmm.setVarcompApprox0(-20, 20, 1000) lmm.process() delta = SP.exp(lmm.getLdelta0()[0,0]) Vtot = SP.exp(lmm.getLSigma()[0,0]) varg[p] = Vtot varn[p] = delta*Vtot fixed[:,p] = lmm.getBetaSNP() if verbose: print(p) sth = {} sth['varg'] = varg sth['varn'] = varn sth['fixed'] = fixed return sth
['def', 'estimateHeritabilities', '(', 'self', ',', 'K', ',', 'verbose', '=', 'False', ')', ':', '# Fit single trait model', 'varg', '=', 'SP', '.', 'zeros', '(', 'self', '.', 'P', ')', 'varn', '=', 'SP', '.', 'zeros', '(', 'self', '.', 'P', ')', 'fixed', '=', 'SP', '.', 'zeros', '(', '(', '1', ',', 'self', '.', 'P', ')', ')', 'for', 'p', 'in', 'range', '(', 'self', '.', 'P', ')', ':', 'y', '=', 'self', '.', 'Y', '[', ':', ',', 'p', ':', 'p', '+', '1', ']', 'lmm', '=', 'limix', '.', 'CLMM', '(', ')', 'lmm', '.', 'setK', '(', 'K', ')', 'lmm', '.', 'setSNPs', '(', 'SP', '.', 'ones', '(', '(', 'K', '.', 'shape', '[', '0', ']', ',', '1', ')', ')', ')', 'lmm', '.', 'setPheno', '(', 'y', ')', 'lmm', '.', 'setCovs', '(', 'SP', '.', 'zeros', '(', '(', 'K', '.', 'shape', '[', '0', ']', ',', '1', ')', ')', ')', 'lmm', '.', 'setVarcompApprox0', '(', '-', '20', ',', '20', ',', '1000', ')', 'lmm', '.', 'process', '(', ')', 'delta', '=', 'SP', '.', 'exp', '(', 'lmm', '.', 'getLdelta0', '(', ')', '[', '0', ',', '0', ']', ')', 'Vtot', '=', 'SP', '.', 'exp', '(', 'lmm', '.', 'getLSigma', '(', ')', '[', '0', ',', '0', ']', ')', 'varg', '[', 'p', ']', '=', 'Vtot', 'varn', '[', 'p', ']', '=', 'delta', '*', 'Vtot', 'fixed', '[', ':', ',', 'p', ']', '=', 'lmm', '.', 'getBetaSNP', '(', ')', 'if', 'verbose', ':', 'print', '(', 'p', ')', 'sth', '=', '{', '}', 'sth', '[', "'varg'", ']', '=', 'varg', 'sth', '[', "'varn'", ']', '=', 'varn', 'sth', '[', "'fixed'", ']', '=', 'fixed', 'return', 'sth']
estimate variance components and fixed effects from a single trait model having only two terms
['estimate', 'variance', 'components', 'and', 'fixed', 'effects', 'from', 'a', 'single', 'trait', 'model', 'having', 'only', 'two', 'terms']
train
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/archive/varianceDecompositionOld.py#L768-L802
3,976
kbr/fritzconnection
fritzconnection/fritzconnection.py
FritzConnection.get_action_arguments
def get_action_arguments(self, service_name, action_name): """ Returns a list of tuples with all known arguments for the given service- and action-name combination. The tuples contain the argument-name, direction and data_type. """ return self.services[service_name].actions[action_name].info
python
def get_action_arguments(self, service_name, action_name): """ Returns a list of tuples with all known arguments for the given service- and action-name combination. The tuples contain the argument-name, direction and data_type. """ return self.services[service_name].actions[action_name].info
['def', 'get_action_arguments', '(', 'self', ',', 'service_name', ',', 'action_name', ')', ':', 'return', 'self', '.', 'services', '[', 'service_name', ']', '.', 'actions', '[', 'action_name', ']', '.', 'info']
Returns a list of tuples with all known arguments for the given service- and action-name combination. The tuples contain the argument-name, direction and data_type.
['Returns', 'a', 'list', 'of', 'tuples', 'with', 'all', 'known', 'arguments', 'for', 'the', 'given', 'service', '-', 'and', 'action', '-', 'name', 'combination', '.', 'The', 'tuples', 'contain', 'the', 'argument', '-', 'name', 'direction', 'and', 'data_type', '.']
train
https://github.com/kbr/fritzconnection/blob/b183f759ef19dd1652371e912d36cfe34f6639ac/fritzconnection/fritzconnection.py#L351-L357
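A hedged usage sketch: it needs a reachable FRITZ!Box, the import path mirrors this repository layout, and the service/action names are typical TR-064 values whose exact spelling varies by firmware and library version:

    from fritzconnection.fritzconnection import FritzConnection

    fc = FritzConnection(address='192.168.178.1')  # common default address
    # Each tuple is (argument_name, direction, data_type).
    for name, direction, data_type in fc.get_action_arguments(
            'WANIPConnection:1', 'GetStatusInfo'):
        print(name, direction, data_type)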
3,977
jbarlow83/OCRmyPDF
src/ocrmypdf/pdfinfo/__init__.py
_normalize_stack
def _normalize_stack(graphobjs): """Convert runs of qQ's in the stack into single graphobjs""" for operands, operator in graphobjs: operator = str(operator) if re.match(r'Q*q+$', operator): # Zero or more Q, one or more q for char in operator: # Split into individual yield ([], char) # Yield individual else: yield (operands, operator)
python
def _normalize_stack(graphobjs): """Convert runs of qQ's in the stack into single graphobjs""" for operands, operator in graphobjs: operator = str(operator) if re.match(r'Q*q+$', operator): # Zero or more Q, one or more q for char in operator: # Split into individual yield ([], char) # Yield individual else: yield (operands, operator)
['def', '_normalize_stack', '(', 'graphobjs', ')', ':', 'for', 'operands', ',', 'operator', 'in', 'graphobjs', ':', 'operator', '=', 'str', '(', 'operator', ')', 'if', 're', '.', 'match', '(', "r'Q*q+$'", ',', 'operator', ')', ':', '# Zero or more Q, one or more q', 'for', 'char', 'in', 'operator', ':', '# Split into individual', 'yield', '(', '[', ']', ',', 'char', ')', '# Yield individual', 'else', ':', 'yield', '(', 'operands', ',', 'operator', ')']
Convert runs of qQ's in the stack into single graphobjs
['Convert', 'runs', 'of', 'qQ', 's', 'in', 'the', 'stack', 'into', 'single', 'graphobjs']
train
https://github.com/jbarlow83/OCRmyPDF/blob/79c84eefa353632a3d7ccddbd398c6678c1c1777/src/ocrmypdf/pdfinfo/__init__.py#L109-L117
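What the run-splitting does on a made-up PDF content-stream token list (re is the only dependency):

    import re

    def _normalize_stack(graphobjs):
        # A run like 'QQq' becomes three single-operator entries.
        for operands, operator in graphobjs:
            operator = str(operator)
            if re.match(r'Q*q+$', operator):
                for char in operator:
                    yield ([], char)
            else:
                yield (operands, operator)

    stream = [([], 'QQq'), ([1, 0, 0, 1, 0, 0], 'cm')]
    print(list(_normalize_stack(stream)))
    # [([], 'Q'), ([], 'Q'), ([], 'q'), ([1, 0, 0, 1, 0, 0], 'cm')]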
3,978
pkgw/pwkit
pwkit/ellipses.py
sigmascale
def sigmascale (nsigma): """Say we take a Gaussian bivariate and convert the parameters of the distribution to an ellipse (major, minor, PA). By what factor should we scale those axes to make the area of the ellipse correspond to the n-sigma confidence interval? Negative or zero values result in NaN. """ from scipy.special import erfc return np.sqrt (-2 * np.log (erfc (nsigma / np.sqrt (2))))
python
def sigmascale (nsigma): """Say we take a Gaussian bivariate and convert the parameters of the distribution to an ellipse (major, minor, PA). By what factor should we scale those axes to make the area of the ellipse correspond to the n-sigma confidence interval? Negative or zero values result in NaN. """ from scipy.special import erfc return np.sqrt (-2 * np.log (erfc (nsigma / np.sqrt (2))))
['def', 'sigmascale', '(', 'nsigma', ')', ':', 'from', 'scipy', '.', 'special', 'import', 'erfc', 'return', 'np', '.', 'sqrt', '(', '-', '2', '*', 'np', '.', 'log', '(', 'erfc', '(', 'nsigma', '/', 'np', '.', 'sqrt', '(', '2', ')', ')', ')', ')']
Say we take a Gaussian bivariate and convert the parameters of the distribution to an ellipse (major, minor, PA). By what factor should we scale those axes to make the area of the ellipse correspond to the n-sigma confidence interval? Negative or zero values result in NaN.
['Say', 'we', 'take', 'a', 'Gaussian', 'bivariate', 'and', 'convert', 'the', 'parameters', 'of', 'the', 'distribution', 'to', 'an', 'ellipse', '(', 'major', 'minor', 'PA', ')', '.', 'By', 'what', 'factor', 'should', 'we', 'scale', 'those', 'axes', 'to', 'make', 'the', 'area', 'of', 'the', 'ellipse', 'correspond', 'to', 'the', 'n', '-', 'sigma', 'confidence', 'interval?']
train
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/ellipses.py#L44-L54
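A quick numeric check of the formula; the values follow from erfc alone, e.g. the 68.3% confidence ellipse of a bivariate Gaussian is about 1.52 times the nominal 1-sigma ellipse:

    import numpy as np
    from scipy.special import erfc

    def sigmascale(nsigma):
        return np.sqrt(-2 * np.log(erfc(nsigma / np.sqrt(2))))

    print(sigmascale(1))   # ~1.515
    print(sigmascale(2))   # ~2.486
    print(sigmascale(-1))  # nan: erfc(...) > 1 makes the log positive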
3,979
pandas-dev/pandas
pandas/core/strings.py
str_replace
def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): r""" Replace occurrences of pattern/regex in the Series/Index with some other string. Equivalent to :meth:`str.replace` or :func:`re.sub`. Parameters ---------- pat : str or compiled regex String can be a character sequence or regular expression. .. versionadded:: 0.20.0 `pat` also accepts a compiled regex. repl : str or callable Replacement string or a callable. The callable is passed the regex match object and must return a replacement string to be used. See :func:`re.sub`. .. versionadded:: 0.20.0 `repl` also accepts a callable. n : int, default -1 (all) Number of replacements to make from start. case : bool, default None - If True, case sensitive (the default if `pat` is a string) - Set to False for case insensitive - Cannot be set if `pat` is a compiled regex flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE - Cannot be set if `pat` is a compiled regex regex : bool, default True - If True, assumes the passed-in pattern is a regular expression. - If False, treats the pattern as a literal string - Cannot be set to False if `pat` is a compiled regex or `repl` is a callable. .. versionadded:: 0.23.0 Returns ------- Series or Index of object A copy of the object with all matching occurrences of `pat` replaced by `repl`. Raises ------ ValueError * if `regex` is False and `repl` is a callable or `pat` is a compiled regex * if `pat` is a compiled regex and `case` or `flags` is set Notes ----- When `pat` is a compiled regex, all flags should be included in the compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled regex will raise an error. Examples -------- When `pat` is a string and `regex` is True (the default), the given `pat` is compiled as a regex. When `repl` is a string, it replaces matching regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are left as is: >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True) 0 bao 1 baz 2 NaN dtype: object When `pat` is a string and `regex` is False, every `pat` is replaced with `repl` as with :meth:`str.replace`: >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False) 0 bao 1 fuz 2 NaN dtype: object When `repl` is a callable, it is called on every `pat` using :func:`re.sub`. The callable should expect one positional argument (a regex object) and return a string. To get the idea: >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr) 0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo 1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz 2 NaN dtype: object Reverse every lowercase alphabetic word: >>> repl = lambda m: m.group(0)[::-1] >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl) 0 oof 123 1 rab zab 2 NaN dtype: object Using regex groups (extract second group and swap case): >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)" >>> repl = lambda m: m.group('two').swapcase() >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl) 0 tWO 1 bAR dtype: object Using a compiled regex with flags >>> import re >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE) >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar') 0 foo 1 bar 2 NaN dtype: object """ # Check whether repl is valid (GH 13438, GH 15055) if not (is_string_like(repl) or callable(repl)): raise TypeError("repl must be a string or callable") is_compiled_re = is_re(pat) if regex: if is_compiled_re: if (case is not None) or (flags != 0): raise ValueError("case and flags cannot be set" " when pat is a compiled regex") else: # not a compiled regex # set default case if case is None: case = True # add case flag, if provided if case is False: flags |= re.IGNORECASE if is_compiled_re or len(pat) > 1 or flags or callable(repl): n = n if n >= 0 else 0 compiled = re.compile(pat, flags=flags) f = lambda x: compiled.sub(repl=repl, string=x, count=n) else: f = lambda x: x.replace(pat, repl, n) else: if is_compiled_re: raise ValueError("Cannot use a compiled regex as replacement " "pattern with regex=False") if callable(repl): raise ValueError("Cannot use a callable replacement when " "regex=False") f = lambda x: x.replace(pat, repl, n) return _na_map(f, arr)
python
def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): r""" Replace occurrences of pattern/regex in the Series/Index with some other string. Equivalent to :meth:`str.replace` or :func:`re.sub`. Parameters ---------- pat : str or compiled regex String can be a character sequence or regular expression. .. versionadded:: 0.20.0 `pat` also accepts a compiled regex. repl : str or callable Replacement string or a callable. The callable is passed the regex match object and must return a replacement string to be used. See :func:`re.sub`. .. versionadded:: 0.20.0 `repl` also accepts a callable. n : int, default -1 (all) Number of replacements to make from start. case : bool, default None - If True, case sensitive (the default if `pat` is a string) - Set to False for case insensitive - Cannot be set if `pat` is a compiled regex flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE - Cannot be set if `pat` is a compiled regex regex : bool, default True - If True, assumes the passed-in pattern is a regular expression. - If False, treats the pattern as a literal string - Cannot be set to False if `pat` is a compiled regex or `repl` is a callable. .. versionadded:: 0.23.0 Returns ------- Series or Index of object A copy of the object with all matching occurrences of `pat` replaced by `repl`. Raises ------ ValueError * if `regex` is False and `repl` is a callable or `pat` is a compiled regex * if `pat` is a compiled regex and `case` or `flags` is set Notes ----- When `pat` is a compiled regex, all flags should be included in the compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled regex will raise an error. Examples -------- When `pat` is a string and `regex` is True (the default), the given `pat` is compiled as a regex. When `repl` is a string, it replaces matching regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are left as is: >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True) 0 bao 1 baz 2 NaN dtype: object When `pat` is a string and `regex` is False, every `pat` is replaced with `repl` as with :meth:`str.replace`: >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False) 0 bao 1 fuz 2 NaN dtype: object When `repl` is a callable, it is called on every `pat` using :func:`re.sub`. The callable should expect one positional argument (a regex object) and return a string. To get the idea: >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr) 0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo 1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz 2 NaN dtype: object Reverse every lowercase alphabetic word: >>> repl = lambda m: m.group(0)[::-1] >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl) 0 oof 123 1 rab zab 2 NaN dtype: object Using regex groups (extract second group and swap case): >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)" >>> repl = lambda m: m.group('two').swapcase() >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl) 0 tWO 1 bAR dtype: object Using a compiled regex with flags >>> import re >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE) >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar') 0 foo 1 bar 2 NaN dtype: object """ # Check whether repl is valid (GH 13438, GH 15055) if not (is_string_like(repl) or callable(repl)): raise TypeError("repl must be a string or callable") is_compiled_re = is_re(pat) if regex: if is_compiled_re: if (case is not None) or (flags != 0): raise ValueError("case and flags cannot be set" " when pat is a compiled regex") else: # not a compiled regex # set default case if case is None: case = True # add case flag, if provided if case is False: flags |= re.IGNORECASE if is_compiled_re or len(pat) > 1 or flags or callable(repl): n = n if n >= 0 else 0 compiled = re.compile(pat, flags=flags) f = lambda x: compiled.sub(repl=repl, string=x, count=n) else: f = lambda x: x.replace(pat, repl, n) else: if is_compiled_re: raise ValueError("Cannot use a compiled regex as replacement " "pattern with regex=False") if callable(repl): raise ValueError("Cannot use a callable replacement when " "regex=False") f = lambda x: x.replace(pat, repl, n) return _na_map(f, arr)
['def', 'str_replace', '(', 'arr', ',', 'pat', ',', 'repl', ',', 'n', '=', '-', '1', ',', 'case', '=', 'None', ',', 'flags', '=', '0', ',', 'regex', '=', 'True', ')', ':', '# Check whether repl is valid (GH 13438, GH 15055)', 'if', 'not', '(', 'is_string_like', '(', 'repl', ')', 'or', 'callable', '(', 'repl', ')', ')', ':', 'raise', 'TypeError', '(', '"repl must be a string or callable"', ')', 'is_compiled_re', '=', 'is_re', '(', 'pat', ')', 'if', 'regex', ':', 'if', 'is_compiled_re', ':', 'if', '(', 'case', 'is', 'not', 'None', ')', 'or', '(', 'flags', '!=', '0', ')', ':', 'raise', 'ValueError', '(', '"case and flags cannot be set"', '" when pat is a compiled regex"', ')', 'else', ':', '# not a compiled regex', '# set default case', 'if', 'case', 'is', 'None', ':', 'case', '=', 'True', '# add case flag, if provided', 'if', 'case', 'is', 'False', ':', 'flags', '|=', 're', '.', 'IGNORECASE', 'if', 'is_compiled_re', 'or', 'len', '(', 'pat', ')', '>', '1', 'or', 'flags', 'or', 'callable', '(', 'repl', ')', ':', 'n', '=', 'n', 'if', 'n', '>=', '0', 'else', '0', 'compiled', '=', 're', '.', 'compile', '(', 'pat', ',', 'flags', '=', 'flags', ')', 'f', '=', 'lambda', 'x', ':', 'compiled', '.', 'sub', '(', 'repl', '=', 'repl', ',', 'string', '=', 'x', ',', 'count', '=', 'n', ')', 'else', ':', 'f', '=', 'lambda', 'x', ':', 'x', '.', 'replace', '(', 'pat', ',', 'repl', ',', 'n', ')', 'else', ':', 'if', 'is_compiled_re', ':', 'raise', 'ValueError', '(', '"Cannot use a compiled regex as replacement "', '"pattern with regex=False"', ')', 'if', 'callable', '(', 'repl', ')', ':', 'raise', 'ValueError', '(', '"Cannot use a callable replacement when "', '"regex=False"', ')', 'f', '=', 'lambda', 'x', ':', 'x', '.', 'replace', '(', 'pat', ',', 'repl', ',', 'n', ')', 'return', '_na_map', '(', 'f', ',', 'arr', ')']
r""" Replace occurrences of pattern/regex in the Series/Index with some other string. Equivalent to :meth:`str.replace` or :func:`re.sub`. Parameters ---------- pat : str or compiled regex String can be a character sequence or regular expression. .. versionadded:: 0.20.0 `pat` also accepts a compiled regex. repl : str or callable Replacement string or a callable. The callable is passed the regex match object and must return a replacement string to be used. See :func:`re.sub`. .. versionadded:: 0.20.0 `repl` also accepts a callable. n : int, default -1 (all) Number of replacements to make from start. case : bool, default None - If True, case sensitive (the default if `pat` is a string) - Set to False for case insensitive - Cannot be set if `pat` is a compiled regex flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE - Cannot be set if `pat` is a compiled regex regex : bool, default True - If True, assumes the passed-in pattern is a regular expression. - If False, treats the pattern as a literal string - Cannot be set to False if `pat` is a compiled regex or `repl` is a callable. .. versionadded:: 0.23.0 Returns ------- Series or Index of object A copy of the object with all matching occurrences of `pat` replaced by `repl`. Raises ------ ValueError * if `regex` is False and `repl` is a callable or `pat` is a compiled regex * if `pat` is a compiled regex and `case` or `flags` is set Notes ----- When `pat` is a compiled regex, all flags should be included in the compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled regex will raise an error. Examples -------- When `pat` is a string and `regex` is True (the default), the given `pat` is compiled as a regex. When `repl` is a string, it replaces matching regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are left as is: >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True) 0 bao 1 baz 2 NaN dtype: object When `pat` is a string and `regex` is False, every `pat` is replaced with `repl` as with :meth:`str.replace`: >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False) 0 bao 1 fuz 2 NaN dtype: object When `repl` is a callable, it is called on every `pat` using :func:`re.sub`. The callable should expect one positional argument (a regex object) and return a string. To get the idea: >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr) 0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo 1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz 2 NaN dtype: object Reverse every lowercase alphabetic word: >>> repl = lambda m: m.group(0)[::-1] >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl) 0 oof 123 1 rab zab 2 NaN dtype: object Using regex groups (extract second group and swap case): >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)" >>> repl = lambda m: m.group('two').swapcase() >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl) 0 tWO 1 bAR dtype: object Using a compiled regex with flags >>> import re >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE) >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar') 0 foo 1 bar 2 NaN dtype: object
['r', 'Replace', 'occurrences', 'of', 'pattern', '/', 'regex', 'in', 'the', 'Series', '/', 'Index', 'with', 'some', 'other', 'string', '.', 'Equivalent', 'to', ':', 'meth', ':', 'str', '.', 'replace', 'or', ':', 'func', ':', 're', '.', 'sub', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L423-L578
3,980
log2timeline/plaso
plaso/engine/profilers.py
SampleFileProfiler.Start
def Start(self): """Starts the profiler.""" filename = '{0:s}-{1:s}.csv.gz'.format( self._FILENAME_PREFIX, self._identifier) if self._path: filename = os.path.join(self._path, filename) self._sample_file = gzip.open(filename, 'wb') self._WritesString(self._FILE_HEADER) self._start_time = time.time()
python
def Start(self): """Starts the profiler.""" filename = '{0:s}-{1:s}.csv.gz'.format( self._FILENAME_PREFIX, self._identifier) if self._path: filename = os.path.join(self._path, filename) self._sample_file = gzip.open(filename, 'wb') self._WritesString(self._FILE_HEADER) self._start_time = time.time()
['def', 'Start', '(', 'self', ')', ':', 'filename', '=', "'{0:s}-{1:s}.csv.gz'", '.', 'format', '(', 'self', '.', '_FILENAME_PREFIX', ',', 'self', '.', '_identifier', ')', 'if', 'self', '.', '_path', ':', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', '_path', ',', 'filename', ')', 'self', '.', '_sample_file', '=', 'gzip', '.', 'open', '(', 'filename', ',', "'wb'", ')', 'self', '.', '_WritesString', '(', 'self', '.', '_FILE_HEADER', ')', 'self', '.', '_start_time', '=', 'time', '.', 'time', '(', ')']
Starts the profiler.
['Starts', 'the', 'profiler', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/profilers.py#L89-L99
3,981
pyhys/minimalmodbus
dummy_serial.py
Serial.write
def write(self, inputdata): """Write to a port on dummy_serial. Args: inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response for subsequent read operations. Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**. """ if VERBOSE: _print_out('\nDummy_serial: Writing to port. Given:' + repr(inputdata) + '\n') if sys.version_info[0] > 2: if not type(inputdata) == bytes: raise TypeError('The input must be type bytes. Given:' + repr(inputdata)) inputstring = str(inputdata, encoding='latin1') else: inputstring = inputdata if not self._isOpen: raise IOError('Dummy_serial: Trying to write, but the port is not open. Given:' + repr(inputdata)) # Look up which data that should be waiting for subsequent read commands try: response = RESPONSES[inputstring] except: response = DEFAULT_RESPONSE self._waiting_data = response
python
def write(self, inputdata): """Write to a port on dummy_serial. Args: inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response for subsequent read operations. Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**. """ if VERBOSE: _print_out('\nDummy_serial: Writing to port. Given:' + repr(inputdata) + '\n') if sys.version_info[0] > 2: if not type(inputdata) == bytes: raise TypeError('The input must be type bytes. Given:' + repr(inputdata)) inputstring = str(inputdata, encoding='latin1') else: inputstring = inputdata if not self._isOpen: raise IOError('Dummy_serial: Trying to write, but the port is not open. Given:' + repr(inputdata)) # Look up which data that should be waiting for subsequent read commands try: response = RESPONSES[inputstring] except: response = DEFAULT_RESPONSE self._waiting_data = response
['def', 'write', '(', 'self', ',', 'inputdata', ')', ':', 'if', 'VERBOSE', ':', '_print_out', '(', "'\\nDummy_serial: Writing to port. Given:'", '+', 'repr', '(', 'inputdata', ')', '+', "'\\n'", ')', 'if', 'sys', '.', 'version_info', '[', '0', ']', '>', '2', ':', 'if', 'not', 'type', '(', 'inputdata', ')', '==', 'bytes', ':', 'raise', 'TypeError', '(', "'The input must be type bytes. Given:'", '+', 'repr', '(', 'inputdata', ')', ')', 'inputstring', '=', 'str', '(', 'inputdata', ',', 'encoding', '=', "'latin1'", ')', 'else', ':', 'inputstring', '=', 'inputdata', 'if', 'not', 'self', '.', '_isOpen', ':', 'raise', 'IOError', '(', "'Dummy_serial: Trying to write, but the port is not open. Given:'", '+', 'repr', '(', 'inputdata', ')', ')', '# Look up which data that should be waiting for subsequent read commands', 'try', ':', 'response', '=', 'RESPONSES', '[', 'inputstring', ']', 'except', ':', 'response', '=', 'DEFAULT_RESPONSE', 'self', '.', '_waiting_data', '=', 'response']
Write to a port on dummy_serial. Args: inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response for subsequent read operations. Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.
['Write', 'to', 'a', 'port', 'on', 'dummy_serial', '.']
train
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/dummy_serial.py#L141-L169
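A hedged sketch of how the canned-response lookup is driven; RESPONSES and DEFAULT_RESPONSE are module-level names referenced above, a subsequent read() is assumed to return the stored _waiting_data, and the constructor arguments are illustrative:

    import dummy_serial

    # Keys are the latin-1 decoding of whatever gets written.
    dummy_serial.RESPONSES = {'PING\n': 'PONG\n'}
    dummy_serial.DEFAULT_RESPONSE = ''   # reply for unknown messages

    port = dummy_serial.Serial(port='DUMMYPORT')
    port.write(b'PING\n')   # Python 3: must be bytes, per the type check above
    print(port.read(5))     # the canned 'PONG\n' comes back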
3,982
wonambi-python/wonambi
wonambi/trans/analyze.py
export_event_params
def export_event_params(filename, params, count=None, density=None): """Write event analysis data to CSV.""" heading_row_1 = ['Segment index', 'Start time', 'End time', 'Stitches', 'Stage', 'Cycle', 'Event type', 'Channel'] spacer = [''] * (len(heading_row_1) - 1) param_headings_1 = ['Min. amplitude (uV)', 'Max. amplitude (uV)', 'Peak-to-peak amplitude (uV)', 'RMS (uV)'] param_headings_2 = ['Power (uV^2)', 'Peak power frequency (Hz)', 'Energy (uV^2s)', 'Peak energy frequency (Hz)'] slope_headings = ['Q1 average slope (uV/s)', 'Q2 average slope (uV/s)', 'Q3 average slope (uV/s)', 'Q4 average slope (uV/s)', 'Q23 average slope (uV/s)', 'Q1 max. slope (uV/s^2)', 'Q2 max. slope (uV/s^2)', 'Q3 max. slope (uV/s^2)', 'Q4 max. slope (uV/s^2)', 'Q23 max. slope (uV/s^2)'] ordered_params_1 = ['minamp', 'maxamp', 'ptp', 'rms'] ordered_params_2 = ['power', 'peakpf', 'energy', 'peakef'] idx_params_1 = in1d(ordered_params_1, list(params[0].keys())) sel_params_1 = list(compress(ordered_params_1, idx_params_1)) heading_row_2 = list(compress(param_headings_1, idx_params_1)) if 'dur' in params[0].keys(): heading_row_2 = ['Duration (s)'] + heading_row_2 idx_params_2 = in1d(ordered_params_2, list(params[0].keys())) sel_params_2 = list(compress(ordered_params_2, idx_params_2)) heading_row_3 = list(compress(param_headings_2, idx_params_2)) heading_row_4 = [] if 'slope' in params[0].keys(): if next(iter(params[0]['slope']))[0]: heading_row_4.extend(slope_headings[:5]) if next(iter(params[0]['slope']))[1]: heading_row_4.extend(slope_headings[5:]) # Get data as matrix and compute descriptives dat = [] if 'dur' in params[0].keys(): one_mat = asarray([seg['dur'] for seg in params \ for chan in seg['data'].axis['chan'][0]]) one_mat = reshape(one_mat, (len(one_mat), 1)) dat.append(one_mat) if sel_params_1: one_mat = asarray([[seg[x](chan=chan)[0] for x in sel_params_1] \ for seg in params for chan in seg['data'].axis['chan'][0]]) dat.append(one_mat) if sel_params_2: one_mat = asarray([[seg[x][chan] for x in sel_params_2] \ for seg in params for chan in seg['data'].axis['chan'][0]]) dat.append(one_mat) if 'slope' in params[0].keys(): one_mat = asarray([[x for y in seg['slope'][chan] for x in y] \ for seg in params for chan in seg['data'].axis['chan'][0]]) dat.append(one_mat) if dat: dat = concatenate(dat, axis=1) desc = get_descriptives(dat) with open(filename, 'w', newline='') as f: lg.info('Writing to ' + str(filename)) csv_file = writer(f) csv_file.writerow(['Wonambi v{}'.format(__version__)]) if count: csv_file.writerow(['Count', count]) if density: csv_file.writerow(['Density', density]) if dat == []: return csv_file.writerow(heading_row_1 + heading_row_2 + heading_row_3 \ + heading_row_4) csv_file.writerow(['Mean'] + spacer + list(desc['mean'])) csv_file.writerow(['SD'] + spacer + list(desc['sd'])) csv_file.writerow(['Mean of ln'] + spacer + list(desc['mean_log'])) csv_file.writerow(['SD of ln'] + spacer + list(desc['sd_log'])) idx = 0 for seg in params: if seg['cycle'] is not None: seg['cycle'] = seg['cycle'][2] for chan in seg['data'].axis['chan'][0]: idx += 1 data_row_1 = [seg[x](chan=chan)[0] for x in sel_params_1] data_row_2 = [seg[x][chan] for x in sel_params_2] if 'dur' in seg.keys(): data_row_1 = [seg['dur']] + data_row_1 if 'slope' in seg.keys(): data_row_3 = [x for y in seg['slope'][chan] for x in y] data_row_2 = data_row_2 + data_row_3 csv_file.writerow([idx, seg['start'], seg['end'], seg['n_stitch'], seg['stage'], seg['cycle'], seg['name'], chan, ] + data_row_1 + data_row_2)
python
def export_event_params(filename, params, count=None, density=None):
    """Write event analysis data to CSV."""
    heading_row_1 = ['Segment index', 'Start time', 'End time', 'Stitches',
                     'Stage', 'Cycle', 'Event type', 'Channel']
    spacer = [''] * (len(heading_row_1) - 1)

    param_headings_1 = ['Min. amplitude (uV)', 'Max. amplitude (uV)',
                        'Peak-to-peak amplitude (uV)', 'RMS (uV)']
    param_headings_2 = ['Power (uV^2)', 'Peak power frequency (Hz)',
                        'Energy (uV^2s)', 'Peak energy frequency (Hz)']
    slope_headings = ['Q1 average slope (uV/s)', 'Q2 average slope (uV/s)',
                      'Q3 average slope (uV/s)', 'Q4 average slope (uV/s)',
                      'Q23 average slope (uV/s)', 'Q1 max. slope (uV/s^2)',
                      'Q2 max. slope (uV/s^2)', 'Q3 max. slope (uV/s^2)',
                      'Q4 max. slope (uV/s^2)', 'Q23 max. slope (uV/s^2)']
    ordered_params_1 = ['minamp', 'maxamp', 'ptp', 'rms']
    ordered_params_2 = ['power', 'peakpf', 'energy', 'peakef']

    idx_params_1 = in1d(ordered_params_1, list(params[0].keys()))
    sel_params_1 = list(compress(ordered_params_1, idx_params_1))
    heading_row_2 = list(compress(param_headings_1, idx_params_1))

    if 'dur' in params[0].keys():
        heading_row_2 = ['Duration (s)'] + heading_row_2

    idx_params_2 = in1d(ordered_params_2, list(params[0].keys()))
    sel_params_2 = list(compress(ordered_params_2, idx_params_2))
    heading_row_3 = list(compress(param_headings_2, idx_params_2))

    heading_row_4 = []
    if 'slope' in params[0].keys():
        if next(iter(params[0]['slope']))[0]:
            heading_row_4.extend(slope_headings[:5])
        if next(iter(params[0]['slope']))[1]:
            heading_row_4.extend(slope_headings[5:])

    # Get data as matrix and compute descriptives
    dat = []
    if 'dur' in params[0].keys():
        one_mat = asarray([seg['dur'] for seg in params \
                           for chan in seg['data'].axis['chan'][0]])
        one_mat = reshape(one_mat, (len(one_mat), 1))
        dat.append(one_mat)

    if sel_params_1:
        one_mat = asarray([[seg[x](chan=chan)[0] for x in sel_params_1] \
                           for seg in params
                           for chan in seg['data'].axis['chan'][0]])
        dat.append(one_mat)

    if sel_params_2:
        one_mat = asarray([[seg[x][chan] for x in sel_params_2] \
                           for seg in params
                           for chan in seg['data'].axis['chan'][0]])
        dat.append(one_mat)

    if 'slope' in params[0].keys():
        one_mat = asarray([[x for y in seg['slope'][chan] for x in y] \
                           for seg in params
                           for chan in seg['data'].axis['chan'][0]])
        dat.append(one_mat)

    if dat:
        dat = concatenate(dat, axis=1)
        desc = get_descriptives(dat)

    with open(filename, 'w', newline='') as f:
        lg.info('Writing to ' + str(filename))
        csv_file = writer(f)
        csv_file.writerow(['Wonambi v{}'.format(__version__)])

        if count:
            csv_file.writerow(['Count', count])
        if density:
            csv_file.writerow(['Density', density])

        if dat == []:
            return

        csv_file.writerow(heading_row_1 + heading_row_2 + heading_row_3 \
                          + heading_row_4)
        csv_file.writerow(['Mean'] + spacer + list(desc['mean']))
        csv_file.writerow(['SD'] + spacer + list(desc['sd']))
        csv_file.writerow(['Mean of ln'] + spacer + list(desc['mean_log']))
        csv_file.writerow(['SD of ln'] + spacer + list(desc['sd_log']))

        idx = 0
        for seg in params:
            if seg['cycle'] is not None:
                seg['cycle'] = seg['cycle'][2]

            for chan in seg['data'].axis['chan'][0]:
                idx += 1
                data_row_1 = [seg[x](chan=chan)[0] for x in sel_params_1]
                data_row_2 = [seg[x][chan] for x in sel_params_2]

                if 'dur' in seg.keys():
                    data_row_1 = [seg['dur']] + data_row_1

                if 'slope' in seg.keys():
                    data_row_3 = [x for y in seg['slope'][chan] for x in y]
                    data_row_2 = data_row_2 + data_row_3

                csv_file.writerow([idx, seg['start'], seg['end'],
                                   seg['n_stitch'], seg['stage'],
                                   seg['cycle'], seg['name'], chan,
                                   ] + data_row_1 + data_row_2)
['def', 'export_event_params', '(', 'filename', ',', 'params', ',', 'count', '=', 'None', ',', 'density', '=', 'None', ')', ':', 'heading_row_1', '=', '[', "'Segment index'", ',', "'Start time'", ',', "'End time'", ',', "'Stitches'", ',', "'Stage'", ',', "'Cycle'", ',', "'Event type'", ',', "'Channel'", ']', 'spacer', '=', '[', "''", ']', '*', '(', 'len', '(', 'heading_row_1', ')', '-', '1', ')', 'param_headings_1', '=', '[', "'Min. amplitude (uV)'", ',', "'Max. amplitude (uV)'", ',', "'Peak-to-peak amplitude (uV)'", ',', "'RMS (uV)'", ']', 'param_headings_2', '=', '[', "'Power (uV^2)'", ',', "'Peak power frequency (Hz)'", ',', "'Energy (uV^2s)'", ',', "'Peak energy frequency (Hz)'", ']', 'slope_headings', '=', '[', "'Q1 average slope (uV/s)'", ',', "'Q2 average slope (uV/s)'", ',', "'Q3 average slope (uV/s)'", ',', "'Q4 average slope (uV/s)'", ',', "'Q23 average slope (uV/s)'", ',', "'Q1 max. slope (uV/s^2)'", ',', "'Q2 max. slope (uV/s^2)'", ',', "'Q3 max. slope (uV/s^2)'", ',', "'Q4 max. slope (uV/s^2)'", ',', "'Q23 max. slope (uV/s^2)'", ']', 'ordered_params_1', '=', '[', "'minamp'", ',', "'maxamp'", ',', "'ptp'", ',', "'rms'", ']', 'ordered_params_2', '=', '[', "'power'", ',', "'peakpf'", ',', "'energy'", ',', "'peakef'", ']', 'idx_params_1', '=', 'in1d', '(', 'ordered_params_1', ',', 'list', '(', 'params', '[', '0', ']', '.', 'keys', '(', ')', ')', ')', 'sel_params_1', '=', 'list', '(', 'compress', '(', 'ordered_params_1', ',', 'idx_params_1', ')', ')', 'heading_row_2', '=', 'list', '(', 'compress', '(', 'param_headings_1', ',', 'idx_params_1', ')', ')', 'if', "'dur'", 'in', 'params', '[', '0', ']', '.', 'keys', '(', ')', ':', 'heading_row_2', '=', '[', "'Duration (s)'", ']', '+', 'heading_row_2', 'idx_params_2', '=', 'in1d', '(', 'ordered_params_2', ',', 'list', '(', 'params', '[', '0', ']', '.', 'keys', '(', ')', ')', ')', 'sel_params_2', '=', 'list', '(', 'compress', '(', 'ordered_params_2', ',', 'idx_params_2', ')', ')', 'heading_row_3', '=', 'list', '(', 'compress', '(', 'param_headings_2', ',', 'idx_params_2', ')', ')', 'heading_row_4', '=', '[', ']', 'if', "'slope'", 'in', 'params', '[', '0', ']', '.', 'keys', '(', ')', ':', 'if', 'next', '(', 'iter', '(', 'params', '[', '0', ']', '[', "'slope'", ']', ')', ')', '[', '0', ']', ':', 'heading_row_4', '.', 'extend', '(', 'slope_headings', '[', ':', '5', ']', ')', 'if', 'next', '(', 'iter', '(', 'params', '[', '0', ']', '[', "'slope'", ']', ')', ')', '[', '1', ']', ':', 'heading_row_4', '.', 'extend', '(', 'slope_headings', '[', '5', ':', ']', ')', '# Get data as matrix and compute descriptives', 'dat', '=', '[', ']', 'if', "'dur'", 'in', 'params', '[', '0', ']', '.', 'keys', '(', ')', ':', 'one_mat', '=', 'asarray', '(', '[', 'seg', '[', "'dur'", ']', 'for', 'seg', 'in', 'params', 'for', 'chan', 'in', 'seg', '[', "'data'", ']', '.', 'axis', '[', "'chan'", ']', '[', '0', ']', ']', ')', 'one_mat', '=', 'reshape', '(', 'one_mat', ',', '(', 'len', '(', 'one_mat', ')', ',', '1', ')', ')', 'dat', '.', 'append', '(', 'one_mat', ')', 'if', 'sel_params_1', ':', 'one_mat', '=', 'asarray', '(', '[', '[', 'seg', '[', 'x', ']', '(', 'chan', '=', 'chan', ')', '[', '0', ']', 'for', 'x', 'in', 'sel_params_1', ']', 'for', 'seg', 'in', 'params', 'for', 'chan', 'in', 'seg', '[', "'data'", ']', '.', 'axis', '[', "'chan'", ']', '[', '0', ']', ']', ')', 'dat', '.', 'append', '(', 'one_mat', ')', 'if', 'sel_params_2', ':', 'one_mat', '=', 'asarray', '(', '[', '[', 'seg', '[', 'x', ']', '[', 'chan', ']', 'for', 'x', 'in', 'sel_params_2', ']', 'for', 'seg', 'in', 'params', 'for', 'chan', 'in', 'seg', '[', "'data'", ']', '.', 'axis', '[', "'chan'", ']', '[', '0', ']', ']', ')', 'dat', '.', 'append', '(', 'one_mat', ')', 'if', "'slope'", 'in', 'params', '[', '0', ']', '.', 'keys', '(', ')', ':', 'one_mat', '=', 'asarray', '(', '[', '[', 'x', 'for', 'y', 'in', 'seg', '[', "'slope'", ']', '[', 'chan', ']', 'for', 'x', 'in', 'y', ']', 'for', 'seg', 'in', 'params', 'for', 'chan', 'in', 'seg', '[', "'data'", ']', '.', 'axis', '[', "'chan'", ']', '[', '0', ']', ']', ')', 'dat', '.', 'append', '(', 'one_mat', ')', 'if', 'dat', ':', 'dat', '=', 'concatenate', '(', 'dat', ',', 'axis', '=', '1', ')', 'desc', '=', 'get_descriptives', '(', 'dat', ')', 'with', 'open', '(', 'filename', ',', "'w'", ',', 'newline', '=', "''", ')', 'as', 'f', ':', 'lg', '.', 'info', '(', "'Writing to '", '+', 'str', '(', 'filename', ')', ')', 'csv_file', '=', 'writer', '(', 'f', ')', 'csv_file', '.', 'writerow', '(', '[', "'Wonambi v{}'", '.', 'format', '(', '__version__', ')', ']', ')', 'if', 'count', ':', 'csv_file', '.', 'writerow', '(', '[', "'Count'", ',', 'count', ']', ')', 'if', 'density', ':', 'csv_file', '.', 'writerow', '(', '[', "'Density'", ',', 'density', ']', ')', 'if', 'dat', '==', '[', ']', ':', 'return', 'csv_file', '.', 'writerow', '(', 'heading_row_1', '+', 'heading_row_2', '+', 'heading_row_3', '+', 'heading_row_4', ')', 'csv_file', '.', 'writerow', '(', '[', "'Mean'", ']', '+', 'spacer', '+', 'list', '(', 'desc', '[', "'mean'", ']', ')', ')', 'csv_file', '.', 'writerow', '(', '[', "'SD'", ']', '+', 'spacer', '+', 'list', '(', 'desc', '[', "'sd'", ']', ')', ')', 'csv_file', '.', 'writerow', '(', '[', "'Mean of ln'", ']', '+', 'spacer', '+', 'list', '(', 'desc', '[', "'mean_log'", ']', ')', ')', 'csv_file', '.', 'writerow', '(', '[', "'SD of ln'", ']', '+', 'spacer', '+', 'list', '(', 'desc', '[', "'sd_log'", ']', ')', ')', 'idx', '=', '0', 'for', 'seg', 'in', 'params', ':', 'if', 'seg', '[', "'cycle'", ']', 'is', 'not', 'None', ':', 'seg', '[', "'cycle'", ']', '=', 'seg', '[', "'cycle'", ']', '[', '2', ']', 'for', 'chan', 'in', 'seg', '[', "'data'", ']', '.', 'axis', '[', "'chan'", ']', '[', '0', ']', ':', 'idx', '+=', '1', 'data_row_1', '=', '[', 'seg', '[', 'x', ']', '(', 'chan', '=', 'chan', ')', '[', '0', ']', 'for', 'x', 'in', 'sel_params_1', ']', 'data_row_2', '=', '[', 'seg', '[', 'x', ']', '[', 'chan', ']', 'for', 'x', 'in', 'sel_params_2', ']', 'if', "'dur'", 'in', 'seg', '.', 'keys', '(', ')', ':', 'data_row_1', '=', '[', 'seg', '[', "'dur'", ']', ']', '+', 'data_row_1', 'if', "'slope'", 'in', 'seg', '.', 'keys', '(', ')', ':', 'data_row_3', '=', '[', 'x', 'for', 'y', 'in', 'seg', '[', "'slope'", ']', '[', 'chan', ']', 'for', 'x', 'in', 'y', ']', 'data_row_2', '=', 'data_row_2', '+', 'data_row_3', 'csv_file', '.', 'writerow', '(', '[', 'idx', ',', 'seg', '[', "'start'", ']', ',', 'seg', '[', "'end'", ']', ',', 'seg', '[', "'n_stitch'", ']', ',', 'seg', '[', "'stage'", ']', ',', 'seg', '[', "'cycle'", ']', ',', 'seg', '[', "'name'", ']', ',', 'chan', ',', ']', '+', 'data_row_1', '+', 'data_row_2', ')']
Write event analysis data to CSV.
['Write', 'event', 'analysis', 'data', 'to', 'CSV', '.']
train
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/trans/analyze.py#L169-L292
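The heading selection above pairs numpy's in1d with itertools.compress: in1d marks which of the ordered parameter keys a segment actually carries, and compress keeps the matching column headings in order. A minimal standalone sketch of that pattern; the sample segment dict below is hypothetical, not a real Wonambi segment:

from itertools import compress
from numpy import in1d

ordered = ['minamp', 'maxamp', 'ptp', 'rms']
headings = ['Min. amplitude (uV)', 'Max. amplitude (uV)',
            'Peak-to-peak amplitude (uV)', 'RMS (uV)']
segment = {'minamp': -42.0, 'rms': 11.5}       # hypothetical segment dict

present = in1d(ordered, list(segment.keys()))   # [True, False, False, True]
selected = list(compress(headings, present))    # headings for available params only
print(selected)                                 # ['Min. amplitude (uV)', 'RMS (uV)']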
3,983
happyleavesaoc/python-voobly
voobly/__init__.py
_make_request
def _make_request(session, url, argument=None, params=None, raw=False):
    """Make a request to API endpoint."""
    if not params:
        params = {}
    params['key'] = session.auth.key
    try:
        if argument:
            request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL,
                                            url, argument)
        else:
            request_url = '{}{}'.format(VOOBLY_API_URL, url)
        resp = session.get(request_url, params=params)
    except RequestException:
        raise VooblyError('failed to connect')
    if resp.text == 'bad-key':
        raise VooblyError('bad api key')
    elif resp.text == 'too-busy':
        raise VooblyError('service too busy')
    elif not resp.text:
        raise VooblyError('no data returned')
    if raw:
        return resp.text
    try:
        return tablib.Dataset().load(resp.text).dict
    except UnsupportedFormat:
        raise VooblyError('unexpected error {}'.format(resp.text))
python
def _make_request(session, url, argument=None, params=None, raw=False):
    """Make a request to API endpoint."""
    if not params:
        params = {}
    params['key'] = session.auth.key
    try:
        if argument:
            request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL,
                                            url, argument)
        else:
            request_url = '{}{}'.format(VOOBLY_API_URL, url)
        resp = session.get(request_url, params=params)
    except RequestException:
        raise VooblyError('failed to connect')
    if resp.text == 'bad-key':
        raise VooblyError('bad api key')
    elif resp.text == 'too-busy':
        raise VooblyError('service too busy')
    elif not resp.text:
        raise VooblyError('no data returned')
    if raw:
        return resp.text
    try:
        return tablib.Dataset().load(resp.text).dict
    except UnsupportedFormat:
        raise VooblyError('unexpected error {}'.format(resp.text))
['def', '_make_request', '(', 'session', ',', 'url', ',', 'argument', '=', 'None', ',', 'params', '=', 'None', ',', 'raw', '=', 'False', ')', ':', 'if', 'not', 'params', ':', 'params', '=', '{', '}', 'params', '[', "'key'", ']', '=', 'session', '.', 'auth', '.', 'key', 'try', ':', 'if', 'argument', ':', 'request_url', '=', "'{}{}{}{}'", '.', 'format', '(', 'session', '.', 'auth', '.', 'base_url', ',', 'VOOBLY_API_URL', ',', 'url', ',', 'argument', ')', 'else', ':', 'request_url', '=', "'{}{}'", '.', 'format', '(', 'VOOBLY_API_URL', ',', 'url', ')', 'resp', '=', 'session', '.', 'get', '(', 'request_url', ',', 'params', '=', 'params', ')', 'except', 'RequestException', ':', 'raise', 'VooblyError', '(', "'failed to connect'", ')', 'if', 'resp', '.', 'text', '==', "'bad-key'", ':', 'raise', 'VooblyError', '(', "'bad api key'", ')', 'elif', 'resp', '.', 'text', '==', "'too-busy'", ':', 'raise', 'VooblyError', '(', "'service too busy'", ')', 'elif', 'not', 'resp', '.', 'text', ':', 'raise', 'VooblyError', '(', "'no data returned'", ')', 'if', 'raw', ':', 'return', 'resp', '.', 'text', 'try', ':', 'return', 'tablib', '.', 'Dataset', '(', ')', '.', 'load', '(', 'resp', '.', 'text', ')', '.', 'dict', 'except', 'UnsupportedFormat', ':', 'raise', 'VooblyError', '(', "'unexpected error {}'", '.', 'format', '(', 'resp', '.', 'text', ')', ')']
Make a request to API endpoint.
['Make', 'a', 'request', 'to', 'API', 'endpoint', '.']
train
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L112-L136
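Note that this function reports failures by matching sentinel strings in the response body rather than HTTP status codes. A minimal standalone sketch of that dispatch pattern, with a stand-in exception class (the sentinel values mirror the ones above):

class VooblyError(Exception):
    """Stand-in for the library's exception type."""

_SENTINELS = {
    'bad-key': 'bad api key',
    'too-busy': 'service too busy',
}

def check_body(text):
    # Known sentinel bodies and empty bodies both signal failure.
    if text in _SENTINELS:
        raise VooblyError(_SENTINELS[text])
    if not text:
        raise VooblyError('no data returned')
    return text

print(check_body('some,csv,payload'))   # normal payloads pass through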
3,984
readbeyond/aeneas
aeneas/plotter.py
PlotTimeScale.draw_png
def draw_png(self, image, h_zoom, v_zoom, current_y):
    """
    Draw this time scale to PNG.

    :param image: the image to draw onto
    :param int h_zoom: the horizontal zoom
    :param int v_zoom: the vertical zoom
    :param int current_y: the current y offset, in modules
    :type image: :class:`PIL.Image`
    """
    # PIL object
    draw = ImageDraw.Draw(image)
    mws = self.rconf.mws
    pixels_per_second = int(h_zoom / mws)
    current_y_px = current_y * v_zoom
    # create font, as tall as possible
    font_height_pt = 18
    font = ImageFont.truetype(self.FONT_PATH, font_height_pt)
    # draw a tick every self.time_step seconds
    for i in range(0, 1 + int(self.max_time), self.time_step):
        # base x position
        begin_px = i * pixels_per_second
        # tick
        left_px = begin_px - self.TICK_WIDTH
        right_px = begin_px + self.TICK_WIDTH
        top_px = current_y_px
        bottom_px = current_y_px + v_zoom
        draw.rectangle((left_px, top_px, right_px, bottom_px), fill=PlotterColors.BLACK)
        # text
        time_text = self._time_string(i)
        left_px = begin_px + self.TICK_WIDTH + self.TEXT_MARGIN
        top_px = current_y_px + (v_zoom - self.text_bounding_box(font_height_pt, time_text)[1]) // 2
        draw.text((left_px, top_px), time_text, PlotterColors.BLACK, font=font)
python
def draw_png(self, image, h_zoom, v_zoom, current_y):
    """
    Draw this time scale to PNG.

    :param image: the image to draw onto
    :param int h_zoom: the horizontal zoom
    :param int v_zoom: the vertical zoom
    :param int current_y: the current y offset, in modules
    :type image: :class:`PIL.Image`
    """
    # PIL object
    draw = ImageDraw.Draw(image)
    mws = self.rconf.mws
    pixels_per_second = int(h_zoom / mws)
    current_y_px = current_y * v_zoom
    # create font, as tall as possible
    font_height_pt = 18
    font = ImageFont.truetype(self.FONT_PATH, font_height_pt)
    # draw a tick every self.time_step seconds
    for i in range(0, 1 + int(self.max_time), self.time_step):
        # base x position
        begin_px = i * pixels_per_second
        # tick
        left_px = begin_px - self.TICK_WIDTH
        right_px = begin_px + self.TICK_WIDTH
        top_px = current_y_px
        bottom_px = current_y_px + v_zoom
        draw.rectangle((left_px, top_px, right_px, bottom_px), fill=PlotterColors.BLACK)
        # text
        time_text = self._time_string(i)
        left_px = begin_px + self.TICK_WIDTH + self.TEXT_MARGIN
        top_px = current_y_px + (v_zoom - self.text_bounding_box(font_height_pt, time_text)[1]) // 2
        draw.text((left_px, top_px), time_text, PlotterColors.BLACK, font=font)
['def', 'draw_png', '(', 'self', ',', 'image', ',', 'h_zoom', ',', 'v_zoom', ',', 'current_y', ')', ':', '# PIL object', 'draw', '=', 'ImageDraw', '.', 'Draw', '(', 'image', ')', 'mws', '=', 'self', '.', 'rconf', '.', 'mws', 'pixels_per_second', '=', 'int', '(', 'h_zoom', '/', 'mws', ')', 'current_y_px', '=', 'current_y', '*', 'v_zoom', '# create font, as tall as possible', 'font_height_pt', '=', '18', 'font', '=', 'ImageFont', '.', 'truetype', '(', 'self', '.', 'FONT_PATH', ',', 'font_height_pt', ')', '# draw a tick every self.time_step seconds', 'for', 'i', 'in', 'range', '(', '0', ',', '1', '+', 'int', '(', 'self', '.', 'max_time', ')', ',', 'self', '.', 'time_step', ')', ':', '# base x position', 'begin_px', '=', 'i', '*', 'pixels_per_second', '# tick', 'left_px', '=', 'begin_px', '-', 'self', '.', 'TICK_WIDTH', 'right_px', '=', 'begin_px', '+', 'self', '.', 'TICK_WIDTH', 'top_px', '=', 'current_y_px', 'bottom_px', '=', 'current_y_px', '+', 'v_zoom', 'draw', '.', 'rectangle', '(', '(', 'left_px', ',', 'top_px', ',', 'right_px', ',', 'bottom_px', ')', ',', 'fill', '=', 'PlotterColors', '.', 'BLACK', ')', '# text', 'time_text', '=', 'self', '.', '_time_string', '(', 'i', ')', 'left_px', '=', 'begin_px', '+', 'self', '.', 'TICK_WIDTH', '+', 'self', '.', 'TEXT_MARGIN', 'top_px', '=', 'current_y_px', '+', '(', 'v_zoom', '-', 'self', '.', 'text_bounding_box', '(', 'font_height_pt', ',', 'time_text', ')', '[', '1', ']', ')', '//', '2', 'draw', '.', 'text', '(', '(', 'left_px', ',', 'top_px', ')', ',', 'time_text', ',', 'PlotterColors', '.', 'BLACK', ',', 'font', '=', 'font', ')']
Draw this time scale to PNG.

:param image: the image to draw onto
:param int h_zoom: the horizontal zoom
:param int v_zoom: the vertical zoom
:param int current_y: the current y offset, in modules
:type image: :class:`PIL.Image`
['Draw', 'this', 'time', 'scale', 'to', 'PNG', '.']
train
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/plotter.py#L305-L341
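The tick layout reduces to simple arithmetic: one tick per time step at x = i * pixels_per_second, a rectangle for the tick, and a label offset to its right. A runnable sketch that draws a comparable ruler with Pillow, using the default bitmap font and hypothetical zoom values rather than the aeneas runtime configuration:

from PIL import Image, ImageDraw, ImageFont

pixels_per_second = 50            # hypothetical zoom, not an aeneas default
tick_width = 2
img = Image.new('RGB', (10 * pixels_per_second + 40, 30), 'white')
draw = ImageDraw.Draw(img)
font = ImageFont.load_default()   # avoids depending on a .ttf file path

for i in range(0, 11):            # one tick per second for 10 seconds
    x = i * pixels_per_second
    draw.rectangle((x - tick_width, 0, x + tick_width, 29), fill='black')
    draw.text((x + tick_width + 2, 9), '%02d' % i, fill='black', font=font)

img.save('ruler.png')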
3,985
django-cumulus/django-cumulus
cumulus/management/commands/syncfiles.py
Command.set_options
def set_options(self, options):
    """
    Sets instance variables based on an options dict
    """
    # COMMAND LINE OPTIONS
    self.wipe = options.get("wipe")
    self.test_run = options.get("test_run")
    self.quiet = options.get("test_run")
    self.container_name = options.get("container")
    self.verbosity = int(options.get("verbosity"))
    self.syncmedia = options.get("syncmedia")
    self.syncstatic = options.get("syncstatic")
    if self.test_run:
        self.verbosity = 2
    cli_includes = options.get("includes")
    cli_excludes = options.get("excludes")

    # CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY
    if self.syncmedia and self.syncstatic:
        raise CommandError("options --media and --static are mutually exclusive")
    if not self.container_name:
        if self.syncmedia:
            self.container_name = CUMULUS["CONTAINER"]
        elif self.syncstatic:
            self.container_name = CUMULUS["STATIC_CONTAINER"]
        else:
            raise CommandError("must select one of the required options, either --media or --static")
    settings_includes = CUMULUS["INCLUDE_LIST"]
    settings_excludes = CUMULUS["EXCLUDE_LIST"]

    # PATH SETTINGS
    if self.syncmedia:
        self.file_root = os.path.abspath(settings.MEDIA_ROOT)
        self.file_url = settings.MEDIA_URL
    elif self.syncstatic:
        self.file_root = os.path.abspath(settings.STATIC_ROOT)
        self.file_url = settings.STATIC_URL
    if not self.file_root.endswith("/"):
        self.file_root = self.file_root + "/"
    if self.file_url.startswith("/"):
        self.file_url = self.file_url[1:]

    # SYNCSTATIC VARS
    # combine includes and excludes from the cli and django settings file
    self.includes = list(set(cli_includes + settings_includes))
    self.excludes = list(set(cli_excludes + settings_excludes))
    # transform glob patterns to regular expressions
    self.local_filenames = []
    self.create_count = 0
    self.upload_count = 0
    self.update_count = 0
    self.skip_count = 0
    self.delete_count = 0
python
def set_options(self, options):
    """
    Sets instance variables based on an options dict
    """
    # COMMAND LINE OPTIONS
    self.wipe = options.get("wipe")
    self.test_run = options.get("test_run")
    self.quiet = options.get("test_run")
    self.container_name = options.get("container")
    self.verbosity = int(options.get("verbosity"))
    self.syncmedia = options.get("syncmedia")
    self.syncstatic = options.get("syncstatic")
    if self.test_run:
        self.verbosity = 2
    cli_includes = options.get("includes")
    cli_excludes = options.get("excludes")

    # CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY
    if self.syncmedia and self.syncstatic:
        raise CommandError("options --media and --static are mutually exclusive")
    if not self.container_name:
        if self.syncmedia:
            self.container_name = CUMULUS["CONTAINER"]
        elif self.syncstatic:
            self.container_name = CUMULUS["STATIC_CONTAINER"]
        else:
            raise CommandError("must select one of the required options, either --media or --static")
    settings_includes = CUMULUS["INCLUDE_LIST"]
    settings_excludes = CUMULUS["EXCLUDE_LIST"]

    # PATH SETTINGS
    if self.syncmedia:
        self.file_root = os.path.abspath(settings.MEDIA_ROOT)
        self.file_url = settings.MEDIA_URL
    elif self.syncstatic:
        self.file_root = os.path.abspath(settings.STATIC_ROOT)
        self.file_url = settings.STATIC_URL
    if not self.file_root.endswith("/"):
        self.file_root = self.file_root + "/"
    if self.file_url.startswith("/"):
        self.file_url = self.file_url[1:]

    # SYNCSTATIC VARS
    # combine includes and excludes from the cli and django settings file
    self.includes = list(set(cli_includes + settings_includes))
    self.excludes = list(set(cli_excludes + settings_excludes))
    # transform glob patterns to regular expressions
    self.local_filenames = []
    self.create_count = 0
    self.upload_count = 0
    self.update_count = 0
    self.skip_count = 0
    self.delete_count = 0
['def', 'set_options', '(', 'self', ',', 'options', ')', ':', '# COMMAND LINE OPTIONS', 'self', '.', 'wipe', '=', 'options', '.', 'get', '(', '"wipe"', ')', 'self', '.', 'test_run', '=', 'options', '.', 'get', '(', '"test_run"', ')', 'self', '.', 'quiet', '=', 'options', '.', 'get', '(', '"test_run"', ')', 'self', '.', 'container_name', '=', 'options', '.', 'get', '(', '"container"', ')', 'self', '.', 'verbosity', '=', 'int', '(', 'options', '.', 'get', '(', '"verbosity"', ')', ')', 'self', '.', 'syncmedia', '=', 'options', '.', 'get', '(', '"syncmedia"', ')', 'self', '.', 'syncstatic', '=', 'options', '.', 'get', '(', '"syncstatic"', ')', 'if', 'self', '.', 'test_run', ':', 'self', '.', 'verbosity', '=', '2', 'cli_includes', '=', 'options', '.', 'get', '(', '"includes"', ')', 'cli_excludes', '=', 'options', '.', 'get', '(', '"excludes"', ')', '# CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY', 'if', 'self', '.', 'syncmedia', 'and', 'self', '.', 'syncstatic', ':', 'raise', 'CommandError', '(', '"options --media and --static are mutually exclusive"', ')', 'if', 'not', 'self', '.', 'container_name', ':', 'if', 'self', '.', 'syncmedia', ':', 'self', '.', 'container_name', '=', 'CUMULUS', '[', '"CONTAINER"', ']', 'elif', 'self', '.', 'syncstatic', ':', 'self', '.', 'container_name', '=', 'CUMULUS', '[', '"STATIC_CONTAINER"', ']', 'else', ':', 'raise', 'CommandError', '(', '"must select one of the required options, either --media or --static"', ')', 'settings_includes', '=', 'CUMULUS', '[', '"INCLUDE_LIST"', ']', 'settings_excludes', '=', 'CUMULUS', '[', '"EXCLUDE_LIST"', ']', '# PATH SETTINGS', 'if', 'self', '.', 'syncmedia', ':', 'self', '.', 'file_root', '=', 'os', '.', 'path', '.', 'abspath', '(', 'settings', '.', 'MEDIA_ROOT', ')', 'self', '.', 'file_url', '=', 'settings', '.', 'MEDIA_URL', 'elif', 'self', '.', 'syncstatic', ':', 'self', '.', 'file_root', '=', 'os', '.', 'path', '.', 'abspath', '(', 'settings', '.', 'STATIC_ROOT', ')', 'self', '.', 'file_url', '=', 'settings', '.', 'STATIC_URL', 'if', 'not', 'self', '.', 'file_root', '.', 'endswith', '(', '"/"', ')', ':', 'self', '.', 'file_root', '=', 'self', '.', 'file_root', '+', '"/"', 'if', 'self', '.', 'file_url', '.', 'startswith', '(', '"/"', ')', ':', 'self', '.', 'file_url', '=', 'self', '.', 'file_url', '[', '1', ':', ']', '# SYNCSTATIC VARS', '# combine includes and excludes from the cli and django settings file', 'self', '.', 'includes', '=', 'list', '(', 'set', '(', 'cli_includes', '+', 'settings_includes', ')', ')', 'self', '.', 'excludes', '=', 'list', '(', 'set', '(', 'cli_excludes', '+', 'settings_excludes', ')', ')', '# transform glob patterns to regular expressions', 'self', '.', 'local_filenames', '=', '[', ']', 'self', '.', 'create_count', '=', '0', 'self', '.', 'upload_count', '=', '0', 'self', '.', 'update_count', '=', '0', 'self', '.', 'skip_count', '=', '0', 'self', '.', 'delete_count', '=', '0']
Sets instance variables based on an options dict
['Sets', 'instance', 'variables', 'based', 'on', 'an', 'options', 'dict']
train
https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L45-L97
3,986
tanghaibao/jcvi
jcvi/assembly/hic.py
density
def density(args):
    """
    %prog density test.clm

    Estimate link density of contigs.
    """
    p = OptionParser(density.__doc__)
    p.add_option("--save", default=False, action="store_true",
                 help="Write log densitites of contigs to file")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    clmfile, = args
    clm = CLMFile(clmfile)
    pf = clmfile.rsplit(".", 1)[0]

    if opts.save:
        logdensities = clm.calculate_densities()
        densityfile = pf + ".density"
        fw = open(densityfile, "w")
        for name, logd in logdensities.items():
            s = clm.tig_to_size[name]
            print("\t".join(str(x) for x in (name, s, logd)), file=fw)
        fw.close()
        logging.debug("Density written to `{}`".format(densityfile))

    tourfile = clmfile.rsplit(".", 1)[0] + ".tour"
    tour = clm.activate(tourfile=tourfile, backuptour=False)
    clm.flip_all(tour)
    clm.flip_whole(tour)
    clm.flip_one(tour)
python
def density(args):
    """
    %prog density test.clm

    Estimate link density of contigs.
    """
    p = OptionParser(density.__doc__)
    p.add_option("--save", default=False, action="store_true",
                 help="Write log densitites of contigs to file")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    clmfile, = args
    clm = CLMFile(clmfile)
    pf = clmfile.rsplit(".", 1)[0]

    if opts.save:
        logdensities = clm.calculate_densities()
        densityfile = pf + ".density"
        fw = open(densityfile, "w")
        for name, logd in logdensities.items():
            s = clm.tig_to_size[name]
            print("\t".join(str(x) for x in (name, s, logd)), file=fw)
        fw.close()
        logging.debug("Density written to `{}`".format(densityfile))

    tourfile = clmfile.rsplit(".", 1)[0] + ".tour"
    tour = clm.activate(tourfile=tourfile, backuptour=False)
    clm.flip_all(tour)
    clm.flip_whole(tour)
    clm.flip_one(tour)
['def', 'density', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'density', '.', '__doc__', ')', 'p', '.', 'add_option', '(', '"--save"', ',', 'default', '=', 'False', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"Write log densitites of contigs to file"', ')', 'p', '.', 'set_cpus', '(', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '1', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'clmfile', ',', '=', 'args', 'clm', '=', 'CLMFile', '(', 'clmfile', ')', 'pf', '=', 'clmfile', '.', 'rsplit', '(', '"."', ',', '1', ')', '[', '0', ']', 'if', 'opts', '.', 'save', ':', 'logdensities', '=', 'clm', '.', 'calculate_densities', '(', ')', 'densityfile', '=', 'pf', '+', '".density"', 'fw', '=', 'open', '(', 'densityfile', ',', '"w"', ')', 'for', 'name', ',', 'logd', 'in', 'logdensities', '.', 'items', '(', ')', ':', 's', '=', 'clm', '.', 'tig_to_size', '[', 'name', ']', 'print', '(', '"\\t"', '.', 'join', '(', 'str', '(', 'x', ')', 'for', 'x', 'in', '(', 'name', ',', 's', ',', 'logd', ')', ')', ',', 'file', '=', 'fw', ')', 'fw', '.', 'close', '(', ')', 'logging', '.', 'debug', '(', '"Density written to `{}`"', '.', 'format', '(', 'densityfile', ')', ')', 'tourfile', '=', 'clmfile', '.', 'rsplit', '(', '"."', ',', '1', ')', '[', '0', ']', '+', '".tour"', 'tour', '=', 'clm', '.', 'activate', '(', 'tourfile', '=', 'tourfile', ',', 'backuptour', '=', 'False', ')', 'clm', '.', 'flip_all', '(', 'tour', ')', 'clm', '.', 'flip_whole', '(', 'tour', ')', 'clm', '.', 'flip_one', '(', 'tour', ')']
%prog density test.clm

Estimate link density of contigs.
['%prog', 'density', 'test', '.', 'clm']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L952-L985
3,987
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
Context._from_dict
def _from_dict(cls, _dict):
    """Initialize a Context object from a json dictionary."""
    args = {}
    xtra = _dict.copy()
    if 'conversation_id' in _dict:
        args['conversation_id'] = _dict.get('conversation_id')
        del xtra['conversation_id']
    if 'system' in _dict:
        args['system'] = SystemResponse._from_dict(_dict.get('system'))
        del xtra['system']
    if 'metadata' in _dict:
        args['metadata'] = MessageContextMetadata._from_dict(
            _dict.get('metadata'))
        del xtra['metadata']
    args.update(xtra)
    return cls(**args)
python
def _from_dict(cls, _dict):
    """Initialize a Context object from a json dictionary."""
    args = {}
    xtra = _dict.copy()
    if 'conversation_id' in _dict:
        args['conversation_id'] = _dict.get('conversation_id')
        del xtra['conversation_id']
    if 'system' in _dict:
        args['system'] = SystemResponse._from_dict(_dict.get('system'))
        del xtra['system']
    if 'metadata' in _dict:
        args['metadata'] = MessageContextMetadata._from_dict(
            _dict.get('metadata'))
        del xtra['metadata']
    args.update(xtra)
    return cls(**args)
['def', '_from_dict', '(', 'cls', ',', '_dict', ')', ':', 'args', '=', '{', '}', 'xtra', '=', '_dict', '.', 'copy', '(', ')', 'if', "'conversation_id'", 'in', '_dict', ':', 'args', '[', "'conversation_id'", ']', '=', '_dict', '.', 'get', '(', "'conversation_id'", ')', 'del', 'xtra', '[', "'conversation_id'", ']', 'if', "'system'", 'in', '_dict', ':', 'args', '[', "'system'", ']', '=', 'SystemResponse', '.', '_from_dict', '(', '_dict', '.', 'get', '(', "'system'", ')', ')', 'del', 'xtra', '[', "'system'", ']', 'if', "'metadata'", 'in', '_dict', ':', 'args', '[', "'metadata'", ']', '=', 'MessageContextMetadata', '.', '_from_dict', '(', '_dict', '.', 'get', '(', "'metadata'", ')', ')', 'del', 'xtra', '[', "'metadata'", ']', 'args', '.', 'update', '(', 'xtra', ')', 'return', 'cls', '(', '*', '*', 'args', ')']
Initialize a Context object from a json dictionary.
['Initialize', 'a', 'Context', 'object', 'from', 'a', 'json', 'dictionary', '.']
train
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L2944-L2959
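The deserialization pattern here, pop the keys the schema knows, deserialize those, and forward everything left over as extra keyword arguments, can be sketched standalone. The toy class below is a stand-in, not the SDK's Context:

class ToyContext:
    """Stand-in with the same constructor convention as the SDK class."""

    def __init__(self, conversation_id=None, system=None, **kwargs):
        self.conversation_id = conversation_id
        self.system = system
        self.extra = kwargs                       # fields the schema does not know

    @classmethod
    def _from_dict(cls, _dict):
        args = {}
        xtra = _dict.copy()
        if 'conversation_id' in _dict:
            args['conversation_id'] = xtra.pop('conversation_id')
        if 'system' in _dict:
            args['system'] = xtra.pop('system')   # the SDK deserializes here
        args.update(xtra)                         # pass leftovers straight through
        return cls(**args)

ctx = ToyContext._from_dict({'conversation_id': 'abc', 'custom_slot': 42})
print(ctx.extra)                                  # {'custom_slot': 42}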
3,988
Microsoft/knack
knack/arguments.py
ArgumentRegistry.register_cli_argument
def register_cli_argument(self, scope, dest, argtype, **kwargs):
    """ Add an argument to the argument registry

    :param scope: The command level to apply the argument registration (e.g. 'mygroup mycommand')
    :type scope: str
    :param dest: The parameter/destination that this argument is for
    :type dest: str
    :param argtype: The argument type for this command argument
    :type argtype: knack.arguments.CLIArgumentType
    :param kwargs: see knack.arguments.CLIArgumentType
    """
    argument = CLIArgumentType(overrides=argtype, **kwargs)
    self.arguments[scope][dest] = argument
python
def register_cli_argument(self, scope, dest, argtype, **kwargs):
    """ Add an argument to the argument registry

    :param scope: The command level to apply the argument registration (e.g. 'mygroup mycommand')
    :type scope: str
    :param dest: The parameter/destination that this argument is for
    :type dest: str
    :param argtype: The argument type for this command argument
    :type argtype: knack.arguments.CLIArgumentType
    :param kwargs: see knack.arguments.CLIArgumentType
    """
    argument = CLIArgumentType(overrides=argtype, **kwargs)
    self.arguments[scope][dest] = argument
['def', 'register_cli_argument', '(', 'self', ',', 'scope', ',', 'dest', ',', 'argtype', ',', '*', '*', 'kwargs', ')', ':', 'argument', '=', 'CLIArgumentType', '(', 'overrides', '=', 'argtype', ',', '*', '*', 'kwargs', ')', 'self', '.', 'arguments', '[', 'scope', ']', '[', 'dest', ']', '=', 'argument']
Add an argument to the argument registry

:param scope: The command level to apply the argument registration (e.g. 'mygroup mycommand')
:type scope: str
:param dest: The parameter/destination that this argument is for
:type dest: str
:param argtype: The argument type for this command argument
:type argtype: knack.arguments.CLIArgumentType
:param kwargs: see knack.arguments.CLIArgumentType
['Add', 'an', 'argument', 'to', 'the', 'argument', 'registry']
train
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/arguments.py#L93-L105
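A sketch of the two-level registry this method writes into, keyed by scope and then destination, using a plain dict of settings in place of CLIArgumentType (the option names below are illustrative):

from collections import defaultdict

class ToyArgumentRegistry:
    def __init__(self):
        self.arguments = defaultdict(dict)   # scope -> dest -> settings

    def register_cli_argument(self, scope, dest, **settings):
        self.arguments[scope][dest] = settings

reg = ToyArgumentRegistry()
reg.register_cli_argument('mygroup mycommand', 'name',
                          options_list=['--name', '-n'], help='Resource name.')
print(reg.arguments['mygroup mycommand']['name'])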
3,989
tgalal/yowsup
yowsup/axolotl/manager.py
AxolotlManager.set_prekeys_as_sent
def set_prekeys_as_sent(self, prekeyIds):
    """
    :param prekeyIds:
    :type prekeyIds: list
    :return:
    :rtype:
    """
    logger.debug("set_prekeys_as_sent(prekeyIds=[%d prekeyIds])" % len(prekeyIds))
    self._store.preKeyStore.setAsSent([prekey.getId() for prekey in prekeyIds])
python
def set_prekeys_as_sent(self, prekeyIds):
    """
    :param prekeyIds:
    :type prekeyIds: list
    :return:
    :rtype:
    """
    logger.debug("set_prekeys_as_sent(prekeyIds=[%d prekeyIds])" % len(prekeyIds))
    self._store.preKeyStore.setAsSent([prekey.getId() for prekey in prekeyIds])
['def', 'set_prekeys_as_sent', '(', 'self', ',', 'prekeyIds', ')', ':', 'logger', '.', 'debug', '(', '"set_prekeys_as_sent(prekeyIds=[%d prekeyIds])"', '%', 'len', '(', 'prekeyIds', ')', ')', 'self', '.', '_store', '.', 'preKeyStore', '.', 'setAsSent', '(', '[', 'prekey', '.', 'getId', '(', ')', 'for', 'prekey', 'in', 'prekeyIds', ']', ')']
:param prekeyIds:
:type prekeyIds: list
:return:
:rtype:
[':', 'param', 'prekeyIds', ':', ':', 'type', 'prekeyIds', ':', 'list', ':', 'return', ':', ':', 'rtype', ':']
train
https://github.com/tgalal/yowsup/blob/b0739461ba962bf221fc76047d9d60d8ce61bc3e/yowsup/axolotl/manager.py#L86-L94
3,990
pandas-dev/pandas
pandas/core/arrays/timedeltas.py
TimedeltaArray._add_datetime_arraylike
def _add_datetime_arraylike(self, other):
    """
    Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.
    """
    if isinstance(other, np.ndarray):
        # At this point we have already checked that dtype is datetime64
        from pandas.core.arrays import DatetimeArray
        other = DatetimeArray(other)

    # defer to implementation in DatetimeArray
    return other + self
python
def _add_datetime_arraylike(self, other):
    """
    Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.
    """
    if isinstance(other, np.ndarray):
        # At this point we have already checked that dtype is datetime64
        from pandas.core.arrays import DatetimeArray
        other = DatetimeArray(other)

    # defer to implementation in DatetimeArray
    return other + self
['def', '_add_datetime_arraylike', '(', 'self', ',', 'other', ')', ':', 'if', 'isinstance', '(', 'other', ',', 'np', '.', 'ndarray', ')', ':', '# At this point we have already checked that dtype is datetime64', 'from', 'pandas', '.', 'core', '.', 'arrays', 'import', 'DatetimeArray', 'other', '=', 'DatetimeArray', '(', 'other', ')', '# defer to implementation in DatetimeArray', 'return', 'other', '+', 'self']
Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.
['Add', 'DatetimeArray', '/', 'Index', 'or', 'ndarray', '[', 'datetime64', ']', 'to', 'TimedeltaArray', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/timedeltas.py#L392-L402
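A short usage sketch of the behavior this method enables at the user level, where adding a datetime64 array to timedeltas yields datetimes (expected output shown as a comment):

import numpy as np
import pandas as pd

tdi = pd.to_timedelta(['1 days', '2 days'])                    # timedeltas
dt64 = np.array(['2019-01-01', '2019-01-01'], dtype='datetime64[ns]')

# The timedelta side wraps the ndarray and defers to datetime addition.
print(tdi + dt64)   # DatetimeIndex(['2019-01-02', '2019-01-03'], ...)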
3,991
phaethon/kamene
kamene/packet.py
Packet.command
def command(self):
    """Returns a string representing the command you have to type to obtain the same packet"""
    f = []
    for fn,fv in self.fields.items():
        fld = self.get_field(fn)
        if isinstance(fv, Packet):
            fv = fv.command()
        elif fld.islist and fld.holds_packets and type(fv) is list:
            #fv = "[%s]" % ",".join( map(Packet.command, fv))
            fv = "[%s]" % ",".join([ Packet.command(i) for i in fv ])
        else:
            fv = repr(fv)
        f.append("%s=%s" % (fn, fv))
    c = "%s(%s)" % (self.__class__.__name__, ", ".join(f))
    pc = self.payload.command()
    if pc:
        c += "/"+pc
    return c
python
def command(self):
    """Returns a string representing the command you have to type to obtain the same packet"""
    f = []
    for fn,fv in self.fields.items():
        fld = self.get_field(fn)
        if isinstance(fv, Packet):
            fv = fv.command()
        elif fld.islist and fld.holds_packets and type(fv) is list:
            #fv = "[%s]" % ",".join( map(Packet.command, fv))
            fv = "[%s]" % ",".join([ Packet.command(i) for i in fv ])
        else:
            fv = repr(fv)
        f.append("%s=%s" % (fn, fv))
    c = "%s(%s)" % (self.__class__.__name__, ", ".join(f))
    pc = self.payload.command()
    if pc:
        c += "/"+pc
    return c
['def', 'command', '(', 'self', ')', ':', 'f', '=', '[', ']', 'for', 'fn', ',', 'fv', 'in', 'self', '.', 'fields', '.', 'items', '(', ')', ':', 'fld', '=', 'self', '.', 'get_field', '(', 'fn', ')', 'if', 'isinstance', '(', 'fv', ',', 'Packet', ')', ':', 'fv', '=', 'fv', '.', 'command', '(', ')', 'elif', 'fld', '.', 'islist', 'and', 'fld', '.', 'holds_packets', 'and', 'type', '(', 'fv', ')', 'is', 'list', ':', '#fv = "[%s]" % ",".join( map(Packet.command, fv))', 'fv', '=', '"[%s]"', '%', '","', '.', 'join', '(', '[', 'Packet', '.', 'command', '(', 'i', ')', 'for', 'i', 'in', 'fv', ']', ')', 'else', ':', 'fv', '=', 'repr', '(', 'fv', ')', 'f', '.', 'append', '(', '"%s=%s"', '%', '(', 'fn', ',', 'fv', ')', ')', 'c', '=', '"%s(%s)"', '%', '(', 'self', '.', '__class__', '.', '__name__', ',', '", "', '.', 'join', '(', 'f', ')', ')', 'pc', '=', 'self', '.', 'payload', '.', 'command', '(', ')', 'if', 'pc', ':', 'c', '+=', '"/"', '+', 'pc', 'return', 'c']
Returns a string representing the command you have to type to obtain the same packet
['Returns', 'a', 'string', 'representing', 'the', 'command', 'you', 'have', 'to', 'type', 'to', 'obtain', 'the', 'same', 'packet']
train
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/packet.py#L1049-L1066
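The core of command() is rendering each field as name=repr(value) and joining the parts into constructor source; the resulting string is meant to be eval()-able back into an equivalent packet. A toy standalone version of that rendering step (the field dict is illustrative, not a real kamene packet):

def command_of(name, fields):
    """Toy rendering step: constructor source from a field dict."""
    parts = ['%s=%s' % (fn, repr(fv)) for fn, fv in fields.items()]
    return '%s(%s)' % (name, ', '.join(parts))

src = command_of('TCP', {'dport': 80, 'flags': 'S'})
print(src)          # TCP(dport=80, flags='S')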
3,992
numenta/htmresearch
htmresearch/frameworks/layers/sequence_object_machine.py
SequenceObjectMachine._generateFeatures
def _generateFeatures(self):
    """
    Generates a pool of features to be used for the experiments.

    For each index, numColumns SDR's are created, as locations for the same
    feature should be different for each column.
    """
    size = self.sensorInputSize
    bits = self.numInputBits

    self.features = []
    for _ in xrange(self.numColumns):
        self.features.append(
            [self._generatePattern(bits, size) for _ in xrange(self.numFeatures)]
        )
python
def _generateFeatures(self):
    """
    Generates a pool of features to be used for the experiments.

    For each index, numColumns SDR's are created, as locations for the same
    feature should be different for each column.
    """
    size = self.sensorInputSize
    bits = self.numInputBits

    self.features = []
    for _ in xrange(self.numColumns):
        self.features.append(
            [self._generatePattern(bits, size) for _ in xrange(self.numFeatures)]
        )
['def', '_generateFeatures', '(', 'self', ')', ':', 'size', '=', 'self', '.', 'sensorInputSize', 'bits', '=', 'self', '.', 'numInputBits', 'self', '.', 'features', '=', '[', ']', 'for', '_', 'in', 'xrange', '(', 'self', '.', 'numColumns', ')', ':', 'self', '.', 'features', '.', 'append', '(', '[', 'self', '.', '_generatePattern', '(', 'bits', ',', 'size', ')', 'for', '_', 'in', 'xrange', '(', 'self', '.', 'numFeatures', ')', ']', ')']
Generates a pool of features to be used for the experiments.

For each index, numColumns SDR's are created, as locations for the same
feature should be different for each column.
['Generates', 'a', 'pool', 'of', 'features', 'to', 'be', 'used', 'for', 'the', 'experiments', '.']
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/sequence_object_machine.py#L268-L282
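A standalone sketch of building such a feature pool with the standard library, representing each toy SDR as a sorted list of active bit indices; the dimensions below are hypothetical, not the experiment defaults:

import random

def generate_pattern(num_bits, size):
    """One toy SDR: a sorted list of active bit indices."""
    return sorted(random.sample(range(size), num_bits))

num_columns, num_features = 3, 4
features = [
    [generate_pattern(20, 1024) for _ in range(num_features)]
    for _ in range(num_columns)
]
# features[c][i] is feature i as seen by column c; the same feature index
# maps to a different location SDR in every column, as the docstring requires.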
3,993
jtwhite79/pyemu
pyemu/mat/mat_handler.py
get_maxsing
def get_maxsing(self,eigthresh=1.0e-5):
    """ Get the number of singular components with a singular
    value ratio greater than or equal to eigthresh

    Parameters
    ----------
    eigthresh : float
        the ratio of the largest to smallest singular value

    Returns
    -------
    int : int
        number of singular components

    """
    #sthresh =np.abs((self.s.x / self.s.x[0]) - eigthresh)
    sthresh = self.s.x.flatten()/self.s.x[0]
    ising = 0
    for i,st in enumerate(sthresh):
        if st > eigthresh:
            ising += 1
            #return max(1,i)
        else:
            break
    #return max(1,np.argmin(sthresh))
    return max(1,ising)
python
def get_maxsing(self,eigthresh=1.0e-5):
    """ Get the number of singular components with a singular
    value ratio greater than or equal to eigthresh

    Parameters
    ----------
    eigthresh : float
        the ratio of the largest to smallest singular value

    Returns
    -------
    int : int
        number of singular components

    """
    #sthresh =np.abs((self.s.x / self.s.x[0]) - eigthresh)
    sthresh = self.s.x.flatten()/self.s.x[0]
    ising = 0
    for i,st in enumerate(sthresh):
        if st > eigthresh:
            ising += 1
            #return max(1,i)
        else:
            break
    #return max(1,np.argmin(sthresh))
    return max(1,ising)
['def', 'get_maxsing', '(', 'self', ',', 'eigthresh', '=', '1.0e-5', ')', ':', '#sthresh =np.abs((self.s.x / self.s.x[0]) - eigthresh)', 'sthresh', '=', 'self', '.', 's', '.', 'x', '.', 'flatten', '(', ')', '/', 'self', '.', 's', '.', 'x', '[', '0', ']', 'ising', '=', '0', 'for', 'i', ',', 'st', 'in', 'enumerate', '(', 'sthresh', ')', ':', 'if', 'st', '>', 'eigthresh', ':', 'ising', '+=', '1', '#return max(1,i)', 'else', ':', 'break', '#return max(1,np.argmin(sthresh))', 'return', 'max', '(', '1', ',', 'ising', ')']
Get the number of singular components with a singular
value ratio greater than or equal to eigthresh

Parameters
----------
eigthresh : float
    the ratio of the largest to smallest singular value

Returns
-------
int : int
    number of singular components
['Get', 'the', 'number', 'of', 'singular', 'components', 'with', 'a', 'singular', 'value', 'ratio', 'greater', 'than', 'or', 'equal', 'to', 'eigthresh']
train
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/mat/mat_handler.py#L973-L998
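Because singular values come out of an SVD sorted in descending order, the early-exit loop above is equivalent to counting the ratios that exceed the threshold. A toy numpy version of the same computation:

import numpy as np

def count_maxsing(singular_values, eigthresh=1.0e-5):
    """Toy version; assumes values sorted descending, as SVD returns them."""
    s = np.asarray(singular_values, dtype=float)
    ratios = s / s[0]
    return max(1, int(np.sum(ratios > eigthresh)))

print(count_maxsing([10.0, 1.0, 1e-3, 1e-8]))   # 3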
3,994
creare-com/pydem
pydem/dem_processing.py
TileEdge.get
def get(self, key, side):
    """
    Returns an edge given a particular key

    Parmeters
    ----------
    key : tuple
        (te, be, le, re) tuple that identifies a tile
    side : str
        top, bottom, left, or right, which edge to return
    """
    return getattr(self, side).ravel()[self.keys[key]]
python
def get(self, key, side):
    """
    Returns an edge given a particular key

    Parmeters
    ----------
    key : tuple
        (te, be, le, re) tuple that identifies a tile
    side : str
        top, bottom, left, or right, which edge to return
    """
    return getattr(self, side).ravel()[self.keys[key]]
['def', 'get', '(', 'self', ',', 'key', ',', 'side', ')', ':', 'return', 'getattr', '(', 'self', ',', 'side', ')', '.', 'ravel', '(', ')', '[', 'self', '.', 'keys', '[', 'key', ']', ']']
Returns an edge given a particular key

Parmeters
----------
key : tuple
    (te, be, le, re) tuple that identifies a tile
side : str
    top, bottom, left, or right, which edge to return
['Returns', 'an', 'edge', 'given', 'a', 'particular', 'key', 'Parmeters', '----------', 'key', ':', 'tuple', '(', 'te', 'be', 'le', 're', ')', 'tuple', 'that', 'identifies', 'a', 'tile', 'side', ':', 'str', 'top', 'bottom', 'left', 'or', 'right', 'which', 'edge', 'to', 'return']
train
https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/dem_processing.py#L234-L244
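A toy container showing the lookup in isolation: getattr picks the requested side's 2-D array, ravel flattens it, and the stored key maps to a flat index. The key and index below are hypothetical:

import numpy as np

class ToyEdges:
    def __init__(self):
        self.top = np.arange(12).reshape(3, 4)   # one 2-D array per side
        self.keys = {(0, 0): 2}                  # hypothetical tile key -> flat index

    def get(self, key, side):
        return getattr(self, side).ravel()[self.keys[key]]

print(ToyEdges().get((0, 0), 'top'))             # 2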
3,995
cbrand/vpnchooser
src/vpnchooser/resources/vpn.py
VpnListResource.post
def post(self) -> Vpn:
    """
    Creates the vpn with the given data.
    """
    vpn = Vpn()
    session.add(vpn)
    self.update(vpn)
    session.flush()
    session.commit()
    return vpn, 201, {
        'Location': url_for('vpn', vpn_id=vpn.id)
    }
python
def post(self) -> Vpn:
    """
    Creates the vpn with the given data.
    """
    vpn = Vpn()
    session.add(vpn)
    self.update(vpn)
    session.flush()
    session.commit()
    return vpn, 201, {
        'Location': url_for('vpn', vpn_id=vpn.id)
    }
['def', 'post', '(', 'self', ')', '->', 'Vpn', ':', 'vpn', '=', 'Vpn', '(', ')', 'session', '.', 'add', '(', 'vpn', ')', 'self', '.', 'update', '(', 'vpn', ')', 'session', '.', 'flush', '(', ')', 'session', '.', 'commit', '(', ')', 'return', 'vpn', ',', '201', ',', '{', "'Location'", ':', 'url_for', '(', "'vpn'", ',', 'vpn_id', '=', 'vpn', '.', 'id', ')', '}']
Creates the vpn with the given data.
['Creates', 'the', 'vpn', 'with', 'the', 'given', 'data', '.']
train
https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/vpn.py#L121-L132
3,996
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py
brocade_notification_stream.RIBVRFRouteLimitExceeded_originator_switch_info_switchVcsId
def RIBVRFRouteLimitExceeded_originator_switch_info_switchVcsId(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    RIBVRFRouteLimitExceeded = ET.SubElement(config, "RIBVRFRouteLimitExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream")
    originator_switch_info = ET.SubElement(RIBVRFRouteLimitExceeded, "originator-switch-info")
    switchVcsId = ET.SubElement(originator_switch_info, "switchVcsId")
    switchVcsId.text = kwargs.pop('switchVcsId')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def RIBVRFRouteLimitExceeded_originator_switch_info_switchVcsId(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    RIBVRFRouteLimitExceeded = ET.SubElement(config, "RIBVRFRouteLimitExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream")
    originator_switch_info = ET.SubElement(RIBVRFRouteLimitExceeded, "originator-switch-info")
    switchVcsId = ET.SubElement(originator_switch_info, "switchVcsId")
    switchVcsId.text = kwargs.pop('switchVcsId')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
['def', 'RIBVRFRouteLimitExceeded_originator_switch_info_switchVcsId', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'RIBVRFRouteLimitExceeded', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"RIBVRFRouteLimitExceeded"', ',', 'xmlns', '=', '"http://brocade.com/ns/brocade-notification-stream"', ')', 'originator_switch_info', '=', 'ET', '.', 'SubElement', '(', 'RIBVRFRouteLimitExceeded', ',', '"originator-switch-info"', ')', 'switchVcsId', '=', 'ET', '.', 'SubElement', '(', 'originator_switch_info', ',', '"switchVcsId"', ')', 'switchVcsId', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'switchVcsId'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py#L492-L502
3,997
astropy/photutils
photutils/aperture/ellipse.py
EllipticalMaskMixin.to_mask
def to_mask(self, method='exact', subpixels=5):
    """
    Return a list of `~photutils.ApertureMask` objects, one for each
    aperture position.

    Parameters
    ----------
    method : {'exact', 'center', 'subpixel'}, optional
        The method used to determine the overlap of the aperture on
        the pixel grid.  Not all options are available for all
        aperture types.  Note that the more precise methods are
        generally slower.  The following methods are available:

            * ``'exact'`` (default):
              The the exact fractional overlap of the aperture and
              each pixel is calculated.  The returned mask will
              contain values between 0 and 1.

            * ``'center'``:
              A pixel is considered to be entirely in or out of the
              aperture depending on whether its center is in or out
              of the aperture.  The returned mask will contain
              values only of 0 (out) and 1 (in).

            * ``'subpixel'``:
              A pixel is divided into subpixels (see the
              ``subpixels`` keyword), each of which are considered
              to be entirely in or out of the aperture depending on
              whether its center is in or out of the aperture.  If
              ``subpixels=1``, this method is equivalent to
              ``'center'``.  The returned mask will contain values
              between 0 and 1.

    subpixels : int, optional
        For the ``'subpixel'`` method, resample pixels by this factor
        in each dimension.  That is, each pixel is divided into
        ``subpixels ** 2`` subpixels.

    Returns
    -------
    mask : list of `~photutils.ApertureMask`
        A list of aperture mask objects.
    """

    use_exact, subpixels = self._translate_mask_mode(method, subpixels)

    if hasattr(self, 'a'):
        a = self.a
        b = self.b
    elif hasattr(self, 'a_in'):    # annulus
        a = self.a_out
        b = self.b_out
        b_in = self.a_in * self.b_out / self.a_out
    else:
        raise ValueError('Cannot determine the aperture shape.')

    masks = []
    for bbox, edges in zip(self.bounding_boxes, self._centered_edges):
        ny, nx = bbox.shape
        mask = elliptical_overlap_grid(edges[0], edges[1], edges[2],
                                       edges[3], nx, ny, a, b, self.theta,
                                       use_exact, subpixels)

        # subtract the inner ellipse for an annulus
        if hasattr(self, 'a_in'):
            mask -= elliptical_overlap_grid(edges[0], edges[1], edges[2],
                                            edges[3], nx, ny, self.a_in,
                                            b_in, self.theta, use_exact,
                                            subpixels)

        masks.append(ApertureMask(mask, bbox))

    return masks
python
def to_mask(self, method='exact', subpixels=5):
    """
    Return a list of `~photutils.ApertureMask` objects, one for each
    aperture position.

    Parameters
    ----------
    method : {'exact', 'center', 'subpixel'}, optional
        The method used to determine the overlap of the aperture on
        the pixel grid.  Not all options are available for all
        aperture types.  Note that the more precise methods are
        generally slower.  The following methods are available:

            * ``'exact'`` (default):
              The the exact fractional overlap of the aperture and
              each pixel is calculated.  The returned mask will
              contain values between 0 and 1.

            * ``'center'``:
              A pixel is considered to be entirely in or out of the
              aperture depending on whether its center is in or out
              of the aperture.  The returned mask will contain
              values only of 0 (out) and 1 (in).

            * ``'subpixel'``:
              A pixel is divided into subpixels (see the
              ``subpixels`` keyword), each of which are considered
              to be entirely in or out of the aperture depending on
              whether its center is in or out of the aperture.  If
              ``subpixels=1``, this method is equivalent to
              ``'center'``.  The returned mask will contain values
              between 0 and 1.

    subpixels : int, optional
        For the ``'subpixel'`` method, resample pixels by this factor
        in each dimension.  That is, each pixel is divided into
        ``subpixels ** 2`` subpixels.

    Returns
    -------
    mask : list of `~photutils.ApertureMask`
        A list of aperture mask objects.
    """

    use_exact, subpixels = self._translate_mask_mode(method, subpixels)

    if hasattr(self, 'a'):
        a = self.a
        b = self.b
    elif hasattr(self, 'a_in'):    # annulus
        a = self.a_out
        b = self.b_out
        b_in = self.a_in * self.b_out / self.a_out
    else:
        raise ValueError('Cannot determine the aperture shape.')

    masks = []
    for bbox, edges in zip(self.bounding_boxes, self._centered_edges):
        ny, nx = bbox.shape
        mask = elliptical_overlap_grid(edges[0], edges[1], edges[2],
                                       edges[3], nx, ny, a, b, self.theta,
                                       use_exact, subpixels)

        # subtract the inner ellipse for an annulus
        if hasattr(self, 'a_in'):
            mask -= elliptical_overlap_grid(edges[0], edges[1], edges[2],
                                            edges[3], nx, ny, self.a_in,
                                            b_in, self.theta, use_exact,
                                            subpixels)

        masks.append(ApertureMask(mask, bbox))

    return masks
['def', 'to_mask', '(', 'self', ',', 'method', '=', "'exact'", ',', 'subpixels', '=', '5', ')', ':', 'use_exact', ',', 'subpixels', '=', 'self', '.', '_translate_mask_mode', '(', 'method', ',', 'subpixels', ')', 'if', 'hasattr', '(', 'self', ',', "'a'", ')', ':', 'a', '=', 'self', '.', 'a', 'b', '=', 'self', '.', 'b', 'elif', 'hasattr', '(', 'self', ',', "'a_in'", ')', ':', '# annulus', 'a', '=', 'self', '.', 'a_out', 'b', '=', 'self', '.', 'b_out', 'b_in', '=', 'self', '.', 'a_in', '*', 'self', '.', 'b_out', '/', 'self', '.', 'a_out', 'else', ':', 'raise', 'ValueError', '(', "'Cannot determine the aperture shape.'", ')', 'masks', '=', '[', ']', 'for', 'bbox', ',', 'edges', 'in', 'zip', '(', 'self', '.', 'bounding_boxes', ',', 'self', '.', '_centered_edges', ')', ':', 'ny', ',', 'nx', '=', 'bbox', '.', 'shape', 'mask', '=', 'elliptical_overlap_grid', '(', 'edges', '[', '0', ']', ',', 'edges', '[', '1', ']', ',', 'edges', '[', '2', ']', ',', 'edges', '[', '3', ']', ',', 'nx', ',', 'ny', ',', 'a', ',', 'b', ',', 'self', '.', 'theta', ',', 'use_exact', ',', 'subpixels', ')', '# subtract the inner ellipse for an annulus', 'if', 'hasattr', '(', 'self', ',', "'a_in'", ')', ':', 'mask', '-=', 'elliptical_overlap_grid', '(', 'edges', '[', '0', ']', ',', 'edges', '[', '1', ']', ',', 'edges', '[', '2', ']', ',', 'edges', '[', '3', ']', ',', 'nx', ',', 'ny', ',', 'self', '.', 'a_in', ',', 'b_in', ',', 'self', '.', 'theta', ',', 'use_exact', ',', 'subpixels', ')', 'masks', '.', 'append', '(', 'ApertureMask', '(', 'mask', ',', 'bbox', ')', ')', 'return', 'masks']
Return a list of `~photutils.ApertureMask` objects, one for each
aperture position.

Parameters
----------
method : {'exact', 'center', 'subpixel'}, optional
    The method used to determine the overlap of the aperture on
    the pixel grid.  Not all options are available for all
    aperture types.  Note that the more precise methods are
    generally slower.  The following methods are available:

        * ``'exact'`` (default):
          The the exact fractional overlap of the aperture and
          each pixel is calculated.  The returned mask will
          contain values between 0 and 1.

        * ``'center'``:
          A pixel is considered to be entirely in or out of the
          aperture depending on whether its center is in or out
          of the aperture.  The returned mask will contain
          values only of 0 (out) and 1 (in).

        * ``'subpixel'``:
          A pixel is divided into subpixels (see the
          ``subpixels`` keyword), each of which are considered
          to be entirely in or out of the aperture depending on
          whether its center is in or out of the aperture.  If
          ``subpixels=1``, this method is equivalent to
          ``'center'``.  The returned mask will contain values
          between 0 and 1.

subpixels : int, optional
    For the ``'subpixel'`` method, resample pixels by this factor
    in each dimension.  That is, each pixel is divided into
    ``subpixels ** 2`` subpixels.

Returns
-------
mask : list of `~photutils.ApertureMask`
    A list of aperture mask objects.
['Return', 'a', 'list', 'of', '~photutils', '.', 'ApertureMask', 'objects', 'one', 'for', 'each', 'aperture', 'position', '.']
train
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/ellipse.py#L26-L98
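The 'subpixel' method described in the docstring can be sketched standalone for a circular aperture (the real elliptical_overlap_grid is compiled code): each pixel's value is the fraction of its subpixel centers that fall inside the shape.

import numpy as np

def subpixel_overlap(cx, cy, r, nx, ny, subpixels=5):
    """Fraction of each pixel inside a circle, judged by subpixel centers."""
    xs = (np.arange(nx * subpixels) + 0.5) / subpixels
    ys = (np.arange(ny * subpixels) + 0.5) / subpixels
    xx, yy = np.meshgrid(xs, ys)
    inside = (xx - cx) ** 2 + (yy - cy) ** 2 <= r ** 2
    # Average the boolean subpixels back down to one value per pixel.
    return inside.reshape(ny, subpixels, nx, subpixels).mean(axis=(1, 3))

print(subpixel_overlap(2.0, 2.0, 1.5, 4, 4).round(2))
# values between 0 and 1; exactly 1 for pixels fully inside the circle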
3,998
ninuxorg/nodeshot
nodeshot/community/participation/models/base.py
UpdateCountsMixin.delete
def delete(self, *args, **kwargs):
    """ custom delete method to update counts """
    super(UpdateCountsMixin, self).delete(*args, **kwargs)
    self.update_count()
python
def delete(self, *args, **kwargs):
    """ custom delete method to update counts """
    super(UpdateCountsMixin, self).delete(*args, **kwargs)
    self.update_count()
['def', 'delete', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'super', '(', 'UpdateCountsMixin', ',', 'self', ')', '.', 'delete', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'self', '.', 'update_count', '(', ')']
custom delete method to update counts
['custom', 'delete', 'method', 'to', 'update', 'counts']
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/participation/models/base.py#L29-L32
3,999
seleniumbase/SeleniumBase
seleniumbase/fixtures/email_manager.py
EmailManager.replace_entities
def replace_entities(self, html):
    """
    Replace htmlentities with unicode characters
    @Params
    html - html source to replace entities in
    @Returns
    String html with entities replaced
    """
    def fixup(text):
        """replace the htmlentities in some text"""
        text = text.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    return chr(int(text[3:-1], 16))
                else:
                    return chr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = chr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text  # leave as is
    return re.sub(r"&#?\w+;", fixup, html)
python
def replace_entities(self, html):
    """
    Replace htmlentities with unicode characters
    @Params
    html - html source to replace entities in
    @Returns
    String html with entities replaced
    """
    def fixup(text):
        """replace the htmlentities in some text"""
        text = text.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    return chr(int(text[3:-1], 16))
                else:
                    return chr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = chr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text  # leave as is
    return re.sub(r"&#?\w+;", fixup, html)
['def', 'replace_entities', '(', 'self', ',', 'html', ')', ':', 'def', 'fixup', '(', 'text', ')', ':', '"""replace the htmlentities in some text"""', 'text', '=', 'text', '.', 'group', '(', '0', ')', 'if', 'text', '[', ':', '2', ']', '==', '"&#"', ':', '# character reference', 'try', ':', 'if', 'text', '[', ':', '3', ']', '==', '"&#x"', ':', 'return', 'chr', '(', 'int', '(', 'text', '[', '3', ':', '-', '1', ']', ',', '16', ')', ')', 'else', ':', 'return', 'chr', '(', 'int', '(', 'text', '[', '2', ':', '-', '1', ']', ')', ')', 'except', 'ValueError', ':', 'pass', 'else', ':', '# named entity', 'try', ':', 'text', '=', 'chr', '(', 'htmlentitydefs', '.', 'name2codepoint', '[', 'text', '[', '1', ':', '-', '1', ']', ']', ')', 'except', 'KeyError', ':', 'pass', 'return', 'text', '# leave as is', 'return', 're', '.', 'sub', '(', 'r"&#?\\w+;"', ',', 'fixup', ',', 'html', ')']
Replace htmlentities with unicode characters
@Params
html - html source to replace entities in
@Returns
String html with entities replaced
['Replace', 'htmlentities', 'with', 'unicode', 'characters']
train
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/email_manager.py#L454-L481
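A self-contained Python 3 version of the same regex-plus-fixup approach; html.entities.name2codepoint is the modern home of the htmlentitydefs table the method imports:

import re
from html.entities import name2codepoint

def replace_entities(html):
    def fixup(match):
        text = match.group(0)
        if text[:2] == '&#':                          # numeric reference
            try:
                if text[:3] == '&#x':
                    return chr(int(text[3:-1], 16))   # hex form
                return chr(int(text[2:-1]))           # decimal form
            except ValueError:
                return text
        try:
            return chr(name2codepoint[text[1:-1]])    # named entity
        except KeyError:
            return text                               # leave as is
    return re.sub(r'&#?\w+;', fixup, html)

print(replace_entities('caf&eacute; &#169; &#x41;'))  # café © A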