Dataset columns (name, type, and observed value or length range):

    Unnamed: 0                  int64     0 to 10k
    repository_name             string    length 7 to 54
    func_path_in_repository     string    length 5 to 223
    func_name                   string    length 1 to 134
    whole_func_string           string    length 100 to 30.3k
    language                    string    1 distinct value
    func_code_string            string    length 100 to 30.3k
    func_code_tokens            string    length 138 to 33.2k
    func_documentation_string   string    length 1 to 15k
    func_documentation_tokens   string    length 5 to 5.14k
    split_name                  string    1 distinct value
    func_code_url               string    length 91 to 315

Each record below begins with its row index (Row 5,400, Row 5,401, ...) and then lists the remaining columns in the order above: repository_name, func_path_in_repository, func_name, whole_func_string, language, func_code_string, func_code_tokens, func_documentation_string, func_documentation_tokens, split_name, func_code_url.
Row 5,400
rocky/python-uncompyle6
uncompyle6/scanners/scanner15.py
Scanner15.ingest
def ingest(self, co, classname=None, code_objects={}, show_asm=None): """ Pick out tokens from an uncompyle6 code object, and transform them, returning a list of uncompyle6 Token's. The transformations are made to assist the deparsing grammar. """ tokens, customize = scan.Scanner21.ingest(self, co, classname, code_objects, show_asm) for t in tokens: if t.op == self.opc.UNPACK_LIST: t.kind = 'UNPACK_LIST_%d' % t.attr pass return tokens, customize
python
def ingest(self, co, classname=None, code_objects={}, show_asm=None): """ Pick out tokens from an uncompyle6 code object, and transform them, returning a list of uncompyle6 Token's. The transformations are made to assist the deparsing grammar. """ tokens, customize = scan.Scanner21.ingest(self, co, classname, code_objects, show_asm) for t in tokens: if t.op == self.opc.UNPACK_LIST: t.kind = 'UNPACK_LIST_%d' % t.attr pass return tokens, customize
['def', 'ingest', '(', 'self', ',', 'co', ',', 'classname', '=', 'None', ',', 'code_objects', '=', '{', '}', ',', 'show_asm', '=', 'None', ')', ':', 'tokens', ',', 'customize', '=', 'scan', '.', 'Scanner21', '.', 'ingest', '(', 'self', ',', 'co', ',', 'classname', ',', 'code_objects', ',', 'show_asm', ')', 'for', 't', 'in', 'tokens', ':', 'if', 't', '.', 'op', '==', 'self', '.', 'opc', '.', 'UNPACK_LIST', ':', 't', '.', 'kind', '=', "'UNPACK_LIST_%d'", '%', 't', '.', 'attr', 'pass', 'return', 'tokens', ',', 'customize']
Pick out tokens from an uncompyle6 code object, and transform them, returning a list of uncompyle6 Token's. The transformations are made to assist the deparsing grammar.
['Pick', 'out', 'tokens', 'from', 'an', 'uncompyle6', 'code', 'object', 'and', 'transform', 'them', 'returning', 'a', 'list', 'of', 'uncompyle6', 'Token', 's', '.']
train
https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/scanners/scanner15.py#L29-L41
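Row 5,400's ingest() renames UNPACK_LIST tokens by folding the unpack count into the token kind. A minimal sketch of that step on a stand-in Token class; the opcode number and field layout here are invented for illustration, not uncompyle6's real ones.

```python
class Token:
    """Tiny stand-in for an uncompyle6 Token; only the fields the loop
    in the row above touches are modeled."""
    def __init__(self, op, kind, attr=None):
        self.op, self.kind, self.attr = op, kind, attr


UNPACK_LIST = 92  # assumed opcode value, for illustration only

tokens = [Token(UNPACK_LIST, 'UNPACK_LIST', 3), Token(100, 'LOAD_NAME')]
for t in tokens:
    if t.op == UNPACK_LIST:
        # Fold the unpack count into the grammar symbol, e.g. UNPACK_LIST_3,
        # so the deparsing grammar can match on it directly.
        t.kind = 'UNPACK_LIST_%d' % t.attr

print([t.kind for t in tokens])  # ['UNPACK_LIST_3', 'LOAD_NAME']
```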
Row 5,401
Spinmob/spinmob
_data.py
fitter.autoscale_eydata
def autoscale_eydata(self): """ Rescales the error so the next fit will give reduced chi squareds of 1. Each data set will be scaled independently, and you may wish to run this a few times until it converges. """ if not self.results: self._error("You must complete a fit first.") return r = self.reduced_chi_squareds() # loop over the eydata and rescale for n in range(len(r)): self["scale_eydata"][n] *= _n.sqrt(r[n]) # the fit is no longer valid self.clear_results() # replot if self['autoplot']: self.plot() return self
python
def autoscale_eydata(self): """ Rescales the error so the next fit will give reduced chi squareds of 1. Each data set will be scaled independently, and you may wish to run this a few times until it converges. """ if not self.results: self._error("You must complete a fit first.") return r = self.reduced_chi_squareds() # loop over the eydata and rescale for n in range(len(r)): self["scale_eydata"][n] *= _n.sqrt(r[n]) # the fit is no longer valid self.clear_results() # replot if self['autoplot']: self.plot() return self
['def', 'autoscale_eydata', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'results', ':', 'self', '.', '_error', '(', '"You must complete a fit first."', ')', 'return', 'r', '=', 'self', '.', 'reduced_chi_squareds', '(', ')', '# loop over the eydata and rescale', 'for', 'n', 'in', 'range', '(', 'len', '(', 'r', ')', ')', ':', 'self', '[', '"scale_eydata"', ']', '[', 'n', ']', '*=', '_n', '.', 'sqrt', '(', 'r', '[', 'n', ']', ')', '# the fit is no longer valid', 'self', '.', 'clear_results', '(', ')', '# replot', 'if', 'self', '[', "'autoplot'", ']', ':', 'self', '.', 'plot', '(', ')', 'return', 'self']
Rescales the error so the next fit will give reduced chi squareds of 1. Each data set will be scaled independently, and you may wish to run this a few times until it converges.
['Rescales', 'the', 'error', 'so', 'the', 'next', 'fit', 'will', 'give', 'reduced', 'chi', 'squareds', 'of', '1', '.', 'Each', 'data', 'set', 'will', 'be', 'scaled', 'independently', 'and', 'you', 'may', 'wish', 'to', 'run', 'this', 'a', 'few', 'times', 'until', 'it', 'converges', '.']
train
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_data.py#L2545-L2566
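Row 5,401 rescales each data set's error bars by the square root of its reduced chi-squared so that the next fit lands at a reduced chi-squared of 1. A self-contained numpy sketch of that arithmetic, using made-up residuals and error bars rather than spinmob's own objects:

```python
import numpy as np

# Made-up fit residuals, error bars, and parameter count; spinmob itself
# is not used in this sketch.
residuals = np.array([1.2, -0.8, 2.1, -1.5, 0.3])
eydata = np.full_like(residuals, 0.5)
n_parameters = 2
dof = len(residuals) - n_parameters

# Reduced chi-squared of the current fit.
chi2_red = np.sum((residuals / eydata) ** 2) / dof

# Scaling each error bar by sqrt(chi2_red) is the *= _n.sqrt(r[n]) step
# above; with the residuals held fixed it drives the reduced chi-squared to 1.
eydata *= np.sqrt(chi2_red)
chi2_red_after = np.sum((residuals / eydata) ** 2) / dof
print(chi2_red, chi2_red_after)  # chi2_red_after -> 1.0 (up to rounding)
```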
Row 5,402
twilio/twilio-python
twilio/rest/studio/v1/flow/engagement/__init__.py
EngagementContext.engagement_context
def engagement_context(self): """ Access the engagement_context :returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList :rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList """ if self._engagement_context is None: self._engagement_context = EngagementContextList( self._version, flow_sid=self._solution['flow_sid'], engagement_sid=self._solution['sid'], ) return self._engagement_context
python
def engagement_context(self): """ Access the engagement_context :returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList :rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList """ if self._engagement_context is None: self._engagement_context = EngagementContextList( self._version, flow_sid=self._solution['flow_sid'], engagement_sid=self._solution['sid'], ) return self._engagement_context
['def', 'engagement_context', '(', 'self', ')', ':', 'if', 'self', '.', '_engagement_context', 'is', 'None', ':', 'self', '.', '_engagement_context', '=', 'EngagementContextList', '(', 'self', '.', '_version', ',', 'flow_sid', '=', 'self', '.', '_solution', '[', "'flow_sid'", ']', ',', 'engagement_sid', '=', 'self', '.', '_solution', '[', "'sid'", ']', ',', ')', 'return', 'self', '.', '_engagement_context']
Access the engagement_context :returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList :rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
['Access', 'the', 'engagement_context']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/studio/v1/flow/engagement/__init__.py#L285-L298
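Row 5,402 is the cache-on-first-access pattern: the EngagementContextList is constructed once and reused on every later access. A generic, self-contained sketch of the same pattern; the class and the cached stand-in object are invented for the example.

```python
class LazyExample:
    """Generic cache-on-first-access accessor; the names are invented and
    the dict below stands in for the EngagementContextList built above."""

    def __init__(self):
        self._engagement_context = None

    @property
    def engagement_context(self):
        # Build the underlying object only on first access, then reuse it.
        if self._engagement_context is None:
            self._engagement_context = {"built": True}  # stand-in object
        return self._engagement_context


obj = LazyExample()
assert obj.engagement_context is obj.engagement_context  # same cached instance
```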
Row 5,403
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/neighbor/af_ipv6_vrf_neighbor_address_holder/af_ipv6_neighbor_addr/neighbor_route_map/neighbor_route_map_direction_out/__init__.py
neighbor_route_map_direction_out._set_neighbor_route_map_name_direction_out
def _set_neighbor_route_map_name_direction_out(self, v, load=False): """ Setter method for neighbor_route_map_name_direction_out, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/neighbor/af_ipv6_vrf_neighbor_address_holder/af_ipv6_neighbor_addr/neighbor_route_map/neighbor_route_map_direction_out/neighbor_route_map_name_direction_out (common-def:name-string64) If this variable is read-only (config: false) in the source YANG file, then _set_neighbor_route_map_name_direction_out is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_neighbor_route_map_name_direction_out() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,63})'}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply route map to neighbor', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='common-def:name-string64', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """neighbor_route_map_name_direction_out must be of a type compatible with common-def:name-string64""", 'defined-type': "common-def:name-string64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,63})'}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply route map to neighbor', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='common-def:name-string64', is_config=True)""", }) self.__neighbor_route_map_name_direction_out = t if hasattr(self, '_set'): self._set()
python
def _set_neighbor_route_map_name_direction_out(self, v, load=False): """ Setter method for neighbor_route_map_name_direction_out, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/neighbor/af_ipv6_vrf_neighbor_address_holder/af_ipv6_neighbor_addr/neighbor_route_map/neighbor_route_map_direction_out/neighbor_route_map_name_direction_out (common-def:name-string64) If this variable is read-only (config: false) in the source YANG file, then _set_neighbor_route_map_name_direction_out is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_neighbor_route_map_name_direction_out() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,63})'}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply route map to neighbor', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='common-def:name-string64', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """neighbor_route_map_name_direction_out must be of a type compatible with common-def:name-string64""", 'defined-type': "common-def:name-string64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,63})'}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply route map to neighbor', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='common-def:name-string64', is_config=True)""", }) self.__neighbor_route_map_name_direction_out = t if hasattr(self, '_set'): self._set()
['def', '_set_neighbor_route_map_name_direction_out', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'RestrictedClassType', '(', 'base_type', '=', 'unicode', ',', 'restriction_dict', '=', '{', "'pattern'", ':', "u'[a-zA-Z]{1}([-a-zA-Z0-9\\\\.\\\\\\\\\\\\\\\\@#\\\\+\\\\*\\\\(\\\\)=\\\\{~\\\\}%<>=$_\\\\[\\\\]\\\\|]{0,63})'", '}', ')', ',', 'is_leaf', '=', 'True', ',', 'yang_name', '=', '"neighbor-route-map-name-direction-out"', ',', 'rest_name', '=', '"neighbor-route-map-name-direction-out"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Apply route map to neighbor'", ',', "u'cli-drop-node-name'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-bgp'", ',', 'defining_module', '=', "'brocade-bgp'", ',', 'yang_type', '=', "'common-def:name-string64'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""neighbor_route_map_name_direction_out must be of a type compatible with common-def:name-string64"""', ',', "'defined-type'", ':', '"common-def:name-string64"', ',', "'generated-type'", ':', '"""YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={\'pattern\': u\'[a-zA-Z]{1}([-a-zA-Z0-9\\\\.\\\\\\\\\\\\\\\\@#\\\\+\\\\*\\\\(\\\\)=\\\\{~\\\\}%<>=$_\\\\[\\\\]\\\\|]{0,63})\'}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Apply route map to neighbor\', u\'cli-drop-node-name\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'common-def:name-string64\', is_config=True)"""', ',', '}', ')', 'self', '.', '__neighbor_route_map_name_direction_out', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for neighbor_route_map_name_direction_out, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/neighbor/af_ipv6_vrf_neighbor_address_holder/af_ipv6_neighbor_addr/neighbor_route_map/neighbor_route_map_direction_out/neighbor_route_map_name_direction_out (common-def:name-string64) If this variable is read-only (config: false) in the source YANG file, then _set_neighbor_route_map_name_direction_out is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_neighbor_route_map_name_direction_out() directly.
['Setter', 'method', 'for', 'neighbor_route_map_name_direction_out', 'mapped', 'from', 'YANG', 'variable', '/', 'rbridge_id', '/', 'router', '/', 'router_bgp', '/', 'address_family', '/', 'ipv6', '/', 'ipv6_unicast', '/', 'af_ipv6_vrf', '/', 'neighbor', '/', 'af_ipv6_vrf_neighbor_address_holder', '/', 'af_ipv6_neighbor_addr', '/', 'neighbor_route_map', '/', 'neighbor_route_map_direction_out', '/', 'neighbor_route_map_name_direction_out', '(', 'common', '-', 'def', ':', 'name', '-', 'string64', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_neighbor_route_map_name_direction_out', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_neighbor_route_map_name_direction_out', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/neighbor/af_ipv6_vrf_neighbor_address_holder/af_ipv6_neighbor_addr/neighbor_route_map/neighbor_route_map_direction_out/__init__.py#L91-L112
Row 5,404
b3j0f/schema
b3j0f/schema/lang/factory.py
SchemaFactory.build
def build(self, _resource, _cache=True, updatecontent=True, **kwargs): """Build a schema class from input _resource. :param _resource: object from where get the right schema. :param bool _cache: use _cache system. :param bool updatecontent: if True (default) update result. :rtype: Schema. """ result = None if _cache and _resource in self._schemasbyresource: result = self._schemasbyresource[_resource] else: for builder in self._builders.values(): try: result = builder.build(_resource=_resource, **kwargs) except Exception: pass else: break if result is None: raise ValueError('No builder found for {0}'.format(_resource)) if _cache: self._schemasbyresource[_resource] = result if updatecontent: from ..utils import updatecontent updatecontent(result, updateparents=False) return result
python
def build(self, _resource, _cache=True, updatecontent=True, **kwargs): """Build a schema class from input _resource. :param _resource: object from where get the right schema. :param bool _cache: use _cache system. :param bool updatecontent: if True (default) update result. :rtype: Schema. """ result = None if _cache and _resource in self._schemasbyresource: result = self._schemasbyresource[_resource] else: for builder in self._builders.values(): try: result = builder.build(_resource=_resource, **kwargs) except Exception: pass else: break if result is None: raise ValueError('No builder found for {0}'.format(_resource)) if _cache: self._schemasbyresource[_resource] = result if updatecontent: from ..utils import updatecontent updatecontent(result, updateparents=False) return result
['def', 'build', '(', 'self', ',', '_resource', ',', '_cache', '=', 'True', ',', 'updatecontent', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'result', '=', 'None', 'if', '_cache', 'and', '_resource', 'in', 'self', '.', '_schemasbyresource', ':', 'result', '=', 'self', '.', '_schemasbyresource', '[', '_resource', ']', 'else', ':', 'for', 'builder', 'in', 'self', '.', '_builders', '.', 'values', '(', ')', ':', 'try', ':', 'result', '=', 'builder', '.', 'build', '(', '_resource', '=', '_resource', ',', '*', '*', 'kwargs', ')', 'except', 'Exception', ':', 'pass', 'else', ':', 'break', 'if', 'result', 'is', 'None', ':', 'raise', 'ValueError', '(', "'No builder found for {0}'", '.', 'format', '(', '_resource', ')', ')', 'if', '_cache', ':', 'self', '.', '_schemasbyresource', '[', '_resource', ']', '=', 'result', 'if', 'updatecontent', ':', 'from', '.', '.', 'utils', 'import', 'updatecontent', 'updatecontent', '(', 'result', ',', 'updateparents', '=', 'False', ')', 'return', 'result']
Build a schema class from input _resource. :param _resource: object from where get the right schema. :param bool _cache: use _cache system. :param bool updatecontent: if True (default) update result. :rtype: Schema.
['Build', 'a', 'schema', 'class', 'from', 'input', '_resource', '.']
train
https://github.com/b3j0f/schema/blob/1c88c23337f5fef50254e65bd407112c43396dd9/b3j0f/schema/lang/factory.py#L93-L127
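Row 5,404's build() walks the registered builders and keeps the first one that does not raise, via a for/try/except/else/break loop. A self-contained sketch of that fallback pattern; the two builder functions below are invented for the example.

```python
def first_successful(builders, resource):
    """Return the first builder result that does not raise, mirroring the
    for/try/else/break loop in SchemaFactory.build above (names invented)."""
    result = None
    for build in builders:
        try:
            result = build(resource)
        except Exception:
            # This builder cannot handle the resource; try the next one.
            continue
        else:
            break
    if result is None:
        raise ValueError('No builder found for {0}'.format(resource))
    return result


def failing_builder(resource):
    raise TypeError('cannot handle {0!r}'.format(resource))


def dict_builder(resource):
    return {'schema_for': resource}


print(first_successful([failing_builder, dict_builder], 'resource.json'))
```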
Row 5,405
cloud-custodian/cloud-custodian
c7n/cwe.py
CloudWatchEvents.get_trail_ids
def get_trail_ids(cls, event, mode): """extract resources ids from a cloud trail event.""" resource_ids = () event_name = event['detail']['eventName'] event_source = event['detail']['eventSource'] for e in mode.get('events', []): if not isinstance(e, dict): # Check if we have a short cut / alias info = CloudWatchEvents.match(event) if info: return info['ids'].search(event) continue if event_name != e.get('event'): continue if event_source != e.get('source'): continue id_query = e.get('ids') if not id_query: raise ValueError("No id query configured") evt = event # be forgiving for users specifying with details or without if not id_query.startswith('detail.'): evt = event.get('detail', {}) resource_ids = jmespath.search(id_query, evt) if resource_ids: break return resource_ids
python
def get_trail_ids(cls, event, mode): """extract resources ids from a cloud trail event.""" resource_ids = () event_name = event['detail']['eventName'] event_source = event['detail']['eventSource'] for e in mode.get('events', []): if not isinstance(e, dict): # Check if we have a short cut / alias info = CloudWatchEvents.match(event) if info: return info['ids'].search(event) continue if event_name != e.get('event'): continue if event_source != e.get('source'): continue id_query = e.get('ids') if not id_query: raise ValueError("No id query configured") evt = event # be forgiving for users specifying with details or without if not id_query.startswith('detail.'): evt = event.get('detail', {}) resource_ids = jmespath.search(id_query, evt) if resource_ids: break return resource_ids
['def', 'get_trail_ids', '(', 'cls', ',', 'event', ',', 'mode', ')', ':', 'resource_ids', '=', '(', ')', 'event_name', '=', 'event', '[', "'detail'", ']', '[', "'eventName'", ']', 'event_source', '=', 'event', '[', "'detail'", ']', '[', "'eventSource'", ']', 'for', 'e', 'in', 'mode', '.', 'get', '(', "'events'", ',', '[', ']', ')', ':', 'if', 'not', 'isinstance', '(', 'e', ',', 'dict', ')', ':', '# Check if we have a short cut / alias', 'info', '=', 'CloudWatchEvents', '.', 'match', '(', 'event', ')', 'if', 'info', ':', 'return', 'info', '[', "'ids'", ']', '.', 'search', '(', 'event', ')', 'continue', 'if', 'event_name', '!=', 'e', '.', 'get', '(', "'event'", ')', ':', 'continue', 'if', 'event_source', '!=', 'e', '.', 'get', '(', "'source'", ')', ':', 'continue', 'id_query', '=', 'e', '.', 'get', '(', "'ids'", ')', 'if', 'not', 'id_query', ':', 'raise', 'ValueError', '(', '"No id query configured"', ')', 'evt', '=', 'event', '# be forgiving for users specifying with details or without', 'if', 'not', 'id_query', '.', 'startswith', '(', "'detail.'", ')', ':', 'evt', '=', 'event', '.', 'get', '(', "'detail'", ',', '{', '}', ')', 'resource_ids', '=', 'jmespath', '.', 'search', '(', 'id_query', ',', 'evt', ')', 'if', 'resource_ids', ':', 'break', 'return', 'resource_ids']
extract resources ids from a cloud trail event.
['extract', 'resources', 'ids', 'from', 'a', 'cloud', 'trail', 'event', '.']
train
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/cwe.py#L120-L147
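Row 5,405 resolves resource ids with a JMESPath expression and, when the configured expression lacks a 'detail.' prefix, evaluates it against the event's detail sub-document. A short sketch of that lookup on a made-up CloudTrail-style event (requires the jmespath package):

```python
import jmespath

# A trimmed, made-up CloudTrail-style event; only the fields the lookup
# needs are present.
event = {
    "detail": {
        "eventName": "CreateBucket",
        "eventSource": "s3.amazonaws.com",
        "requestParameters": {"bucketName": "example-bucket"},
    }
}

# When the configured 'ids' expression has no 'detail.' prefix, it is
# evaluated against the detail sub-document, as in the forgiving branch above.
id_query = "requestParameters.bucketName"
evt = event if id_query.startswith("detail.") else event.get("detail", {})
print(jmespath.search(id_query, evt))  # -> example-bucket
```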
Row 5,406
NaPs/Kolekto
kolekto/commands/importer.py
copy
def copy(tree, source_filename): """ Copy file in tree, show a progress bar during operations, and return the sha1 sum of copied file. """ #_, ext = os.path.splitext(source_filename) filehash = sha1() with printer.progress(os.path.getsize(source_filename)) as update: with open(source_filename, 'rb') as fsource: with NamedTemporaryFile(dir=os.path.join(tree, '.kolekto', 'movies'), delete=False) as fdestination: # Copy the source into the temporary destination: while True: buf = fsource.read(10 * 1024) if not buf: break filehash.update(buf) fdestination.write(buf) update(len(buf)) # Rename the file to its final name or raise an error if # the file already exists: dest = os.path.join(tree, '.kolekto', 'movies', filehash.hexdigest()) if os.path.exists(dest): raise IOError('This file already exists in tree (%s)' % filehash.hexdigest()) else: os.rename(fdestination.name, dest) return filehash.hexdigest()
python
def copy(tree, source_filename): """ Copy file in tree, show a progress bar during operations, and return the sha1 sum of copied file. """ #_, ext = os.path.splitext(source_filename) filehash = sha1() with printer.progress(os.path.getsize(source_filename)) as update: with open(source_filename, 'rb') as fsource: with NamedTemporaryFile(dir=os.path.join(tree, '.kolekto', 'movies'), delete=False) as fdestination: # Copy the source into the temporary destination: while True: buf = fsource.read(10 * 1024) if not buf: break filehash.update(buf) fdestination.write(buf) update(len(buf)) # Rename the file to its final name or raise an error if # the file already exists: dest = os.path.join(tree, '.kolekto', 'movies', filehash.hexdigest()) if os.path.exists(dest): raise IOError('This file already exists in tree (%s)' % filehash.hexdigest()) else: os.rename(fdestination.name, dest) return filehash.hexdigest()
['def', 'copy', '(', 'tree', ',', 'source_filename', ')', ':', '#_, ext = os.path.splitext(source_filename)', 'filehash', '=', 'sha1', '(', ')', 'with', 'printer', '.', 'progress', '(', 'os', '.', 'path', '.', 'getsize', '(', 'source_filename', ')', ')', 'as', 'update', ':', 'with', 'open', '(', 'source_filename', ',', "'rb'", ')', 'as', 'fsource', ':', 'with', 'NamedTemporaryFile', '(', 'dir', '=', 'os', '.', 'path', '.', 'join', '(', 'tree', ',', "'.kolekto'", ',', "'movies'", ')', ',', 'delete', '=', 'False', ')', 'as', 'fdestination', ':', '# Copy the source into the temporary destination:', 'while', 'True', ':', 'buf', '=', 'fsource', '.', 'read', '(', '10', '*', '1024', ')', 'if', 'not', 'buf', ':', 'break', 'filehash', '.', 'update', '(', 'buf', ')', 'fdestination', '.', 'write', '(', 'buf', ')', 'update', '(', 'len', '(', 'buf', ')', ')', '# Rename the file to its final name or raise an error if', '# the file already exists:', 'dest', '=', 'os', '.', 'path', '.', 'join', '(', 'tree', ',', "'.kolekto'", ',', "'movies'", ',', 'filehash', '.', 'hexdigest', '(', ')', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'dest', ')', ':', 'raise', 'IOError', '(', "'This file already exists in tree (%s)'", '%', 'filehash', '.', 'hexdigest', '(', ')', ')', 'else', ':', 'os', '.', 'rename', '(', 'fdestination', '.', 'name', ',', 'dest', ')', 'return', 'filehash', '.', 'hexdigest', '(', ')']
Copy file in tree, show a progress bar during operations, and return the sha1 sum of copied file.
['Copy', 'file', 'in', 'tree', 'show', 'a', 'progress', 'bar', 'during', 'operations', 'and', 'return', 'the', 'sha1', 'sum', 'of', 'copied', 'file', '.']
train
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/importer.py#L36-L60
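Row 5,406 streams the source file in 10 KiB chunks, updating a SHA-1 digest as it copies. The hashing half of that loop, sketched with only the standard library:

```python
import hashlib

def sha1_of_file(path, chunk_size=10 * 1024):
    """Hash a file in fixed-size chunks, the same read loop the importer
    above runs while copying into the tree."""
    digest = hashlib.sha1()
    with open(path, 'rb') as fsource:
        while True:
            buf = fsource.read(chunk_size)
            if not buf:
                break
            digest.update(buf)
    return digest.hexdigest()

# Any readable file works; hashing this script itself keeps the example
# self-contained.
print(sha1_of_file(__file__))
```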
Row 5,407
openeemeter/eeweather
eeweather/ranking.py
select_station
def select_station( candidates, coverage_range=None, min_fraction_coverage=0.9, distance_warnings=(50000, 200000), rank=1, ): """ Select a station from a list of candidates that meets given data quality criteria. Parameters ---------- candidates : :any:`pandas.DataFrame` A dataframe of the form given by :any:`eeweather.rank_stations` or :any:`eeweather.combine_ranked_stations`, specifically having at least an index with ``usaf_id`` values and the column ``distance_meters``. Returns ------- isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str) A qualified weather station. ``None`` if no station meets criteria. """ def _test_station(station): if coverage_range is None: return True, [] else: start_date, end_date = coverage_range try: tempC, warnings = eeweather.mockable.load_isd_hourly_temp_data( station, start_date, end_date ) except ISDDataNotAvailableError: return False, [] # reject # TODO(philngo): also need to incorporate within-day limits if len(tempC) > 0: fraction_coverage = tempC.notnull().sum() / float(len(tempC)) return (fraction_coverage > min_fraction_coverage), warnings else: return False, [] # reject def _station_warnings(station, distance_meters): return [ EEWeatherWarning( qualified_name="eeweather.exceeds_maximum_distance", description=( "Distance from target to weather station is greater" "than the specified km." ), data={ "distance_meters": distance_meters, "max_distance_meters": d, "rank": rank, }, ) for d in distance_warnings if distance_meters > d ] n_stations_passed = 0 for usaf_id, row in candidates.iterrows(): station = ISDStation(usaf_id) test_result, warnings = _test_station(station) if test_result: n_stations_passed += 1 if n_stations_passed == rank: if not warnings: warnings = [] warnings.extend(_station_warnings(station, row.distance_meters)) return station, warnings no_station_warning = EEWeatherWarning( qualified_name="eeweather.no_weather_station_selected", description=( "No weather station found with the specified rank and" " minimum fracitional coverage." ), data={"rank": rank, "min_fraction_coverage": min_fraction_coverage}, ) return None, [no_station_warning]
python
def select_station( candidates, coverage_range=None, min_fraction_coverage=0.9, distance_warnings=(50000, 200000), rank=1, ): """ Select a station from a list of candidates that meets given data quality criteria. Parameters ---------- candidates : :any:`pandas.DataFrame` A dataframe of the form given by :any:`eeweather.rank_stations` or :any:`eeweather.combine_ranked_stations`, specifically having at least an index with ``usaf_id`` values and the column ``distance_meters``. Returns ------- isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str) A qualified weather station. ``None`` if no station meets criteria. """ def _test_station(station): if coverage_range is None: return True, [] else: start_date, end_date = coverage_range try: tempC, warnings = eeweather.mockable.load_isd_hourly_temp_data( station, start_date, end_date ) except ISDDataNotAvailableError: return False, [] # reject # TODO(philngo): also need to incorporate within-day limits if len(tempC) > 0: fraction_coverage = tempC.notnull().sum() / float(len(tempC)) return (fraction_coverage > min_fraction_coverage), warnings else: return False, [] # reject def _station_warnings(station, distance_meters): return [ EEWeatherWarning( qualified_name="eeweather.exceeds_maximum_distance", description=( "Distance from target to weather station is greater" "than the specified km." ), data={ "distance_meters": distance_meters, "max_distance_meters": d, "rank": rank, }, ) for d in distance_warnings if distance_meters > d ] n_stations_passed = 0 for usaf_id, row in candidates.iterrows(): station = ISDStation(usaf_id) test_result, warnings = _test_station(station) if test_result: n_stations_passed += 1 if n_stations_passed == rank: if not warnings: warnings = [] warnings.extend(_station_warnings(station, row.distance_meters)) return station, warnings no_station_warning = EEWeatherWarning( qualified_name="eeweather.no_weather_station_selected", description=( "No weather station found with the specified rank and" " minimum fracitional coverage." ), data={"rank": rank, "min_fraction_coverage": min_fraction_coverage}, ) return None, [no_station_warning]
['def', 'select_station', '(', 'candidates', ',', 'coverage_range', '=', 'None', ',', 'min_fraction_coverage', '=', '0.9', ',', 'distance_warnings', '=', '(', '50000', ',', '200000', ')', ',', 'rank', '=', '1', ',', ')', ':', 'def', '_test_station', '(', 'station', ')', ':', 'if', 'coverage_range', 'is', 'None', ':', 'return', 'True', ',', '[', ']', 'else', ':', 'start_date', ',', 'end_date', '=', 'coverage_range', 'try', ':', 'tempC', ',', 'warnings', '=', 'eeweather', '.', 'mockable', '.', 'load_isd_hourly_temp_data', '(', 'station', ',', 'start_date', ',', 'end_date', ')', 'except', 'ISDDataNotAvailableError', ':', 'return', 'False', ',', '[', ']', '# reject', '# TODO(philngo): also need to incorporate within-day limits', 'if', 'len', '(', 'tempC', ')', '>', '0', ':', 'fraction_coverage', '=', 'tempC', '.', 'notnull', '(', ')', '.', 'sum', '(', ')', '/', 'float', '(', 'len', '(', 'tempC', ')', ')', 'return', '(', 'fraction_coverage', '>', 'min_fraction_coverage', ')', ',', 'warnings', 'else', ':', 'return', 'False', ',', '[', ']', '# reject', 'def', '_station_warnings', '(', 'station', ',', 'distance_meters', ')', ':', 'return', '[', 'EEWeatherWarning', '(', 'qualified_name', '=', '"eeweather.exceeds_maximum_distance"', ',', 'description', '=', '(', '"Distance from target to weather station is greater"', '"than the specified km."', ')', ',', 'data', '=', '{', '"distance_meters"', ':', 'distance_meters', ',', '"max_distance_meters"', ':', 'd', ',', '"rank"', ':', 'rank', ',', '}', ',', ')', 'for', 'd', 'in', 'distance_warnings', 'if', 'distance_meters', '>', 'd', ']', 'n_stations_passed', '=', '0', 'for', 'usaf_id', ',', 'row', 'in', 'candidates', '.', 'iterrows', '(', ')', ':', 'station', '=', 'ISDStation', '(', 'usaf_id', ')', 'test_result', ',', 'warnings', '=', '_test_station', '(', 'station', ')', 'if', 'test_result', ':', 'n_stations_passed', '+=', '1', 'if', 'n_stations_passed', '==', 'rank', ':', 'if', 'not', 'warnings', ':', 'warnings', '=', '[', ']', 'warnings', '.', 'extend', '(', '_station_warnings', '(', 'station', ',', 'row', '.', 'distance_meters', ')', ')', 'return', 'station', ',', 'warnings', 'no_station_warning', '=', 'EEWeatherWarning', '(', 'qualified_name', '=', '"eeweather.no_weather_station_selected"', ',', 'description', '=', '(', '"No weather station found with the specified rank and"', '" minimum fracitional coverage."', ')', ',', 'data', '=', '{', '"rank"', ':', 'rank', ',', '"min_fraction_coverage"', ':', 'min_fraction_coverage', '}', ',', ')', 'return', 'None', ',', '[', 'no_station_warning', ']']
Select a station from a list of candidates that meets given data quality criteria. Parameters ---------- candidates : :any:`pandas.DataFrame` A dataframe of the form given by :any:`eeweather.rank_stations` or :any:`eeweather.combine_ranked_stations`, specifically having at least an index with ``usaf_id`` values and the column ``distance_meters``. Returns ------- isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str) A qualified weather station. ``None`` if no station meets criteria.
['Select', 'a', 'station', 'from', 'a', 'list', 'of', 'candidates', 'that', 'meets', 'given', 'data', 'quality', 'criteria', '.']
train
https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/ranking.py#L358-L438
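Row 5,407 accepts a station only when the fraction of non-null hourly temperatures exceeds min_fraction_coverage. The coverage computation from _test_station, reproduced on a made-up pandas Series:

```python
import numpy as np
import pandas as pd

# A made-up hourly temperature series with two missing readings.
tempC = pd.Series([10.5, np.nan, 11.2, 12.0, np.nan, 13.1])

# Fraction of non-null readings, as computed inside _test_station above;
# the station is accepted only if this exceeds min_fraction_coverage
# (0.9 by default).
fraction_coverage = tempC.notnull().sum() / float(len(tempC))
print(fraction_coverage)  # 4/6 ~ 0.67, so this station would be rejected
```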
Row 5,408
mjirik/imtools
imtools/sample_data.py
get_conda_path
def get_conda_path(): """ Return anaconda or miniconda directory :return: anaconda directory """ dstdir = '' # try: import subprocess import re # cond info --root work only for root environment # p = subprocess.Popen(['conda', 'info', '--root'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p = subprocess.Popen(['conda', 'info', '-e'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() dstdir = out.strip() dstdir = re.search("\*(.*)\n", dstdir).group(1).strip() # except: # import traceback # traceback.print_exc() # import os.path as op # conda_pth = op.expanduser('~/anaconda/bin') # if not op.exists(conda_pth): # conda_pth = op.expanduser('~/miniconda/bin') # return conda_pth return dstdir
python
def get_conda_path(): """ Return anaconda or miniconda directory :return: anaconda directory """ dstdir = '' # try: import subprocess import re # cond info --root work only for root environment # p = subprocess.Popen(['conda', 'info', '--root'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p = subprocess.Popen(['conda', 'info', '-e'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() dstdir = out.strip() dstdir = re.search("\*(.*)\n", dstdir).group(1).strip() # except: # import traceback # traceback.print_exc() # import os.path as op # conda_pth = op.expanduser('~/anaconda/bin') # if not op.exists(conda_pth): # conda_pth = op.expanduser('~/miniconda/bin') # return conda_pth return dstdir
['def', 'get_conda_path', '(', ')', ':', 'dstdir', '=', "''", '# try:', 'import', 'subprocess', 'import', 're', '# cond info --root work only for root environment', "# p = subprocess.Popen(['conda', 'info', '--root'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", 'p', '=', 'subprocess', '.', 'Popen', '(', '[', "'conda'", ',', "'info'", ',', "'-e'", ']', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ')', 'out', ',', 'err', '=', 'p', '.', 'communicate', '(', ')', 'dstdir', '=', 'out', '.', 'strip', '(', ')', 'dstdir', '=', 're', '.', 'search', '(', '"\\*(.*)\\n"', ',', 'dstdir', ')', '.', 'group', '(', '1', ')', '.', 'strip', '(', ')', '# except:', '# import traceback', '# traceback.print_exc()', '# import os.path as op', "# conda_pth = op.expanduser('~/anaconda/bin')", '# if not op.exists(conda_pth):', "# conda_pth = op.expanduser('~/miniconda/bin')", '# return conda_pth', 'return', 'dstdir']
Return anaconda or miniconda directory :return: anaconda directory
['Return', 'anaconda', 'or', 'miniconda', 'directory', ':', 'return', ':', 'anaconda', 'directory']
train
https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/sample_data.py#L227-L253
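Row 5,408 shells out to "conda info -e" and keeps the path on the line marked with '*'. The extraction step, run here against a sample output string with made-up environment paths; note that under Python 3 the subprocess output is bytes and would need decoding first.

```python
import re

# Sample `conda info -e` output; the environment names and paths are made
# up. The active environment is the line marked with '*'.
sample_output = """# conda environments:
#
base                  *  /home/user/miniconda3
analysis                 /home/user/miniconda3/envs/analysis
"""

# The function above keeps everything after the '*' on the active line.
# Under Python 3 the raw subprocess output is bytes and would need
# .decode() before this step.
active_prefix = re.search(r"\*(.*)\n", sample_output).group(1).strip()
print(active_prefix)  # -> /home/user/miniconda3
```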
Row 5,409
inveniosoftware/invenio-files-rest
invenio_files_rest/helpers.py
populate_from_path
def populate_from_path(bucket, source, checksum=True, key_prefix='', chunk_size=None): """Populate a ``bucket`` from all files in path. :param bucket: The bucket (instance or id) to create the object in. :param source: The file or directory path. :param checksum: If ``True`` then a MD5 checksum will be computed for each file. (Default: ``True``) :param key_prefix: The key prefix for the bucket. :param chunk_size: Chunk size to read from file. :returns: A iterator for all :class:`invenio_files_rest.models.ObjectVersion` instances. """ from .models import FileInstance, ObjectVersion def create_file(key, path): """Create new ``ObjectVersion`` from path or existing ``FileInstance``. It checks MD5 checksum and size of existing ``FileInstance``s. """ key = key_prefix + key if checksum: file_checksum = compute_md5_checksum( open(path, 'rb'), chunk_size=chunk_size) file_instance = FileInstance.query.filter_by( checksum=file_checksum, size=os.path.getsize(path) ).first() if file_instance: return ObjectVersion.create( bucket, key, _file_id=file_instance.id ) return ObjectVersion.create(bucket, key, stream=open(path, 'rb')) if os.path.isfile(source): yield create_file(os.path.basename(source), source) else: for root, dirs, files in os.walk(source, topdown=False): for name in files: filename = os.path.join(root, name) assert filename.startswith(source) parts = [p for p in filename[len(source):].split(os.sep) if p] yield create_file('/'.join(parts), os.path.join(root, name))
python
def populate_from_path(bucket, source, checksum=True, key_prefix='', chunk_size=None): """Populate a ``bucket`` from all files in path. :param bucket: The bucket (instance or id) to create the object in. :param source: The file or directory path. :param checksum: If ``True`` then a MD5 checksum will be computed for each file. (Default: ``True``) :param key_prefix: The key prefix for the bucket. :param chunk_size: Chunk size to read from file. :returns: A iterator for all :class:`invenio_files_rest.models.ObjectVersion` instances. """ from .models import FileInstance, ObjectVersion def create_file(key, path): """Create new ``ObjectVersion`` from path or existing ``FileInstance``. It checks MD5 checksum and size of existing ``FileInstance``s. """ key = key_prefix + key if checksum: file_checksum = compute_md5_checksum( open(path, 'rb'), chunk_size=chunk_size) file_instance = FileInstance.query.filter_by( checksum=file_checksum, size=os.path.getsize(path) ).first() if file_instance: return ObjectVersion.create( bucket, key, _file_id=file_instance.id ) return ObjectVersion.create(bucket, key, stream=open(path, 'rb')) if os.path.isfile(source): yield create_file(os.path.basename(source), source) else: for root, dirs, files in os.walk(source, topdown=False): for name in files: filename = os.path.join(root, name) assert filename.startswith(source) parts = [p for p in filename[len(source):].split(os.sep) if p] yield create_file('/'.join(parts), os.path.join(root, name))
['def', 'populate_from_path', '(', 'bucket', ',', 'source', ',', 'checksum', '=', 'True', ',', 'key_prefix', '=', "''", ',', 'chunk_size', '=', 'None', ')', ':', 'from', '.', 'models', 'import', 'FileInstance', ',', 'ObjectVersion', 'def', 'create_file', '(', 'key', ',', 'path', ')', ':', '"""Create new ``ObjectVersion`` from path or existing ``FileInstance``.\n\n It checks MD5 checksum and size of existing ``FileInstance``s.\n """', 'key', '=', 'key_prefix', '+', 'key', 'if', 'checksum', ':', 'file_checksum', '=', 'compute_md5_checksum', '(', 'open', '(', 'path', ',', "'rb'", ')', ',', 'chunk_size', '=', 'chunk_size', ')', 'file_instance', '=', 'FileInstance', '.', 'query', '.', 'filter_by', '(', 'checksum', '=', 'file_checksum', ',', 'size', '=', 'os', '.', 'path', '.', 'getsize', '(', 'path', ')', ')', '.', 'first', '(', ')', 'if', 'file_instance', ':', 'return', 'ObjectVersion', '.', 'create', '(', 'bucket', ',', 'key', ',', '_file_id', '=', 'file_instance', '.', 'id', ')', 'return', 'ObjectVersion', '.', 'create', '(', 'bucket', ',', 'key', ',', 'stream', '=', 'open', '(', 'path', ',', "'rb'", ')', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'source', ')', ':', 'yield', 'create_file', '(', 'os', '.', 'path', '.', 'basename', '(', 'source', ')', ',', 'source', ')', 'else', ':', 'for', 'root', ',', 'dirs', ',', 'files', 'in', 'os', '.', 'walk', '(', 'source', ',', 'topdown', '=', 'False', ')', ':', 'for', 'name', 'in', 'files', ':', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'name', ')', 'assert', 'filename', '.', 'startswith', '(', 'source', ')', 'parts', '=', '[', 'p', 'for', 'p', 'in', 'filename', '[', 'len', '(', 'source', ')', ':', ']', '.', 'split', '(', 'os', '.', 'sep', ')', 'if', 'p', ']', 'yield', 'create_file', '(', "'/'", '.', 'join', '(', 'parts', ')', ',', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'name', ')', ')']
Populate a ``bucket`` from all files in path. :param bucket: The bucket (instance or id) to create the object in. :param source: The file or directory path. :param checksum: If ``True`` then a MD5 checksum will be computed for each file. (Default: ``True``) :param key_prefix: The key prefix for the bucket. :param chunk_size: Chunk size to read from file. :returns: A iterator for all :class:`invenio_files_rest.models.ObjectVersion` instances.
['Populate', 'a', 'bucket', 'from', 'all', 'files', 'in', 'path', '.']
train
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/helpers.py#L251-L293
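Row 5,409 derives each object key by stripping the source prefix from the walked file path and joining the remaining parts with '/'. A self-contained sketch of that key derivation on a throwaway directory tree:

```python
import os
import tempfile

# Build a small throwaway directory tree so the walk has something to
# traverse; the file names are arbitrary.
source = tempfile.mkdtemp()
os.makedirs(os.path.join(source, 'sub'))
for relpath in ('a.txt', os.path.join('sub', 'b.txt')):
    with open(os.path.join(source, relpath), 'w') as fh:
        fh.write('demo')

# Derive object keys the way populate_from_path does above: strip the
# source prefix and join the remaining path parts with '/'.
for root, dirs, files in os.walk(source, topdown=False):
    for name in files:
        filename = os.path.join(root, name)
        parts = [p for p in filename[len(source):].split(os.sep) if p]
        print('/'.join(parts))
```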
Row 5,410
pandas-dev/pandas
pandas/core/frame.py
DataFrame.to_html
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. .. versionadded:: 0.19.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. .. versionadded:: 0.23.0 render_links : bool, default False Convert URLs to HTML links. .. versionadded:: 0.24.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if (justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS): raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, table_id=table_id, render_links=render_links) # TODO: a generic formatter wld b in DataFrameFormatter formatter.to_html(classes=classes, notebook=notebook, border=border) if buf is None: return formatter.buf.getvalue()
python
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. .. versionadded:: 0.19.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. .. versionadded:: 0.23.0 render_links : bool, default False Convert URLs to HTML links. .. versionadded:: 0.24.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if (justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS): raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, table_id=table_id, render_links=render_links) # TODO: a generic formatter wld b in DataFrameFormatter formatter.to_html(classes=classes, notebook=notebook, border=border) if buf is None: return formatter.buf.getvalue()
['def', 'to_html', '(', 'self', ',', 'buf', '=', 'None', ',', 'columns', '=', 'None', ',', 'col_space', '=', 'None', ',', 'header', '=', 'True', ',', 'index', '=', 'True', ',', 'na_rep', '=', "'NaN'", ',', 'formatters', '=', 'None', ',', 'float_format', '=', 'None', ',', 'sparsify', '=', 'None', ',', 'index_names', '=', 'True', ',', 'justify', '=', 'None', ',', 'max_rows', '=', 'None', ',', 'max_cols', '=', 'None', ',', 'show_dimensions', '=', 'False', ',', 'decimal', '=', "'.'", ',', 'bold_rows', '=', 'True', ',', 'classes', '=', 'None', ',', 'escape', '=', 'True', ',', 'notebook', '=', 'False', ',', 'border', '=', 'None', ',', 'table_id', '=', 'None', ',', 'render_links', '=', 'False', ')', ':', 'if', '(', 'justify', 'is', 'not', 'None', 'and', 'justify', 'not', 'in', 'fmt', '.', '_VALID_JUSTIFY_PARAMETERS', ')', ':', 'raise', 'ValueError', '(', '"Invalid value for justify parameter"', ')', 'formatter', '=', 'fmt', '.', 'DataFrameFormatter', '(', 'self', ',', 'buf', '=', 'buf', ',', 'columns', '=', 'columns', ',', 'col_space', '=', 'col_space', ',', 'na_rep', '=', 'na_rep', ',', 'formatters', '=', 'formatters', ',', 'float_format', '=', 'float_format', ',', 'sparsify', '=', 'sparsify', ',', 'justify', '=', 'justify', ',', 'index_names', '=', 'index_names', ',', 'header', '=', 'header', ',', 'index', '=', 'index', ',', 'bold_rows', '=', 'bold_rows', ',', 'escape', '=', 'escape', ',', 'max_rows', '=', 'max_rows', ',', 'max_cols', '=', 'max_cols', ',', 'show_dimensions', '=', 'show_dimensions', ',', 'decimal', '=', 'decimal', ',', 'table_id', '=', 'table_id', ',', 'render_links', '=', 'render_links', ')', '# TODO: a generic formatter wld b in DataFrameFormatter', 'formatter', '.', 'to_html', '(', 'classes', '=', 'classes', ',', 'notebook', '=', 'notebook', ',', 'border', '=', 'border', ')', 'if', 'buf', 'is', 'None', ':', 'return', 'formatter', '.', 'buf', '.', 'getvalue', '(', ')']
Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. .. versionadded:: 0.19.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. .. versionadded:: 0.23.0 render_links : bool, default False Convert URLs to HTML links. .. versionadded:: 0.24.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string.
['Render', 'a', 'DataFrame', 'as', 'an', 'HTML', 'table', '.', '%', '(', 'shared_params', ')', 's', 'bold_rows', ':', 'bool', 'default', 'True', 'Make', 'the', 'row', 'labels', 'bold', 'in', 'the', 'output', '.', 'classes', ':', 'str', 'or', 'list', 'or', 'tuple', 'default', 'None', 'CSS', 'class', '(', 'es', ')', 'to', 'apply', 'to', 'the', 'resulting', 'html', 'table', '.', 'escape', ':', 'bool', 'default', 'True', 'Convert', 'the', 'characters', '<', '>', 'and', '&', 'to', 'HTML', '-', 'safe', 'sequences', '.', 'notebook', ':', '{', 'True', 'False', '}', 'default', 'False', 'Whether', 'the', 'generated', 'HTML', 'is', 'for', 'IPython', 'Notebook', '.', 'border', ':', 'int', 'A', 'border', '=', 'border', 'attribute', 'is', 'included', 'in', 'the', 'opening', '<table', '>', 'tag', '.', 'Default', 'pd', '.', 'options', '.', 'html', '.', 'border', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2151-L2210
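Row 5,410 is pandas' DataFrame.to_html. A short usage example on a toy frame; the CSS class, table id, and border value are arbitrary choices for the example, not defaults:

```python
import pandas as pd

df = pd.DataFrame({'city': ['Oslo', 'Lima'], 'temp_c': [4.5, 19.0]})

# With buf=None the rendered table comes back as a string; the class,
# id, and border values here are arbitrary example choices.
html = df.to_html(classes='weather', table_id='readings', border=0)
print(html.splitlines()[0])  # the opening <table> tag carries the class and id
```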
Row 5,411
BeyondTheClouds/enoslib
enoslib/infra/enos_chameleonbaremetal/provider.py
create_blazar_client
def create_blazar_client(config, session): """Check the reservation, creates a new one if nescessary.""" return blazar_client.Client(session=session, service_type="reservation", region_name=os.environ["OS_REGION_NAME"])
python
def create_blazar_client(config, session): """Check the reservation, creates a new one if nescessary.""" return blazar_client.Client(session=session, service_type="reservation", region_name=os.environ["OS_REGION_NAME"])
['def', 'create_blazar_client', '(', 'config', ',', 'session', ')', ':', 'return', 'blazar_client', '.', 'Client', '(', 'session', '=', 'session', ',', 'service_type', '=', '"reservation"', ',', 'region_name', '=', 'os', '.', 'environ', '[', '"OS_REGION_NAME"', ']', ')']
Check the reservation, creates a new one if nescessary.
['Check', 'the', 'reservation', 'creates', 'a', 'new', 'one', 'if', 'nescessary', '.']
train
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_chameleonbaremetal/provider.py#L46-L50
Row 5,412
google/tangent
tangent/utils.py
shapes_match
def shapes_match(a, b): """Recursively check if shapes of object `a` and `b` match. Will walk lists, tuples and dicts. Args: a: object of type (numpy.ndarray,tf.Tensor,list,tuple,dict) to check for matching shapes against `b`. b: object to check for matching shape against `a`. Returns: A boolean indicating whether the shapes of `a` and `b` match. """ if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)): if len(a) != len(b): return False return all([shapes_match(ia, ib) for ia, ib in zip(a, b)]) elif isinstance(a, dict) and isinstance(b, dict): if len(a) != len(b): return False match = True for (ak, av), (bk, bv) in zip(a.items(), b.items()): match = match and all([ak == bk and shapes_match(av, bv)]) return match else: shape_checker = shape_checkers[(type(a), type(b))] return shape_checker(a, b)
python
def shapes_match(a, b): """Recursively check if shapes of object `a` and `b` match. Will walk lists, tuples and dicts. Args: a: object of type (numpy.ndarray,tf.Tensor,list,tuple,dict) to check for matching shapes against `b`. b: object to check for matching shape against `a`. Returns: A boolean indicating whether the shapes of `a` and `b` match. """ if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)): if len(a) != len(b): return False return all([shapes_match(ia, ib) for ia, ib in zip(a, b)]) elif isinstance(a, dict) and isinstance(b, dict): if len(a) != len(b): return False match = True for (ak, av), (bk, bv) in zip(a.items(), b.items()): match = match and all([ak == bk and shapes_match(av, bv)]) return match else: shape_checker = shape_checkers[(type(a), type(b))] return shape_checker(a, b)
['def', 'shapes_match', '(', 'a', ',', 'b', ')', ':', 'if', 'isinstance', '(', 'a', ',', '(', 'tuple', ',', 'list', ')', ')', 'and', 'isinstance', '(', 'b', ',', '(', 'tuple', ',', 'list', ')', ')', ':', 'if', 'len', '(', 'a', ')', '!=', 'len', '(', 'b', ')', ':', 'return', 'False', 'return', 'all', '(', '[', 'shapes_match', '(', 'ia', ',', 'ib', ')', 'for', 'ia', ',', 'ib', 'in', 'zip', '(', 'a', ',', 'b', ')', ']', ')', 'elif', 'isinstance', '(', 'a', ',', 'dict', ')', 'and', 'isinstance', '(', 'b', ',', 'dict', ')', ':', 'if', 'len', '(', 'a', ')', '!=', 'len', '(', 'b', ')', ':', 'return', 'False', 'match', '=', 'True', 'for', '(', 'ak', ',', 'av', ')', ',', '(', 'bk', ',', 'bv', ')', 'in', 'zip', '(', 'a', '.', 'items', '(', ')', ',', 'b', '.', 'items', '(', ')', ')', ':', 'match', '=', 'match', 'and', 'all', '(', '[', 'ak', '==', 'bk', 'and', 'shapes_match', '(', 'av', ',', 'bv', ')', ']', ')', 'return', 'match', 'else', ':', 'shape_checker', '=', 'shape_checkers', '[', '(', 'type', '(', 'a', ')', ',', 'type', '(', 'b', ')', ')', ']', 'return', 'shape_checker', '(', 'a', ',', 'b', ')']
Recursively check if shapes of object `a` and `b` match. Will walk lists, tuples and dicts. Args: a: object of type (numpy.ndarray,tf.Tensor,list,tuple,dict) to check for matching shapes against `b`. b: object to check for matching shape against `a`. Returns: A boolean indicating whether the shapes of `a` and `b` match.
['Recursively', 'check', 'if', 'shapes', 'of', 'object', 'a', 'and', 'b', 'match', '.']
train
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/utils.py#L604-L630
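Row 5,412's shapes_match recurses through lists, tuples, and dicts and, at the leaves, dispatches on a module-level shape_checkers registry keyed by (type(a), type(b)). A sketch of that leaf dispatch with a small, assumed registry (not tangent's actual one):

```python
import numpy as np

# A stand-in for the module-level shape_checkers registry the function
# relies on; the keys are (type(a), type(b)) pairs. These entries are an
# assumption for the sketch, not tangent's actual registry contents.
shape_checkers = {
    (np.ndarray, np.ndarray): lambda a, b: a.shape == b.shape,
}

def leaf_shapes_match(a, b):
    # Dispatch on the concrete types, exactly as the final branch above does.
    return shape_checkers[(type(a), type(b))](a, b)

print(leaf_shapes_match(np.zeros((2, 3)), np.ones((2, 3))))  # True
print(leaf_shapes_match(np.zeros((2, 3)), np.ones((4,))))    # False
```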
Row 5,413
inasafe/inasafe
safe/gui/tools/wizard/step_kw57_extra_keywords.py
StepKwExtraKeywords.set_existing_extra_keywords
def set_existing_extra_keywords(self): """Set extra keywords from the value from metadata.""" extra_keywords = self.parent.get_existing_keyword('extra_keywords') for key, widgets in list(self.widgets_dict.items()): value = extra_keywords.get(key) if value is None: widgets[0].setChecked(False) else: widgets[0].setChecked(True) if isinstance(widgets[1], QLineEdit): widgets[1].setText(value) elif isinstance(widgets[1], QComboBox): value_index = widgets[1].findData(value) widgets[1].setCurrentIndex(value_index) elif isinstance(widgets[1], QDoubleSpinBox): try: value = float(value) widgets[1].setValue(value) except ValueError: LOGGER.warning('Failed to convert %s to float' % value) elif isinstance(widgets[1], QDateTimeEdit): try: value_datetime = datetime.strptime( value, "%Y-%m-%dT%H:%M:%S.%f") widgets[1].setDateTime(value_datetime) except ValueError: try: value_datetime = datetime.strptime( value, "%Y-%m-%dT%H:%M:%S") widgets[1].setDateTime(value_datetime) except ValueError: LOGGER.info( 'Failed to convert %s to datetime' % value)
python
def set_existing_extra_keywords(self): """Set extra keywords from the value from metadata.""" extra_keywords = self.parent.get_existing_keyword('extra_keywords') for key, widgets in list(self.widgets_dict.items()): value = extra_keywords.get(key) if value is None: widgets[0].setChecked(False) else: widgets[0].setChecked(True) if isinstance(widgets[1], QLineEdit): widgets[1].setText(value) elif isinstance(widgets[1], QComboBox): value_index = widgets[1].findData(value) widgets[1].setCurrentIndex(value_index) elif isinstance(widgets[1], QDoubleSpinBox): try: value = float(value) widgets[1].setValue(value) except ValueError: LOGGER.warning('Failed to convert %s to float' % value) elif isinstance(widgets[1], QDateTimeEdit): try: value_datetime = datetime.strptime( value, "%Y-%m-%dT%H:%M:%S.%f") widgets[1].setDateTime(value_datetime) except ValueError: try: value_datetime = datetime.strptime( value, "%Y-%m-%dT%H:%M:%S") widgets[1].setDateTime(value_datetime) except ValueError: LOGGER.info( 'Failed to convert %s to datetime' % value)
['def', 'set_existing_extra_keywords', '(', 'self', ')', ':', 'extra_keywords', '=', 'self', '.', 'parent', '.', 'get_existing_keyword', '(', "'extra_keywords'", ')', 'for', 'key', ',', 'widgets', 'in', 'list', '(', 'self', '.', 'widgets_dict', '.', 'items', '(', ')', ')', ':', 'value', '=', 'extra_keywords', '.', 'get', '(', 'key', ')', 'if', 'value', 'is', 'None', ':', 'widgets', '[', '0', ']', '.', 'setChecked', '(', 'False', ')', 'else', ':', 'widgets', '[', '0', ']', '.', 'setChecked', '(', 'True', ')', 'if', 'isinstance', '(', 'widgets', '[', '1', ']', ',', 'QLineEdit', ')', ':', 'widgets', '[', '1', ']', '.', 'setText', '(', 'value', ')', 'elif', 'isinstance', '(', 'widgets', '[', '1', ']', ',', 'QComboBox', ')', ':', 'value_index', '=', 'widgets', '[', '1', ']', '.', 'findData', '(', 'value', ')', 'widgets', '[', '1', ']', '.', 'setCurrentIndex', '(', 'value_index', ')', 'elif', 'isinstance', '(', 'widgets', '[', '1', ']', ',', 'QDoubleSpinBox', ')', ':', 'try', ':', 'value', '=', 'float', '(', 'value', ')', 'widgets', '[', '1', ']', '.', 'setValue', '(', 'value', ')', 'except', 'ValueError', ':', 'LOGGER', '.', 'warning', '(', "'Failed to convert %s to float'", '%', 'value', ')', 'elif', 'isinstance', '(', 'widgets', '[', '1', ']', ',', 'QDateTimeEdit', ')', ':', 'try', ':', 'value_datetime', '=', 'datetime', '.', 'strptime', '(', 'value', ',', '"%Y-%m-%dT%H:%M:%S.%f"', ')', 'widgets', '[', '1', ']', '.', 'setDateTime', '(', 'value_datetime', ')', 'except', 'ValueError', ':', 'try', ':', 'value_datetime', '=', 'datetime', '.', 'strptime', '(', 'value', ',', '"%Y-%m-%dT%H:%M:%S"', ')', 'widgets', '[', '1', ']', '.', 'setDateTime', '(', 'value_datetime', ')', 'except', 'ValueError', ':', 'LOGGER', '.', 'info', '(', "'Failed to convert %s to datetime'", '%', 'value', ')']
Set extra keywords from the value from metadata.
['Set', 'extra', 'keywords', 'from', 'the', 'value', 'from', 'metadata', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw57_extra_keywords.py#L131-L163
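A minimal standalone sketch of the two-stage datetime parsing used for the QDateTimeEdit values in the record above; parse_iso_keyword and the sample strings are hypothetical and not part of the InaSAFE codebase.

    from datetime import datetime

    def parse_iso_keyword(value):
        # Try the microsecond ISO format first, then fall back to whole seconds.
        for fmt in ("%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"):
            try:
                return datetime.strptime(value, fmt)
            except ValueError:
                continue
        return None  # the caller decides how to log the failure

    print(parse_iso_keyword("2019-03-01T12:30:00.250000"))
    print(parse_iso_keyword("2019-03-01T12:30:00"))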
5,414
spyder-ide/spyder
spyder/widgets/onecolumntree.py
OneColumnTree.item_selection_changed
def item_selection_changed(self): """Item selection has changed""" is_selection = len(self.selectedItems()) > 0 self.expand_selection_action.setEnabled(is_selection) self.collapse_selection_action.setEnabled(is_selection)
python
def item_selection_changed(self): """Item selection has changed""" is_selection = len(self.selectedItems()) > 0 self.expand_selection_action.setEnabled(is_selection) self.collapse_selection_action.setEnabled(is_selection)
['def', 'item_selection_changed', '(', 'self', ')', ':', 'is_selection', '=', 'len', '(', 'self', '.', 'selectedItems', '(', ')', ')', '>', '0', 'self', '.', 'expand_selection_action', '.', 'setEnabled', '(', 'is_selection', ')', 'self', '.', 'collapse_selection_action', '.', 'setEnabled', '(', 'is_selection', ')']
Item selection has changed
['Item', 'selection', 'has', 'changed']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/onecolumntree.py#L139-L143
5,415
koszullab/instaGRAAL
instagraal/pyramid_sparse.py
pyramid.zoom_out_pixel
def zoom_out_pixel(self, curr_pixel): """ return the curr_frag at a lower resolution""" low_frag = curr_pixel[0] high_frag = curr_pixel[1] level = curr_pixel[2] str_level = str(level) if level < self.n_level - 1: low_super = self.spec_level[str_level]["fragments_dict"][low_frag][ "super_index" ] high_super = self.spec_level[str_level]["fragments_dict"][ high_frag ]["sub_index"] new_pix_low = min([low_super, high_super]) new_pix_high = max([low_super, high_super]) new_level = level + 1 new_pixel = [new_pix_low, new_pix_high, new_level] else: new_pixel = curr_pixel return new_pixel
python
def zoom_out_pixel(self, curr_pixel): """ return the curr_frag at a lower resolution""" low_frag = curr_pixel[0] high_frag = curr_pixel[1] level = curr_pixel[2] str_level = str(level) if level < self.n_level - 1: low_super = self.spec_level[str_level]["fragments_dict"][low_frag][ "super_index" ] high_super = self.spec_level[str_level]["fragments_dict"][ high_frag ]["sub_index"] new_pix_low = min([low_super, high_super]) new_pix_high = max([low_super, high_super]) new_level = level + 1 new_pixel = [new_pix_low, new_pix_high, new_level] else: new_pixel = curr_pixel return new_pixel
['def', 'zoom_out_pixel', '(', 'self', ',', 'curr_pixel', ')', ':', 'low_frag', '=', 'curr_pixel', '[', '0', ']', 'high_frag', '=', 'curr_pixel', '[', '1', ']', 'level', '=', 'curr_pixel', '[', '2', ']', 'str_level', '=', 'str', '(', 'level', ')', 'if', 'level', '<', 'self', '.', 'n_level', '-', '1', ':', 'low_super', '=', 'self', '.', 'spec_level', '[', 'str_level', ']', '[', '"fragments_dict"', ']', '[', 'low_frag', ']', '[', '"super_index"', ']', 'high_super', '=', 'self', '.', 'spec_level', '[', 'str_level', ']', '[', '"fragments_dict"', ']', '[', 'high_frag', ']', '[', '"sub_index"', ']', 'new_pix_low', '=', 'min', '(', '[', 'low_super', ',', 'high_super', ']', ')', 'new_pix_high', '=', 'max', '(', '[', 'low_super', ',', 'high_super', ']', ')', 'new_level', '=', 'level', '+', '1', 'new_pixel', '=', '[', 'new_pix_low', ',', 'new_pix_high', ',', 'new_level', ']', 'else', ':', 'new_pixel', '=', 'curr_pixel', 'return', 'new_pixel']
return the curr_frag at a lower resolution
['return', 'the', 'curr_frag', 'at', 'a', 'lower', 'resolution']
train
https://github.com/koszullab/instaGRAAL/blob/1c02ca838e57d8178eec79f223644b2acd0153dd/instagraal/pyramid_sparse.py#L1787-L1807
5,416
saltstack/salt
salt/modules/lxd.py
pylxd_save_object
def pylxd_save_object(obj): ''' Saves an object (profile/image/container) and translate its execpetion on failure obj : The object to save This is an internal method, no CLI Example. ''' try: obj.save() except pylxd.exceptions.LXDAPIException as e: raise CommandExecutionError(six.text_type(e)) return True
python
def pylxd_save_object(obj): ''' Saves an object (profile/image/container) and translate its execpetion on failure obj : The object to save This is an internal method, no CLI Example. ''' try: obj.save() except pylxd.exceptions.LXDAPIException as e: raise CommandExecutionError(six.text_type(e)) return True
['def', 'pylxd_save_object', '(', 'obj', ')', ':', 'try', ':', 'obj', '.', 'save', '(', ')', 'except', 'pylxd', '.', 'exceptions', '.', 'LXDAPIException', 'as', 'e', ':', 'raise', 'CommandExecutionError', '(', 'six', '.', 'text_type', '(', 'e', ')', ')', 'return', 'True']
Saves an object (profile/image/container) and translate its execpetion on failure obj : The object to save This is an internal method, no CLI Example.
['Saves', 'an', 'object', '(', 'profile', '/', 'image', '/', 'container', ')', 'and', 'translate', 'its', 'execpetion', 'on', 'failure']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L402-L416
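A hedged sketch of the exception-translation pattern in the record above, with stand-in exception classes so it runs without salt or pylxd installed; none of the class names below belong to either library.

    class CommandExecutionError(Exception):
        """Stand-in for salt's CommandExecutionError."""

    class LXDAPIError(Exception):
        """Stand-in for pylxd.exceptions.LXDAPIException."""

    class BrokenProfile:
        def save(self):
            raise LXDAPIError("profile rejected by the LXD API")

    def save_object(obj):
        # Translate the library-specific error into the execution-module error.
        try:
            obj.save()
        except LXDAPIError as e:
            raise CommandExecutionError(str(e))
        return True

    try:
        save_object(BrokenProfile())
    except CommandExecutionError as err:
        print(err)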
5,417
JIC-CSB/jicimagelib
jicimagelib/util/array.py
check_dtype
def check_dtype(array, allowed): """Raises TypeError if the array is not of an allowed dtype. :param array: array whose dtype is to be checked :param allowed: instance or list of allowed dtypes """ if not hasattr(allowed, "__iter__"): allowed = [allowed,] if array.dtype not in allowed: raise(TypeError( "Invalid dtype {}. Allowed dtype(s): {}".format(array.dtype, allowed)))
python
def check_dtype(array, allowed): """Raises TypeError if the array is not of an allowed dtype. :param array: array whose dtype is to be checked :param allowed: instance or list of allowed dtypes """ if not hasattr(allowed, "__iter__"): allowed = [allowed,] if array.dtype not in allowed: raise(TypeError( "Invalid dtype {}. Allowed dtype(s): {}".format(array.dtype, allowed)))
['def', 'check_dtype', '(', 'array', ',', 'allowed', ')', ':', 'if', 'not', 'hasattr', '(', 'allowed', ',', '"__iter__"', ')', ':', 'allowed', '=', '[', 'allowed', ',', ']', 'if', 'array', '.', 'dtype', 'not', 'in', 'allowed', ':', 'raise', '(', 'TypeError', '(', '"Invalid dtype {}. Allowed dtype(s): {}"', '.', 'format', '(', 'array', '.', 'dtype', ',', 'allowed', ')', ')', ')']
Raises TypeError if the array is not of an allowed dtype. :param array: array whose dtype is to be checked :param allowed: instance or list of allowed dtypes
['Raises', 'TypeError', 'if', 'the', 'array', 'is', 'not', 'of', 'an', 'allowed', 'dtype', '.']
train
https://github.com/JIC-CSB/jicimagelib/blob/fbd67accb2e6d55969c6d4ed7e8b4bb4ab65cd44/jicimagelib/util/array.py#L54-L64
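For a runnable illustration, the dtype check from the record above is repeated here (lightly reformatted) and exercised against NumPy arrays; the sample arrays are made up.

    import numpy as np

    def check_dtype(array, allowed):
        if not hasattr(allowed, "__iter__"):
            allowed = [allowed]
        if array.dtype not in allowed:
            raise TypeError(
                "Invalid dtype {}. Allowed dtype(s): {}".format(array.dtype, allowed))

    check_dtype(np.zeros((2, 2), dtype=np.uint8), np.uint8)          # passes silently
    try:
        check_dtype(np.zeros((2, 2), dtype=np.float64), [np.uint8, np.uint16])
    except TypeError as err:
        print(err)                                                    # reports the bad dtype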
5,418
iotile/coretools
iotileemulate/iotile/emulate/virtual/emulated_tile.py
EmulatedTile.list_config_variables
def list_config_variables(self, offset): """List defined config variables up to 9 at a time.""" names = sorted(self._config_variables) names = names[offset:offset + 9] count = len(names) if len(names) < 9: names += [0]*(9 - count) return [count] + names
python
def list_config_variables(self, offset): """List defined config variables up to 9 at a time.""" names = sorted(self._config_variables) names = names[offset:offset + 9] count = len(names) if len(names) < 9: names += [0]*(9 - count) return [count] + names
['def', 'list_config_variables', '(', 'self', ',', 'offset', ')', ':', 'names', '=', 'sorted', '(', 'self', '.', '_config_variables', ')', 'names', '=', 'names', '[', 'offset', ':', 'offset', '+', '9', ']', 'count', '=', 'len', '(', 'names', ')', 'if', 'len', '(', 'names', ')', '<', '9', ':', 'names', '+=', '[', '0', ']', '*', '(', '9', '-', 'count', ')', 'return', '[', 'count', ']', '+', 'names']
List defined config variables up to 9 at a time.
['List', 'defined', 'config', 'variables', 'up', 'to', '9', 'at', 'a', 'time', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/virtual/emulated_tile.py#L383-L393
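A standalone sketch of the 9-at-a-time paging with zero padding used by list_config_variables above; page_config_ids and the sample IDs are hypothetical.

    def page_config_ids(config_ids, offset):
        names = sorted(config_ids)[offset:offset + 9]
        count = len(names)
        return [count] + names + [0] * (9 - count)

    ids = [0x8002, 0x8000, 0x8001]
    print(page_config_ids(ids, 0))   # [3, 32768, 32769, 32770, 0, 0, 0, 0, 0, 0]
    print(page_config_ids(ids, 2))   # [1, 32770, 0, 0, 0, 0, 0, 0, 0, 0]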
5,419
callowayproject/Transmogrify
transmogrify/images2gif.py
GifWriter.get_sub_rectangles
def get_sub_rectangles(self, ims): """ get_sub_rectangles(ims) Calculate the minimal rectangles that need updating each frame. Returns a two-element tuple containing the cropped images and a list of x-y positions. Calculating the subrectangles takes extra time, obviously. However, if the image sizes were reduced, the actual writing of the GIF goes faster. In some cases applying this method produces a GIF faster. """ # Check image count if len(ims) < 2: return ims, [(0, 0) for i in ims] # We need numpy if np is None: raise RuntimeError("Need Numpy to calculate sub-rectangles. ") # Prepare ims2 = [ims[0]] xy = [(0, 0)] # Iterate over images prev = ims[0] for im in ims[1:]: # Get difference, sum over colors diff = np.abs(im - prev) if diff.ndim == 3: diff = diff.sum(2) # Get begin and end for both dimensions x = np.argwhere(diff.sum(0)) y = np.argwhere(diff.sum(1)) # Get rect coordinates if x.size and y.size: x0, x1 = x[0], x[-1] + 1 y0, y1 = y[0], y[-1] + 1 else: # No change ... make it minimal x0, x1 = 0, 2 y0, y1 = 0, 2 # Cut out and store im2 = im[y0:y1, x0:x1] prev = im ims2.append(im2) xy.append((x0, y0)) return ims2, xy
python
def get_sub_rectangles(self, ims): """ get_sub_rectangles(ims) Calculate the minimal rectangles that need updating each frame. Returns a two-element tuple containing the cropped images and a list of x-y positions. Calculating the subrectangles takes extra time, obviously. However, if the image sizes were reduced, the actual writing of the GIF goes faster. In some cases applying this method produces a GIF faster. """ # Check image count if len(ims) < 2: return ims, [(0, 0) for i in ims] # We need numpy if np is None: raise RuntimeError("Need Numpy to calculate sub-rectangles. ") # Prepare ims2 = [ims[0]] xy = [(0, 0)] # Iterate over images prev = ims[0] for im in ims[1:]: # Get difference, sum over colors diff = np.abs(im - prev) if diff.ndim == 3: diff = diff.sum(2) # Get begin and end for both dimensions x = np.argwhere(diff.sum(0)) y = np.argwhere(diff.sum(1)) # Get rect coordinates if x.size and y.size: x0, x1 = x[0], x[-1] + 1 y0, y1 = y[0], y[-1] + 1 else: # No change ... make it minimal x0, x1 = 0, 2 y0, y1 = 0, 2 # Cut out and store im2 = im[y0:y1, x0:x1] prev = im ims2.append(im2) xy.append((x0, y0)) return ims2, xy
['def', 'get_sub_rectangles', '(', 'self', ',', 'ims', ')', ':', '# Check image count', 'if', 'len', '(', 'ims', ')', '<', '2', ':', 'return', 'ims', ',', '[', '(', '0', ',', '0', ')', 'for', 'i', 'in', 'ims', ']', '# We need numpy', 'if', 'np', 'is', 'None', ':', 'raise', 'RuntimeError', '(', '"Need Numpy to calculate sub-rectangles. "', ')', '# Prepare', 'ims2', '=', '[', 'ims', '[', '0', ']', ']', 'xy', '=', '[', '(', '0', ',', '0', ')', ']', '# Iterate over images', 'prev', '=', 'ims', '[', '0', ']', 'for', 'im', 'in', 'ims', '[', '1', ':', ']', ':', '# Get difference, sum over colors', 'diff', '=', 'np', '.', 'abs', '(', 'im', '-', 'prev', ')', 'if', 'diff', '.', 'ndim', '==', '3', ':', 'diff', '=', 'diff', '.', 'sum', '(', '2', ')', '# Get begin and end for both dimensions', 'x', '=', 'np', '.', 'argwhere', '(', 'diff', '.', 'sum', '(', '0', ')', ')', 'y', '=', 'np', '.', 'argwhere', '(', 'diff', '.', 'sum', '(', '1', ')', ')', '# Get rect coordinates', 'if', 'x', '.', 'size', 'and', 'y', '.', 'size', ':', 'x0', ',', 'x1', '=', 'x', '[', '0', ']', ',', 'x', '[', '-', '1', ']', '+', '1', 'y0', ',', 'y1', '=', 'y', '[', '0', ']', ',', 'y', '[', '-', '1', ']', '+', '1', 'else', ':', '# No change ... make it minimal', 'x0', ',', 'x1', '=', '0', ',', '2', 'y0', ',', 'y1', '=', '0', ',', '2', '# Cut out and store', 'im2', '=', 'im', '[', 'y0', ':', 'y1', ',', 'x0', ':', 'x1', ']', 'prev', '=', 'im', 'ims2', '.', 'append', '(', 'im2', ')', 'xy', '.', 'append', '(', '(', 'x0', ',', 'y0', ')', ')', 'return', 'ims2', ',', 'xy']
get_sub_rectangles(ims) Calculate the minimal rectangles that need updating each frame. Returns a two-element tuple containing the cropped images and a list of x-y positions. Calculating the subrectangles takes extra time, obviously. However, if the image sizes were reduced, the actual writing of the GIF goes faster. In some cases applying this method produces a GIF faster.
['get_sub_rectangles', '(', 'ims', ')']
train
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/images2gif.py#L294-L344
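A small NumPy-only sketch of the core idea above: find the bounding box of pixels that changed between two frames and crop to it. The frames are made up, and np.flatnonzero stands in for np.argwhere for brevity.

    import numpy as np

    prev = np.zeros((6, 8), dtype=np.int16)
    curr = prev.copy()
    curr[2:4, 3:6] = 7                        # only this region changes

    diff = np.abs(curr - prev)
    x = np.flatnonzero(diff.sum(0))           # columns with any change
    y = np.flatnonzero(diff.sum(1))           # rows with any change
    x0, x1 = int(x[0]), int(x[-1]) + 1
    y0, y1 = int(y[0]), int(y[-1]) + 1

    print((y0, y1), (x0, x1))                 # (2, 4) (3, 6)
    print(curr[y0:y1, x0:x1])                 # the cropped sub-rectangle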
5,420
juergberinger/cmdhelper
cmdhelper.py
LogWriter.write
def write(self, text): """Write text. An additional attribute terminator with a value of None is added to the logging record to indicate that StreamHandler should not add a newline.""" self.logger.log(self.loglevel, text, extra={'terminator': None})
python
def write(self, text): """Write text. An additional attribute terminator with a value of None is added to the logging record to indicate that StreamHandler should not add a newline.""" self.logger.log(self.loglevel, text, extra={'terminator': None})
['def', 'write', '(', 'self', ',', 'text', ')', ':', 'self', '.', 'logger', '.', 'log', '(', 'self', '.', 'loglevel', ',', 'text', ',', 'extra', '=', '{', "'terminator'", ':', 'None', '}', ')']
Write text. An additional attribute terminator with a value of None is added to the logging record to indicate that StreamHandler should not add a newline.
['Write', 'text', '.', 'An', 'additional', 'attribute', 'terminator', 'with', 'a', 'value', 'of', 'None', 'is', 'added', 'to', 'the', 'logging', 'record', 'to', 'indicate', 'that', 'StreamHandler', 'should', 'not', 'add', 'a', 'newline', '.']
train
https://github.com/juergberinger/cmdhelper/blob/f8453659cf5b43f509a5a01470ea784acae7caac/cmdhelper.py#L110-L114
5,421
ltworf/typedload
typedload/dataloader.py
_enumload
def _enumload(l: Loader, value, type_) -> Enum: """ This loads something into an Enum. It tries with basic types first. If that fails, it tries to look for type annotations inside the Enum, and tries to use those to load the value into something that is compatible with the Enum. Of course if that fails too, a ValueError is raised. """ try: # Try naïve conversion return type_(value) except: pass # Try with the typing hints for _, t in get_type_hints(type_).items(): try: return type_(l.load(value, t)) except: pass raise TypedloadValueError( 'Value could not be loaded into %s' % type_, value=value, type_=type_ )
python
def _enumload(l: Loader, value, type_) -> Enum: """ This loads something into an Enum. It tries with basic types first. If that fails, it tries to look for type annotations inside the Enum, and tries to use those to load the value into something that is compatible with the Enum. Of course if that fails too, a ValueError is raised. """ try: # Try naïve conversion return type_(value) except: pass # Try with the typing hints for _, t in get_type_hints(type_).items(): try: return type_(l.load(value, t)) except: pass raise TypedloadValueError( 'Value could not be loaded into %s' % type_, value=value, type_=type_ )
['def', '_enumload', '(', 'l', ':', 'Loader', ',', 'value', ',', 'type_', ')', '->', 'Enum', ':', 'try', ':', '# Try naïve conversion', 'return', 'type_', '(', 'value', ')', 'except', ':', 'pass', '# Try with the typing hints', 'for', '_', ',', 't', 'in', 'get_type_hints', '(', 'type_', ')', '.', 'items', '(', ')', ':', 'try', ':', 'return', 'type_', '(', 'l', '.', 'load', '(', 'value', ',', 't', ')', ')', 'except', ':', 'pass', 'raise', 'TypedloadValueError', '(', "'Value could not be loaded into %s'", '%', 'type_', ',', 'value', '=', 'value', ',', 'type_', '=', 'type_', ')']
This loads something into an Enum. It tries with basic types first. If that fails, it tries to look for type annotations inside the Enum, and tries to use those to load the value into something that is compatible with the Enum. Of course if that fails too, a ValueError is raised.
['This', 'loads', 'something', 'into', 'an', 'Enum', '.']
train
https://github.com/ltworf/typedload/blob/7fd130612963bfcec3242698463ef863ca4af927/typedload/dataloader.py#L436-L464
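A simplified, self-contained illustration of the fallback described above: first try Enum(value) directly, then coerce the value through another type. The real loader discovers that type from the Enum's annotations via get_type_hints; here it is hard-coded to int purely for the demo.

    from enum import Enum

    class Color(Enum):
        RED = 1
        GREEN = 2

    def load_color(value):
        try:
            return Color(value)          # naive conversion
        except ValueError:
            return Color(int(value))     # assumed "type hint": int

    print(load_color(1))      # Color.RED
    print(load_color("2"))    # Color.GREEN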
5,422
VasilyStepanov/pywidl
pywidl/grammar.py
p_OptionalOrRequiredArgument_optional
def p_OptionalOrRequiredArgument_optional(p): """OptionalOrRequiredArgument : optional Type IDENTIFIER Default""" p[0] = model.OperationArgument( type=p[2], name=p[3], optional=True, default=p[4])
python
def p_OptionalOrRequiredArgument_optional(p): """OptionalOrRequiredArgument : optional Type IDENTIFIER Default""" p[0] = model.OperationArgument( type=p[2], name=p[3], optional=True, default=p[4])
['def', 'p_OptionalOrRequiredArgument_optional', '(', 'p', ')', ':', 'p', '[', '0', ']', '=', 'model', '.', 'OperationArgument', '(', 'type', '=', 'p', '[', '2', ']', ',', 'name', '=', 'p', '[', '3', ']', ',', 'optional', '=', 'True', ',', 'default', '=', 'p', '[', '4', ']', ')']
OptionalOrRequiredArgument : optional Type IDENTIFIER Default
['OptionalOrRequiredArgument', ':', 'optional', 'Type', 'IDENTIFIER', 'Default']
train
https://github.com/VasilyStepanov/pywidl/blob/8d84b2e53157bfe276bf16301c19e8b6b32e861e/pywidl/grammar.py#L510-L513
5,423
Hackerfleet/hfos
hfos/database.py
backup
def backup(schema, uuid, export_filter, export_format, filename, pretty, export_all, omit): """Exports all collections to (JSON-) files.""" export_format = export_format.upper() if pretty: indent = 4 else: indent = 0 f = None if filename: try: f = open(filename, 'w') except (IOError, PermissionError) as e: backup_log('Could not open output file for writing:', exc=True, lvl=error) return def output(what, convert=False): """Output the backup in a specified format.""" if convert: if export_format == 'JSON': data = json.dumps(what, indent=indent) else: data = "" else: data = what if not filename: print(data) else: f.write(data) if schema is None: if export_all is False: backup_log('No schema given.', lvl=warn) return else: schemata = objectmodels.keys() else: schemata = [schema] all_items = {} for schema_item in schemata: model = objectmodels[schema_item] if uuid: obj = model.find({'uuid': uuid}) elif export_filter: obj = model.find(literal_eval(export_filter)) else: obj = model.find() items = [] for item in obj: fields = item.serializablefields() for field in omit: try: fields.pop(field) except KeyError: pass items.append(fields) all_items[schema_item] = items # if pretty is True: # output('\n// Objectmodel: ' + schema_item + '\n\n') # output(schema_item + ' = [\n') output(all_items, convert=True) if f is not None: f.flush() f.close()
python
def backup(schema, uuid, export_filter, export_format, filename, pretty, export_all, omit): """Exports all collections to (JSON-) files.""" export_format = export_format.upper() if pretty: indent = 4 else: indent = 0 f = None if filename: try: f = open(filename, 'w') except (IOError, PermissionError) as e: backup_log('Could not open output file for writing:', exc=True, lvl=error) return def output(what, convert=False): """Output the backup in a specified format.""" if convert: if export_format == 'JSON': data = json.dumps(what, indent=indent) else: data = "" else: data = what if not filename: print(data) else: f.write(data) if schema is None: if export_all is False: backup_log('No schema given.', lvl=warn) return else: schemata = objectmodels.keys() else: schemata = [schema] all_items = {} for schema_item in schemata: model = objectmodels[schema_item] if uuid: obj = model.find({'uuid': uuid}) elif export_filter: obj = model.find(literal_eval(export_filter)) else: obj = model.find() items = [] for item in obj: fields = item.serializablefields() for field in omit: try: fields.pop(field) except KeyError: pass items.append(fields) all_items[schema_item] = items # if pretty is True: # output('\n// Objectmodel: ' + schema_item + '\n\n') # output(schema_item + ' = [\n') output(all_items, convert=True) if f is not None: f.flush() f.close()
['def', 'backup', '(', 'schema', ',', 'uuid', ',', 'export_filter', ',', 'export_format', ',', 'filename', ',', 'pretty', ',', 'export_all', ',', 'omit', ')', ':', 'export_format', '=', 'export_format', '.', 'upper', '(', ')', 'if', 'pretty', ':', 'indent', '=', '4', 'else', ':', 'indent', '=', '0', 'f', '=', 'None', 'if', 'filename', ':', 'try', ':', 'f', '=', 'open', '(', 'filename', ',', "'w'", ')', 'except', '(', 'IOError', ',', 'PermissionError', ')', 'as', 'e', ':', 'backup_log', '(', "'Could not open output file for writing:'", ',', 'exc', '=', 'True', ',', 'lvl', '=', 'error', ')', 'return', 'def', 'output', '(', 'what', ',', 'convert', '=', 'False', ')', ':', '"""Output the backup in a specified format."""', 'if', 'convert', ':', 'if', 'export_format', '==', "'JSON'", ':', 'data', '=', 'json', '.', 'dumps', '(', 'what', ',', 'indent', '=', 'indent', ')', 'else', ':', 'data', '=', '""', 'else', ':', 'data', '=', 'what', 'if', 'not', 'filename', ':', 'print', '(', 'data', ')', 'else', ':', 'f', '.', 'write', '(', 'data', ')', 'if', 'schema', 'is', 'None', ':', 'if', 'export_all', 'is', 'False', ':', 'backup_log', '(', "'No schema given.'", ',', 'lvl', '=', 'warn', ')', 'return', 'else', ':', 'schemata', '=', 'objectmodels', '.', 'keys', '(', ')', 'else', ':', 'schemata', '=', '[', 'schema', ']', 'all_items', '=', '{', '}', 'for', 'schema_item', 'in', 'schemata', ':', 'model', '=', 'objectmodels', '[', 'schema_item', ']', 'if', 'uuid', ':', 'obj', '=', 'model', '.', 'find', '(', '{', "'uuid'", ':', 'uuid', '}', ')', 'elif', 'export_filter', ':', 'obj', '=', 'model', '.', 'find', '(', 'literal_eval', '(', 'export_filter', ')', ')', 'else', ':', 'obj', '=', 'model', '.', 'find', '(', ')', 'items', '=', '[', ']', 'for', 'item', 'in', 'obj', ':', 'fields', '=', 'item', '.', 'serializablefields', '(', ')', 'for', 'field', 'in', 'omit', ':', 'try', ':', 'fields', '.', 'pop', '(', 'field', ')', 'except', 'KeyError', ':', 'pass', 'items', '.', 'append', '(', 'fields', ')', 'all_items', '[', 'schema_item', ']', '=', 'items', '# if pretty is True:', "# output('\\n// Objectmodel: ' + schema_item + '\\n\\n')", "# output(schema_item + ' = [\\n')", 'output', '(', 'all_items', ',', 'convert', '=', 'True', ')', 'if', 'f', 'is', 'not', 'None', ':', 'f', '.', 'flush', '(', ')', 'f', '.', 'close', '(', ')']
Exports all collections to (JSON-) files.
['Exports', 'all', 'collections', 'to', '(', 'JSON', '-', ')', 'files', '.']
train
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/database.py#L657-L733
5,424
ibis-project/ibis
ibis/expr/api.py
ifelse
def ifelse(arg, true_expr, false_expr): """ Shorthand for implementing ternary expressions bool_expr.ifelse(0, 1) e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END """ # Result will be the result of promotion of true/false exprs. These # might be conflicting types; same type resolution as case expressions # must be used. case = ops.SearchedCaseBuilder() return case.when(arg, true_expr).else_(false_expr).end()
python
def ifelse(arg, true_expr, false_expr): """ Shorthand for implementing ternary expressions bool_expr.ifelse(0, 1) e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END """ # Result will be the result of promotion of true/false exprs. These # might be conflicting types; same type resolution as case expressions # must be used. case = ops.SearchedCaseBuilder() return case.when(arg, true_expr).else_(false_expr).end()
['def', 'ifelse', '(', 'arg', ',', 'true_expr', ',', 'false_expr', ')', ':', '# Result will be the result of promotion of true/false exprs. These', '# might be conflicting types; same type resolution as case expressions', '# must be used.', 'case', '=', 'ops', '.', 'SearchedCaseBuilder', '(', ')', 'return', 'case', '.', 'when', '(', 'arg', ',', 'true_expr', ')', '.', 'else_', '(', 'false_expr', ')', '.', 'end', '(', ')']
Shorthand for implementing ternary expressions bool_expr.ifelse(0, 1) e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END
['Shorthand', 'for', 'implementing', 'ternary', 'expressions']
train
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L1899-L1910
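The docstring above maps the ternary onto a searched CASE; a rough sketch of the SQL shape it corresponds to, using plain string formatting rather than the actual ibis compiler.

    def ifelse_sql(condition, true_value, false_value):
        # The searched-CASE form that bool_expr.ifelse(true, false) corresponds to.
        return "CASE WHEN {} THEN {} ELSE {} END".format(condition, true_value, false_value)

    print(ifelse_sql("amount > 0", 0, 1))   # CASE WHEN amount > 0 THEN 0 ELSE 1 END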
5,425
konstantinstadler/country_converter
country_converter/country_converter.py
CountryConverter._separate_exclude_cases
def _separate_exclude_cases(name, exclude_prefix): """ Splits the excluded Parameters ---------- name : str Name of the country/region to convert. exclude_prefix : list of valid regex strings List of indicators which negate the subsequent country/region. These prefixes and everything following will not be converted. E.g. 'Asia excluding China' becomes 'Asia' and 'China excluding Hong Kong' becomes 'China' prior to conversion Returns ------- dict with 'clean_name' : str as name without anything following exclude_prefix 'excluded_countries' : list list of excluded countries """ excluder = re.compile('|'.join(exclude_prefix)) split_entries = excluder.split(name) return {'clean_name': split_entries[0], 'excluded_countries': split_entries[1:]}
python
def _separate_exclude_cases(name, exclude_prefix): """ Splits the excluded Parameters ---------- name : str Name of the country/region to convert. exclude_prefix : list of valid regex strings List of indicators which negate the subsequent country/region. These prefixes and everything following will not be converted. E.g. 'Asia excluding China' becomes 'Asia' and 'China excluding Hong Kong' becomes 'China' prior to conversion Returns ------- dict with 'clean_name' : str as name without anything following exclude_prefix 'excluded_countries' : list list of excluded countries """ excluder = re.compile('|'.join(exclude_prefix)) split_entries = excluder.split(name) return {'clean_name': split_entries[0], 'excluded_countries': split_entries[1:]}
['def', '_separate_exclude_cases', '(', 'name', ',', 'exclude_prefix', ')', ':', 'excluder', '=', 're', '.', 'compile', '(', "'|'", '.', 'join', '(', 'exclude_prefix', ')', ')', 'split_entries', '=', 'excluder', '.', 'split', '(', 'name', ')', 'return', '{', "'clean_name'", ':', 'split_entries', '[', '0', ']', ',', "'excluded_countries'", ':', 'split_entries', '[', '1', ':', ']', '}']
Splits the excluded Parameters ---------- name : str Name of the country/region to convert. exclude_prefix : list of valid regex strings List of indicators which negate the subsequent country/region. These prefixes and everything following will not be converted. E.g. 'Asia excluding China' becomes 'Asia' and 'China excluding Hong Kong' becomes 'China' prior to conversion Returns ------- dict with 'clean_name' : str as name without anything following exclude_prefix 'excluded_countries' : list list of excluded countries
['Splits', 'the', 'excluded']
train
https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L332-L361
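A self-contained rerun of the prefix-splitting regex above; the exclude_prefix values are illustrative, not the defaults shipped with country_converter.

    import re

    exclude_prefix = [' excluding ', ' without ']
    excluder = re.compile('|'.join(exclude_prefix))

    parts = excluder.split('Asia excluding China without Japan')
    print({'clean_name': parts[0], 'excluded_countries': parts[1:]})
    # {'clean_name': 'Asia', 'excluded_countries': ['China', 'Japan']}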
5,426
TeamHG-Memex/eli5
eli5/formatters/html.py
_format_feature
def _format_feature(feature, weight, hl_spaces): # type: (...) -> str """ Format any feature. """ if isinstance(feature, FormattedFeatureName): return feature.format() elif (isinstance(feature, list) and all('name' in x and 'sign' in x for x in feature)): return _format_unhashed_feature(feature, weight, hl_spaces=hl_spaces) else: return _format_single_feature(feature, weight, hl_spaces=hl_spaces)
python
def _format_feature(feature, weight, hl_spaces): # type: (...) -> str """ Format any feature. """ if isinstance(feature, FormattedFeatureName): return feature.format() elif (isinstance(feature, list) and all('name' in x and 'sign' in x for x in feature)): return _format_unhashed_feature(feature, weight, hl_spaces=hl_spaces) else: return _format_single_feature(feature, weight, hl_spaces=hl_spaces)
['def', '_format_feature', '(', 'feature', ',', 'weight', ',', 'hl_spaces', ')', ':', '# type: (...) -> str', 'if', 'isinstance', '(', 'feature', ',', 'FormattedFeatureName', ')', ':', 'return', 'feature', '.', 'format', '(', ')', 'elif', '(', 'isinstance', '(', 'feature', ',', 'list', ')', 'and', 'all', '(', "'name'", 'in', 'x', 'and', "'sign'", 'in', 'x', 'for', 'x', 'in', 'feature', ')', ')', ':', 'return', '_format_unhashed_feature', '(', 'feature', ',', 'weight', ',', 'hl_spaces', '=', 'hl_spaces', ')', 'else', ':', 'return', '_format_single_feature', '(', 'feature', ',', 'weight', ',', 'hl_spaces', '=', 'hl_spaces', ')']
Format any feature.
['Format', 'any', 'feature', '.']
train
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/formatters/html.py#L299-L309
5,427
rtfd/sphinxcontrib-dotnetdomain
sphinxcontrib/dotnetdomain.py
DotNetSignature.full_name
def full_name(self): """Return full name of member""" if self.prefix is not None: return '.'.join([self.prefix, self.member]) return self.member
python
def full_name(self): """Return full name of member""" if self.prefix is not None: return '.'.join([self.prefix, self.member]) return self.member
['def', 'full_name', '(', 'self', ')', ':', 'if', 'self', '.', 'prefix', 'is', 'not', 'None', ':', 'return', "'.'", '.', 'join', '(', '[', 'self', '.', 'prefix', ',', 'self', '.', 'member', ']', ')', 'return', 'self', '.', 'member']
Return full name of member
['Return', 'full', 'name', 'of', 'member']
train
https://github.com/rtfd/sphinxcontrib-dotnetdomain/blob/fbc6a81b9993dc5d06866c4483593421b53b9a61/sphinxcontrib/dotnetdomain.py#L70-L74
5,428
pytroll/satpy
satpy/writers/cf_writer.py
omerc2cf
def omerc2cf(area): """Return the cf grid mapping for the omerc projection.""" proj_dict = area.proj_dict args = dict(azimuth_of_central_line=proj_dict.get('alpha'), latitude_of_projection_origin=proj_dict.get('lat_0'), longitude_of_projection_origin=proj_dict.get('lonc'), grid_mapping_name='oblique_mercator', reference_ellipsoid_name=proj_dict.get('ellps', 'WGS84'), false_easting=0., false_northing=0. ) if "no_rot" in proj_dict: args['no_rotation'] = 1 if "gamma" in proj_dict: args['gamma'] = proj_dict['gamma'] return args
python
def omerc2cf(area): """Return the cf grid mapping for the omerc projection.""" proj_dict = area.proj_dict args = dict(azimuth_of_central_line=proj_dict.get('alpha'), latitude_of_projection_origin=proj_dict.get('lat_0'), longitude_of_projection_origin=proj_dict.get('lonc'), grid_mapping_name='oblique_mercator', reference_ellipsoid_name=proj_dict.get('ellps', 'WGS84'), false_easting=0., false_northing=0. ) if "no_rot" in proj_dict: args['no_rotation'] = 1 if "gamma" in proj_dict: args['gamma'] = proj_dict['gamma'] return args
['def', 'omerc2cf', '(', 'area', ')', ':', 'proj_dict', '=', 'area', '.', 'proj_dict', 'args', '=', 'dict', '(', 'azimuth_of_central_line', '=', 'proj_dict', '.', 'get', '(', "'alpha'", ')', ',', 'latitude_of_projection_origin', '=', 'proj_dict', '.', 'get', '(', "'lat_0'", ')', ',', 'longitude_of_projection_origin', '=', 'proj_dict', '.', 'get', '(', "'lonc'", ')', ',', 'grid_mapping_name', '=', "'oblique_mercator'", ',', 'reference_ellipsoid_name', '=', 'proj_dict', '.', 'get', '(', "'ellps'", ',', "'WGS84'", ')', ',', 'false_easting', '=', '0.', ',', 'false_northing', '=', '0.', ')', 'if', '"no_rot"', 'in', 'proj_dict', ':', 'args', '[', "'no_rotation'", ']', '=', '1', 'if', '"gamma"', 'in', 'proj_dict', ':', 'args', '[', "'gamma'", ']', '=', 'proj_dict', '[', "'gamma'", ']', 'return', 'args']
Return the cf grid mapping for the omerc projection.
['Return', 'the', 'cf', 'grid', 'mapping', 'for', 'the', 'omerc', 'projection', '.']
train
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/writers/cf_writer.py#L39-L55
5,429
jobovy/galpy
galpy/potential/TwoPowerSphericalPotential.py
NFWPotential._R2deriv
def _R2deriv(self,R,z,phi=0.,t=0.): """ NAME: _R2deriv PURPOSE: evaluate the second radial derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the second radial derivative HISTORY: 2011-10-09 - Written - Bovy (IAS) """ Rz= R**2.+z**2. sqrtRz= numpy.sqrt(Rz) return (3.*R**4.+2.*R**2.*(z**2.+self.a*sqrtRz)\ -z**2.*(z**2.+self.a*sqrtRz)\ -(2.*R**2.-z**2.)*(self.a**2.+R**2.+z**2.+2.*self.a*sqrtRz)\ *numpy.log(1.+sqrtRz/self.a))\ /Rz**2.5/(self.a+sqrtRz)**2.
python
def _R2deriv(self,R,z,phi=0.,t=0.): """ NAME: _R2deriv PURPOSE: evaluate the second radial derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the second radial derivative HISTORY: 2011-10-09 - Written - Bovy (IAS) """ Rz= R**2.+z**2. sqrtRz= numpy.sqrt(Rz) return (3.*R**4.+2.*R**2.*(z**2.+self.a*sqrtRz)\ -z**2.*(z**2.+self.a*sqrtRz)\ -(2.*R**2.-z**2.)*(self.a**2.+R**2.+z**2.+2.*self.a*sqrtRz)\ *numpy.log(1.+sqrtRz/self.a))\ /Rz**2.5/(self.a+sqrtRz)**2.
['def', '_R2deriv', '(', 'self', ',', 'R', ',', 'z', ',', 'phi', '=', '0.', ',', 't', '=', '0.', ')', ':', 'Rz', '=', 'R', '**', '2.', '+', 'z', '**', '2.', 'sqrtRz', '=', 'numpy', '.', 'sqrt', '(', 'Rz', ')', 'return', '(', '3.', '*', 'R', '**', '4.', '+', '2.', '*', 'R', '**', '2.', '*', '(', 'z', '**', '2.', '+', 'self', '.', 'a', '*', 'sqrtRz', ')', '-', 'z', '**', '2.', '*', '(', 'z', '**', '2.', '+', 'self', '.', 'a', '*', 'sqrtRz', ')', '-', '(', '2.', '*', 'R', '**', '2.', '-', 'z', '**', '2.', ')', '*', '(', 'self', '.', 'a', '**', '2.', '+', 'R', '**', '2.', '+', 'z', '**', '2.', '+', '2.', '*', 'self', '.', 'a', '*', 'sqrtRz', ')', '*', 'numpy', '.', 'log', '(', '1.', '+', 'sqrtRz', '/', 'self', '.', 'a', ')', ')', '/', 'Rz', '**', '2.5', '/', '(', 'self', '.', 'a', '+', 'sqrtRz', ')', '**', '2.']
NAME: _R2deriv PURPOSE: evaluate the second radial derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the second radial derivative HISTORY: 2011-10-09 - Written - Bovy (IAS)
['NAME', ':', '_R2deriv', 'PURPOSE', ':', 'evaluate', 'the', 'second', 'radial', 'derivative', 'for', 'this', 'potential', 'INPUT', ':', 'R', '-', 'Galactocentric', 'cylindrical', 'radius', 'z', '-', 'vertical', 'height', 'phi', '-', 'azimuth', 't', '-', 'time', 'OUTPUT', ':', 'the', 'second', 'radial', 'derivative', 'HISTORY', ':', '2011', '-', '10', '-', '09', '-', 'Written', '-', 'Bovy', '(', 'IAS', ')']
train
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/TwoPowerSphericalPotential.py#L937-L959
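A quick numerical evaluation of the closed-form expression above at an assumed scale length a = 2.0 and position (R, z) = (1.0, 0.5); galpy's amplitude and unit handling is deliberately ignored, so this is only the geometric factor.

    import numpy as np

    a, R, z = 2.0, 1.0, 0.5
    Rz = R**2. + z**2.
    sqrtRz = np.sqrt(Rz)
    d2phi_dR2 = (3.*R**4. + 2.*R**2.*(z**2. + a*sqrtRz)
                 - z**2.*(z**2. + a*sqrtRz)
                 - (2.*R**2. - z**2.)*(a**2. + R**2. + z**2. + 2.*a*sqrtRz)
                 * np.log(1. + sqrtRz/a)) / Rz**2.5 / (a + sqrtRz)**2.
    print(d2phi_dR2)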
5,430
davenquinn/Attitude
attitude/error/axes.py
statistical_axes
def statistical_axes(fit, **kw): """ Hyperbolic error using a statistical process (either sampling or noise errors) Integrates covariance with error level and degrees of freedom for plotting confidence intervals. Degrees of freedom is set to 2, which is the relevant number of independent dimensions to planar fitting of *a priori* centered data. """ method = kw.pop('method', 'noise') confidence_level = kw.pop('confidence_level', 0.95) dof = kw.pop('dof',2) nominal = fit.eigenvalues if method == 'sampling': cov = sampling_covariance(fit,**kw) elif method == 'noise': cov = noise_covariance(fit,**kw) if kw.pop('chisq', False): # Model the incorrect behaviour of using the # Chi2 distribution instead of the Fisher # distribution (which is a measure of the # ratio between the two). z = chi2.ppf(confidence_level,dof) else: z = fisher_statistic(fit.n,confidence_level,dof=dof) # Apply two fisher F parameters (one along each axis) # Since we apply to each axis without division, # it is as if we are applying N.sqrt(2*F) to the entire # distribution, aligning us with (Francq, 2014) err = z*N.sqrt(cov) return apply_error_scaling(nominal, err, n=fit.n, **kw)
python
def statistical_axes(fit, **kw): """ Hyperbolic error using a statistical process (either sampling or noise errors) Integrates covariance with error level and degrees of freedom for plotting confidence intervals. Degrees of freedom is set to 2, which is the relevant number of independent dimensions to planar fitting of *a priori* centered data. """ method = kw.pop('method', 'noise') confidence_level = kw.pop('confidence_level', 0.95) dof = kw.pop('dof',2) nominal = fit.eigenvalues if method == 'sampling': cov = sampling_covariance(fit,**kw) elif method == 'noise': cov = noise_covariance(fit,**kw) if kw.pop('chisq', False): # Model the incorrect behaviour of using the # Chi2 distribution instead of the Fisher # distribution (which is a measure of the # ratio between the two). z = chi2.ppf(confidence_level,dof) else: z = fisher_statistic(fit.n,confidence_level,dof=dof) # Apply two fisher F parameters (one along each axis) # Since we apply to each axis without division, # it is as if we are applying N.sqrt(2*F) to the entire # distribution, aligning us with (Francq, 2014) err = z*N.sqrt(cov) return apply_error_scaling(nominal, err, n=fit.n, **kw)
['def', 'statistical_axes', '(', 'fit', ',', '*', '*', 'kw', ')', ':', 'method', '=', 'kw', '.', 'pop', '(', "'method'", ',', "'noise'", ')', 'confidence_level', '=', 'kw', '.', 'pop', '(', "'confidence_level'", ',', '0.95', ')', 'dof', '=', 'kw', '.', 'pop', '(', "'dof'", ',', '2', ')', 'nominal', '=', 'fit', '.', 'eigenvalues', 'if', 'method', '==', "'sampling'", ':', 'cov', '=', 'sampling_covariance', '(', 'fit', ',', '*', '*', 'kw', ')', 'elif', 'method', '==', "'noise'", ':', 'cov', '=', 'noise_covariance', '(', 'fit', ',', '*', '*', 'kw', ')', 'if', 'kw', '.', 'pop', '(', "'chisq'", ',', 'False', ')', ':', '# Model the incorrect behaviour of using the', '# Chi2 distribution instead of the Fisher', '# distribution (which is a measure of the', '# ratio between the two).', 'z', '=', 'chi2', '.', 'ppf', '(', 'confidence_level', ',', 'dof', ')', 'else', ':', 'z', '=', 'fisher_statistic', '(', 'fit', '.', 'n', ',', 'confidence_level', ',', 'dof', '=', 'dof', ')', '# Apply two fisher F parameters (one along each axis)', '# Since we apply to each axis without division,', '# it is as if we are applying N.sqrt(2*F) to the entire', '# distribution, aligning us with (Francq, 2014)', 'err', '=', 'z', '*', 'N', '.', 'sqrt', '(', 'cov', ')', 'return', 'apply_error_scaling', '(', 'nominal', ',', 'err', ',', 'n', '=', 'fit', '.', 'n', ',', '*', '*', 'kw', ')']
Hyperbolic error using a statistical process (either sampling or noise errors) Integrates covariance with error level and degrees of freedom for plotting confidence intervals. Degrees of freedom is set to 2, which is the relevant number of independent dimensions to planar fitting of *a priori* centered data.
['Hyperbolic', 'error', 'using', 'a', 'statistical', 'process', '(', 'either', 'sampling', 'or', 'noise', 'errors', ')']
train
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/error/axes.py#L108-L146
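A hedged sketch of the distinction the comments above draw between the chi-squared and Fisher F quantiles, using SciPy; the exact form of fisher_statistic in the library may differ, so the F-based line is only an assumed common convention.

    from scipy.stats import chi2, f

    confidence_level, dof, n = 0.95, 2, 1000
    z_chi2 = chi2.ppf(confidence_level, dof)                 # the "incorrect" chisq branch
    z_fisher = dof * f.ppf(confidence_level, dof, n - dof)   # one common Fisher-F scaling (assumption)
    print(z_chi2, z_fisher)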
5,431
pyupio/changelogs
changelogs/pypi.py
get_url_map
def get_url_map(): """ Loads custom/pypi/map.txt and builds a dict where map[package_name] = url :return: dict, urls """ map = {} path = os.path.join( os.path.dirname(os.path.realpath(__file__)), # current working dir ../ "custom", # ../custom/ "pypi", # ../custom/pypi/ "map.txt" # ../custom/pypi/map.txt ) with open(path) as f: for line in f.readlines(): package, url = line.strip().split(": ") map[package] = url return map
python
def get_url_map(): """ Loads custom/pypi/map.txt and builds a dict where map[package_name] = url :return: dict, urls """ map = {} path = os.path.join( os.path.dirname(os.path.realpath(__file__)), # current working dir ../ "custom", # ../custom/ "pypi", # ../custom/pypi/ "map.txt" # ../custom/pypi/map.txt ) with open(path) as f: for line in f.readlines(): package, url = line.strip().split(": ") map[package] = url return map
['def', 'get_url_map', '(', ')', ':', 'map', '=', '{', '}', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'realpath', '(', '__file__', ')', ')', ',', '# current working dir ../', '"custom"', ',', '# ../custom/', '"pypi"', ',', '# ../custom/pypi/', '"map.txt"', '# ../custom/pypi/map.txt', ')', 'with', 'open', '(', 'path', ')', 'as', 'f', ':', 'for', 'line', 'in', 'f', '.', 'readlines', '(', ')', ':', 'package', ',', 'url', '=', 'line', '.', 'strip', '(', ')', '.', 'split', '(', '": "', ')', 'map', '[', 'package', ']', '=', 'url', 'return', 'map']
Loads custom/pypi/map.txt and builds a dict where map[package_name] = url :return: dict, urls
['Loads', 'custom', '/', 'pypi', '/', 'map', '.', 'txt', 'and', 'builds', 'a', 'dict', 'where', 'map', '[', 'package_name', ']', '=', 'url', ':', 'return', ':', 'dict', 'urls']
train
https://github.com/pyupio/changelogs/blob/0cdb929ac4546c766cd7eef9ae4eb4baaa08f452/changelogs/pypi.py#L34-L50
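A self-contained version of the map.txt parsing above; the file contents are stubbed with io.StringIO and the two entries are hypothetical.

    import io

    map_txt = io.StringIO(
        "Django: https://docs.djangoproject.com/en/stable/releases/\n"
        "requests: https://github.com/requests/requests/blob/master/HISTORY.rst\n")

    url_map = {}
    for line in map_txt.readlines():
        package, url = line.strip().split(": ")
        url_map[package] = url
    print(url_map["requests"])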
5,432
tanghaibao/goatools
goatools/gosubdag/godag_rcnt.py
CountRelatives.get_d1str
def get_d1str(self, goobj, reverse=False): """Get D1-string representing all parent terms which are depth-01 GO terms.""" return "".join(sorted(self.get_parents_letters(goobj), reverse=reverse))
python
def get_d1str(self, goobj, reverse=False): """Get D1-string representing all parent terms which are depth-01 GO terms.""" return "".join(sorted(self.get_parents_letters(goobj), reverse=reverse))
['def', 'get_d1str', '(', 'self', ',', 'goobj', ',', 'reverse', '=', 'False', ')', ':', 'return', '""', '.', 'join', '(', 'sorted', '(', 'self', '.', 'get_parents_letters', '(', 'goobj', ')', ',', 'reverse', '=', 'reverse', ')', ')']
Get D1-string representing all parent terms which are depth-01 GO terms.
['Get', 'D1', '-', 'string', 'representing', 'all', 'parent', 'terms', 'which', 'are', 'depth', '-', '01', 'GO', 'terms', '.']
train
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/godag_rcnt.py#L36-L38
5,433
eventbrite/pysoa
pysoa/server/django/cache.py
PySOAPyLibMCCache.close
def close(self, for_shutdown=False, **_kwargs): """ Only call super().close() if the server is shutting down (not between requests). :param for_shutdown: If `False` (the default) """ if for_shutdown: super(PySOAPyLibMCCache, self).close()
python
def close(self, for_shutdown=False, **_kwargs): """ Only call super().close() if the server is shutting down (not between requests). :param for_shutdown: If `False` (the default) """ if for_shutdown: super(PySOAPyLibMCCache, self).close()
['def', 'close', '(', 'self', ',', 'for_shutdown', '=', 'False', ',', '*', '*', '_kwargs', ')', ':', 'if', 'for_shutdown', ':', 'super', '(', 'PySOAPyLibMCCache', ',', 'self', ')', '.', 'close', '(', ')']
Only call super().close() if the server is shutting down (not between requests). :param for_shutdown: If `False` (the default)
['Only', 'call', 'super', '()', '.', 'close', '()', 'if', 'the', 'server', 'is', 'shutting', 'down', '(', 'not', 'between', 'requests', ')', '.']
train
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/server/django/cache.py#L71-L78
5,434
inspirehep/inspire-query-parser
inspire_query_parser/visitors/restructuring_visitor.py
RestructuringVisitor.visit_spires_keyword_query
def visit_spires_keyword_query(self, node): """Transform a :class:`SpiresKeywordQuery` into a :class:`KeywordOp`. Notes: In case the value being a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained :class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and values, all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or :class:`SimpleValueNegation`.) """ keyword = node.left.accept(self) value = node.right.accept(self) if isinstance(value, SimpleValueBooleanQuery): return _convert_simple_value_boolean_query_to_and_boolean_queries(value, keyword) return KeywordOp(keyword, value)
python
def visit_spires_keyword_query(self, node): """Transform a :class:`SpiresKeywordQuery` into a :class:`KeywordOp`. Notes: In case the value being a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained :class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and values, all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or :class:`SimpleValueNegation`.) """ keyword = node.left.accept(self) value = node.right.accept(self) if isinstance(value, SimpleValueBooleanQuery): return _convert_simple_value_boolean_query_to_and_boolean_queries(value, keyword) return KeywordOp(keyword, value)
['def', 'visit_spires_keyword_query', '(', 'self', ',', 'node', ')', ':', 'keyword', '=', 'node', '.', 'left', '.', 'accept', '(', 'self', ')', 'value', '=', 'node', '.', 'right', '.', 'accept', '(', 'self', ')', 'if', 'isinstance', '(', 'value', ',', 'SimpleValueBooleanQuery', ')', ':', 'return', '_convert_simple_value_boolean_query_to_and_boolean_queries', '(', 'value', ',', 'keyword', ')', 'return', 'KeywordOp', '(', 'keyword', ',', 'value', ')']
Transform a :class:`SpiresKeywordQuery` into a :class:`KeywordOp`. Notes: In case the value being a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained :class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and values, all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or :class:`SimpleValueNegation`.)
['Transform', 'a', ':', 'class', ':', 'SpiresKeywordQuery', 'into', 'a', ':', 'class', ':', 'KeywordOp', '.']
train
https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/restructuring_visitor.py#L206-L221
5,435
toros-astro/corral
corral/db/__init__.py
session_scope
def session_scope(session_cls=None): """Provide a transactional scope around a series of operations.""" session = session_cls() if session_cls else Session() try: yield session session.commit() except Exception: session.rollback() raise finally: session.close()
python
def session_scope(session_cls=None): """Provide a transactional scope around a series of operations.""" session = session_cls() if session_cls else Session() try: yield session session.commit() except Exception: session.rollback() raise finally: session.close()
['def', 'session_scope', '(', 'session_cls', '=', 'None', ')', ':', 'session', '=', 'session_cls', '(', ')', 'if', 'session_cls', 'else', 'Session', '(', ')', 'try', ':', 'yield', 'session', 'session', '.', 'commit', '(', ')', 'except', 'Exception', ':', 'session', '.', 'rollback', '(', ')', 'raise', 'finally', ':', 'session', '.', 'close', '(', ')']
Provide a transactional scope around a series of operations.
['Provide', 'a', 'transactional', 'scope', 'around', 'a', 'series', 'of', 'operations', '.']
train
https://github.com/toros-astro/corral/blob/75474b38ff366330d33644461a902d07374a5bbc/corral/db/__init__.py#L143-L153
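The transactional-scope pattern above, demonstrated without SQLAlchemy by plugging in a dummy session class; DummySession exists only for this sketch and mimics the commit/rollback/close interface used by the real code.

    from contextlib import contextmanager

    class DummySession:
        def commit(self):   print("commit")
        def rollback(self): print("rollback")
        def close(self):    print("close")

    @contextmanager
    def session_scope(session_cls=DummySession):
        session = session_cls()
        try:
            yield session
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    with session_scope() as session:
        pass                     # prints "commit" then "close"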
5,436
MarcMeszaros/envitro
envitro/docker.py
isset
def isset(alias_name): """Return a boolean if the docker link is set or not and is a valid looking docker link value. Args: alias_name: The link alias name """ warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2) raw_value = read(alias_name, allow_none=True) if raw_value: if re.compile(r'.+://.+:\d+').match(raw_value): return True else: warnings.warn('"{0}_PORT={1}" does not look like a docker link.'.format(alias_name, raw_value), stacklevel=2) return False return False
python
def isset(alias_name): """Return a boolean if the docker link is set or not and is a valid looking docker link value. Args: alias_name: The link alias name """ warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2) raw_value = read(alias_name, allow_none=True) if raw_value: if re.compile(r'.+://.+:\d+').match(raw_value): return True else: warnings.warn('"{0}_PORT={1}" does not look like a docker link.'.format(alias_name, raw_value), stacklevel=2) return False return False
['def', 'isset', '(', 'alias_name', ')', ':', 'warnings', '.', 'warn', '(', "'Will be removed in v1.0'", ',', 'DeprecationWarning', ',', 'stacklevel', '=', '2', ')', 'raw_value', '=', 'read', '(', 'alias_name', ',', 'allow_none', '=', 'True', ')', 'if', 'raw_value', ':', 'if', 're', '.', 'compile', '(', "r'.+://.+:\\d+'", ')', '.', 'match', '(', 'raw_value', ')', ':', 'return', 'True', 'else', ':', 'warnings', '.', 'warn', '(', '\'"{0}_PORT={1}" does not look like a docker link.\'', '.', 'format', '(', 'alias_name', ',', 'raw_value', ')', ',', 'stacklevel', '=', '2', ')', 'return', 'False', 'return', 'False']
Return a boolean if the docker link is set or not and is a valid looking docker link value. Args: alias_name: The link alias name
['Return', 'a', 'boolean', 'if', 'the', 'docker', 'link', 'is', 'set', 'or', 'not', 'and', 'is', 'a', 'valid', 'looking', 'docker', 'link', 'value', '.']
train
https://github.com/MarcMeszaros/envitro/blob/19e925cd152c08d4db8126542afed35188cafff4/envitro/docker.py#L47-L62
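The docker-link shape check above boils down to one regular expression; a quick standalone demonstration with made-up values.

    import re

    looks_like_link = re.compile(r'.+://.+:\d+')
    print(bool(looks_like_link.match('tcp://172.17.0.2:5432')))   # True
    print(bool(looks_like_link.match('not-a-docker-link')))       # False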
5,437
materialsproject/pymatgen
pymatgen/io/abinit/utils.py
Dirviz.get_cluster_graph
def get_cluster_graph(self, engine="fdp", graph_attr=None, node_attr=None, edge_attr=None): """ Generate directory graph in the DOT language. Directories are shown as clusters .. warning:: This function scans the entire directory tree starting from top so the resulting graph can be really big. Args: engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage'] graph_attr: Mapping of (attribute, value) pairs for the graph. node_attr: Mapping of (attribute, value) pairs set for all nodes. edge_attr: Mapping of (attribute, value) pairs set for all edges. Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph> """ # https://www.graphviz.org/doc/info/ from graphviz import Digraph g = Digraph("directory", #filename="flow_%s.gv" % os.path.basename(self.relworkdir), engine=engine) # if engine == "automatic" else engine) # Set graph attributes. #g.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir)) g.attr(label=self.top) #g.attr(fontcolor="white", bgcolor='purple:pink') #g.attr(rankdir="LR", pagedir="BL") #g.attr(constraint="false", pack="true", packMode="clust") g.node_attr.update(color='lightblue2', style='filled') #g.node_attr.update(ranksep='equally') # Add input attributes. if graph_attr is not None: fg.graph_attr.update(**graph_attr) if node_attr is not None: fg.node_attr.update(**node_attr) if edge_attr is not None: fg.edge_attr.update(**edge_attr) def node_kwargs(path): return dict( #shape="circle", #shape="none", #shape="plaintext", #shape="point", shape="record", #color=node.color_hex, fontsize="8.0", label=os.path.basename(path), ) edge_kwargs = dict(arrowType="vee", style="solid", minlen="1") cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2") # TODO: Write other method without clusters if not walk. exclude_top_node = False for root, dirs, files in os.walk(self.top): if exclude_top_node and root == self.top: continue cluster_name = "cluster_%s" % root #print("root", root, cluster_name, "dirs", dirs, "files", files, sep="\n") with g.subgraph(name=cluster_name) as d: d.attr(**cluster_kwargs) d.attr(rank="source" if (files or dirs) else "sink") d.attr(label=os.path.basename(root)) for f in files: filepath = os.path.join(root, f) d.node(filepath, **node_kwargs(filepath)) if os.path.islink(filepath): # Follow the link and use the relpath wrt link as label. realp = os.path.realpath(filepath) realp = os.path.relpath(realp, filepath) #realp = os.path.relpath(realp, self.top) #print(filepath, realp) #g.node(realp, **node_kwargs(realp)) g.edge(filepath, realp, **edge_kwargs) for dirname in dirs: dirpath = os.path.join(root, dirname) #head, basename = os.path.split(dirpath) new_cluster_name = "cluster_%s" % dirpath #rank = "source" if os.listdir(dirpath) else "sink" #g.node(dirpath, rank=rank, **node_kwargs(dirpath)) #g.edge(dirpath, new_cluster_name, **edge_kwargs) #d.edge(cluster_name, new_cluster_name, minlen="2", **edge_kwargs) d.edge(cluster_name, new_cluster_name, **edge_kwargs) return g
python
def get_cluster_graph(self, engine="fdp", graph_attr=None, node_attr=None, edge_attr=None): """ Generate directory graph in the DOT language. Directories are shown as clusters .. warning:: This function scans the entire directory tree starting from top so the resulting graph can be really big. Args: engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage'] graph_attr: Mapping of (attribute, value) pairs for the graph. node_attr: Mapping of (attribute, value) pairs set for all nodes. edge_attr: Mapping of (attribute, value) pairs set for all edges. Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph> """ # https://www.graphviz.org/doc/info/ from graphviz import Digraph g = Digraph("directory", #filename="flow_%s.gv" % os.path.basename(self.relworkdir), engine=engine) # if engine == "automatic" else engine) # Set graph attributes. #g.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir)) g.attr(label=self.top) #g.attr(fontcolor="white", bgcolor='purple:pink') #g.attr(rankdir="LR", pagedir="BL") #g.attr(constraint="false", pack="true", packMode="clust") g.node_attr.update(color='lightblue2', style='filled') #g.node_attr.update(ranksep='equally') # Add input attributes. if graph_attr is not None: fg.graph_attr.update(**graph_attr) if node_attr is not None: fg.node_attr.update(**node_attr) if edge_attr is not None: fg.edge_attr.update(**edge_attr) def node_kwargs(path): return dict( #shape="circle", #shape="none", #shape="plaintext", #shape="point", shape="record", #color=node.color_hex, fontsize="8.0", label=os.path.basename(path), ) edge_kwargs = dict(arrowType="vee", style="solid", minlen="1") cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2") # TODO: Write other method without clusters if not walk. exclude_top_node = False for root, dirs, files in os.walk(self.top): if exclude_top_node and root == self.top: continue cluster_name = "cluster_%s" % root #print("root", root, cluster_name, "dirs", dirs, "files", files, sep="\n") with g.subgraph(name=cluster_name) as d: d.attr(**cluster_kwargs) d.attr(rank="source" if (files or dirs) else "sink") d.attr(label=os.path.basename(root)) for f in files: filepath = os.path.join(root, f) d.node(filepath, **node_kwargs(filepath)) if os.path.islink(filepath): # Follow the link and use the relpath wrt link as label. realp = os.path.realpath(filepath) realp = os.path.relpath(realp, filepath) #realp = os.path.relpath(realp, self.top) #print(filepath, realp) #g.node(realp, **node_kwargs(realp)) g.edge(filepath, realp, **edge_kwargs) for dirname in dirs: dirpath = os.path.join(root, dirname) #head, basename = os.path.split(dirpath) new_cluster_name = "cluster_%s" % dirpath #rank = "source" if os.listdir(dirpath) else "sink" #g.node(dirpath, rank=rank, **node_kwargs(dirpath)) #g.edge(dirpath, new_cluster_name, **edge_kwargs) #d.edge(cluster_name, new_cluster_name, minlen="2", **edge_kwargs) d.edge(cluster_name, new_cluster_name, **edge_kwargs) return g
['def', 'get_cluster_graph', '(', 'self', ',', 'engine', '=', '"fdp"', ',', 'graph_attr', '=', 'None', ',', 'node_attr', '=', 'None', ',', 'edge_attr', '=', 'None', ')', ':', '# https://www.graphviz.org/doc/info/', 'from', 'graphviz', 'import', 'Digraph', 'g', '=', 'Digraph', '(', '"directory"', ',', '#filename="flow_%s.gv" % os.path.basename(self.relworkdir),', 'engine', '=', 'engine', ')', '# if engine == "automatic" else engine)', '# Set graph attributes.', '#g.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir))', 'g', '.', 'attr', '(', 'label', '=', 'self', '.', 'top', ')', '#g.attr(fontcolor="white", bgcolor=\'purple:pink\')', '#g.attr(rankdir="LR", pagedir="BL")', '#g.attr(constraint="false", pack="true", packMode="clust")', 'g', '.', 'node_attr', '.', 'update', '(', 'color', '=', "'lightblue2'", ',', 'style', '=', "'filled'", ')', "#g.node_attr.update(ranksep='equally')", '# Add input attributes.', 'if', 'graph_attr', 'is', 'not', 'None', ':', 'fg', '.', 'graph_attr', '.', 'update', '(', '*', '*', 'graph_attr', ')', 'if', 'node_attr', 'is', 'not', 'None', ':', 'fg', '.', 'node_attr', '.', 'update', '(', '*', '*', 'node_attr', ')', 'if', 'edge_attr', 'is', 'not', 'None', ':', 'fg', '.', 'edge_attr', '.', 'update', '(', '*', '*', 'edge_attr', ')', 'def', 'node_kwargs', '(', 'path', ')', ':', 'return', 'dict', '(', '#shape="circle",', '#shape="none",', '#shape="plaintext",', '#shape="point",', 'shape', '=', '"record"', ',', '#color=node.color_hex,', 'fontsize', '=', '"8.0"', ',', 'label', '=', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', ',', ')', 'edge_kwargs', '=', 'dict', '(', 'arrowType', '=', '"vee"', ',', 'style', '=', '"solid"', ',', 'minlen', '=', '"1"', ')', 'cluster_kwargs', '=', 'dict', '(', 'rankdir', '=', '"LR"', ',', 'pagedir', '=', '"BL"', ',', 'style', '=', '"rounded"', ',', 'bgcolor', '=', '"azure2"', ')', '# TODO: Write other method without clusters if not walk.', 'exclude_top_node', '=', 'False', 'for', 'root', ',', 'dirs', ',', 'files', 'in', 'os', '.', 'walk', '(', 'self', '.', 'top', ')', ':', 'if', 'exclude_top_node', 'and', 'root', '==', 'self', '.', 'top', ':', 'continue', 'cluster_name', '=', '"cluster_%s"', '%', 'root', '#print("root", root, cluster_name, "dirs", dirs, "files", files, sep="\\n")', 'with', 'g', '.', 'subgraph', '(', 'name', '=', 'cluster_name', ')', 'as', 'd', ':', 'd', '.', 'attr', '(', '*', '*', 'cluster_kwargs', ')', 'd', '.', 'attr', '(', 'rank', '=', '"source"', 'if', '(', 'files', 'or', 'dirs', ')', 'else', '"sink"', ')', 'd', '.', 'attr', '(', 'label', '=', 'os', '.', 'path', '.', 'basename', '(', 'root', ')', ')', 'for', 'f', 'in', 'files', ':', 'filepath', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'f', ')', 'd', '.', 'node', '(', 'filepath', ',', '*', '*', 'node_kwargs', '(', 'filepath', ')', ')', 'if', 'os', '.', 'path', '.', 'islink', '(', 'filepath', ')', ':', '# Follow the link and use the relpath wrt link as label.', 'realp', '=', 'os', '.', 'path', '.', 'realpath', '(', 'filepath', ')', 'realp', '=', 'os', '.', 'path', '.', 'relpath', '(', 'realp', ',', 'filepath', ')', '#realp = os.path.relpath(realp, self.top)', '#print(filepath, realp)', '#g.node(realp, **node_kwargs(realp))', 'g', '.', 'edge', '(', 'filepath', ',', 'realp', ',', '*', '*', 'edge_kwargs', ')', 'for', 'dirname', 'in', 'dirs', ':', 'dirpath', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'dirname', ')', '#head, basename = os.path.split(dirpath)', 'new_cluster_name', '=', '"cluster_%s"', '%', 'dirpath', '#rank = "source" if os.listdir(dirpath) else "sink"', '#g.node(dirpath, rank=rank, **node_kwargs(dirpath))', '#g.edge(dirpath, new_cluster_name, **edge_kwargs)', '#d.edge(cluster_name, new_cluster_name, minlen="2", **edge_kwargs)', 'd', '.', 'edge', '(', 'cluster_name', ',', 'new_cluster_name', ',', '*', '*', 'edge_kwargs', ')', 'return', 'g']
Generate directory graph in the DOT language. Directories are shown as clusters .. warning:: This function scans the entire directory tree starting from top so the resulting graph can be really big. Args: engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage'] graph_attr: Mapping of (attribute, value) pairs for the graph. node_attr: Mapping of (attribute, value) pairs set for all nodes. edge_attr: Mapping of (attribute, value) pairs set for all edges. Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
['Generate', 'directory', 'graph', 'in', 'the', 'DOT', 'language', '.', 'Directories', 'are', 'shown', 'as', 'clusters']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/utils.py#L902-L988
5,438
apache/incubator-mxnet
example/vae-gan/vaegan_mxnet.py
visual
def visual(title, X, activation): '''create a grid of images and save it as a final image title : grid image name X : array of images ''' assert len(X.shape) == 4 X = X.transpose((0, 2, 3, 1)) if activation == 'sigmoid': X = np.clip((X)*(255.0), 0, 255).astype(np.uint8) elif activation == 'tanh': X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8) n = np.ceil(np.sqrt(X.shape[0])) buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8) for i, img in enumerate(X): fill_buf(buff, i, img, X.shape[1:3]) cv2.imwrite('%s.jpg' % (title), buff)
python
def visual(title, X, activation): '''create a grid of images and save it as a final image title : grid image name X : array of images ''' assert len(X.shape) == 4 X = X.transpose((0, 2, 3, 1)) if activation == 'sigmoid': X = np.clip((X)*(255.0), 0, 255).astype(np.uint8) elif activation == 'tanh': X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8) n = np.ceil(np.sqrt(X.shape[0])) buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8) for i, img in enumerate(X): fill_buf(buff, i, img, X.shape[1:3]) cv2.imwrite('%s.jpg' % (title), buff)
['def', 'visual', '(', 'title', ',', 'X', ',', 'activation', ')', ':', 'assert', 'len', '(', 'X', '.', 'shape', ')', '==', '4', 'X', '=', 'X', '.', 'transpose', '(', '(', '0', ',', '2', ',', '3', ',', '1', ')', ')', 'if', 'activation', '==', "'sigmoid'", ':', 'X', '=', 'np', '.', 'clip', '(', '(', 'X', ')', '*', '(', '255.0', ')', ',', '0', ',', '255', ')', '.', 'astype', '(', 'np', '.', 'uint8', ')', 'elif', 'activation', '==', "'tanh'", ':', 'X', '=', 'np', '.', 'clip', '(', '(', 'X', '+', '1.0', ')', '*', '(', '255.0', '/', '2.0', ')', ',', '0', ',', '255', ')', '.', 'astype', '(', 'np', '.', 'uint8', ')', 'n', '=', 'np', '.', 'ceil', '(', 'np', '.', 'sqrt', '(', 'X', '.', 'shape', '[', '0', ']', ')', ')', 'buff', '=', 'np', '.', 'zeros', '(', '(', 'int', '(', 'n', '*', 'X', '.', 'shape', '[', '1', ']', ')', ',', 'int', '(', 'n', '*', 'X', '.', 'shape', '[', '2', ']', ')', ',', 'int', '(', 'X', '.', 'shape', '[', '3', ']', ')', ')', ',', 'dtype', '=', 'np', '.', 'uint8', ')', 'for', 'i', ',', 'img', 'in', 'enumerate', '(', 'X', ')', ':', 'fill_buf', '(', 'buff', ',', 'i', ',', 'img', ',', 'X', '.', 'shape', '[', '1', ':', '3', ']', ')', 'cv2', '.', 'imwrite', '(', "'%s.jpg'", '%', '(', 'title', ')', ',', 'buff', ')']
create a grid of images and save it as a final image title : grid image name X : array of images
['create', 'a', 'grid', 'of', 'images', 'and', 'save', 'it', 'as', 'a', 'final', 'image', 'title', ':', 'grid', 'image', 'name', 'X', ':', 'array', 'of', 'images']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L270-L286
5,439
tmux-python/libtmux
libtmux/window.py
Window.show_window_option
def show_window_option(self, option, g=False): """ Return a list of options for the window. todo: test and return True/False for on/off string Parameters ---------- option : str g : bool, optional Pass ``-g`` flag, global. Default False. Returns ------- str, int Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption` """ tmux_args = tuple() if g: tmux_args += ('-g',) tmux_args += (option,) cmd = self.cmd('show-window-options', *tmux_args) if len(cmd.stderr): handle_option_error(cmd.stderr[0]) if not len(cmd.stdout): return None option = [shlex.split(item) for item in cmd.stdout][0] if option[1].isdigit(): option = (option[0], int(option[1])) return option[1]
python
def show_window_option(self, option, g=False): """ Return a list of options for the window. todo: test and return True/False for on/off string Parameters ---------- option : str g : bool, optional Pass ``-g`` flag, global. Default False. Returns ------- str, int Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption` """ tmux_args = tuple() if g: tmux_args += ('-g',) tmux_args += (option,) cmd = self.cmd('show-window-options', *tmux_args) if len(cmd.stderr): handle_option_error(cmd.stderr[0]) if not len(cmd.stdout): return None option = [shlex.split(item) for item in cmd.stdout][0] if option[1].isdigit(): option = (option[0], int(option[1])) return option[1]
['def', 'show_window_option', '(', 'self', ',', 'option', ',', 'g', '=', 'False', ')', ':', 'tmux_args', '=', 'tuple', '(', ')', 'if', 'g', ':', 'tmux_args', '+=', '(', "'-g'", ',', ')', 'tmux_args', '+=', '(', 'option', ',', ')', 'cmd', '=', 'self', '.', 'cmd', '(', "'show-window-options'", ',', '*', 'tmux_args', ')', 'if', 'len', '(', 'cmd', '.', 'stderr', ')', ':', 'handle_option_error', '(', 'cmd', '.', 'stderr', '[', '0', ']', ')', 'if', 'not', 'len', '(', 'cmd', '.', 'stdout', ')', ':', 'return', 'None', 'option', '=', '[', 'shlex', '.', 'split', '(', 'item', ')', 'for', 'item', 'in', 'cmd', '.', 'stdout', ']', '[', '0', ']', 'if', 'option', '[', '1', ']', '.', 'isdigit', '(', ')', ':', 'option', '=', '(', 'option', '[', '0', ']', ',', 'int', '(', 'option', '[', '1', ']', ')', ')', 'return', 'option', '[', '1', ']']
Return a list of options for the window. todo: test and return True/False for on/off string Parameters ---------- option : str g : bool, optional Pass ``-g`` flag, global. Default False. Returns ------- str, int Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`
['Return', 'a', 'list', 'of', 'options', 'for', 'the', 'window', '.']
train
https://github.com/tmux-python/libtmux/blob/8eb2f8bbea3a025c1567b1516653414dbc24e1fc/libtmux/window.py#L232-L274
5,440
UDST/orca
orca/server/server.py
column_definition
def column_definition(table_name, col_name): """ Get the source of a column function. If a column is a registered Series and not a function then all that is returned is {'type': 'series'}. If the column is a registered function then the JSON returned has keys "type", "filename", "lineno", "text", and "html". "text" is the raw text of the function, "html" has been marked up by Pygments. """ col_type = orca.get_table(table_name).column_type(col_name) if col_type != 'function': return jsonify(type=col_type) filename, lineno, source = \ orca.get_raw_column(table_name, col_name).func_source_data() html = highlight(source, PythonLexer(), HtmlFormatter()) return jsonify( type='function', filename=filename, lineno=lineno, text=source, html=html)
python
def column_definition(table_name, col_name): """ Get the source of a column function. If a column is a registered Series and not a function then all that is returned is {'type': 'series'}. If the column is a registered function then the JSON returned has keys "type", "filename", "lineno", "text", and "html". "text" is the raw text of the function, "html" has been marked up by Pygments. """ col_type = orca.get_table(table_name).column_type(col_name) if col_type != 'function': return jsonify(type=col_type) filename, lineno, source = \ orca.get_raw_column(table_name, col_name).func_source_data() html = highlight(source, PythonLexer(), HtmlFormatter()) return jsonify( type='function', filename=filename, lineno=lineno, text=source, html=html)
['def', 'column_definition', '(', 'table_name', ',', 'col_name', ')', ':', 'col_type', '=', 'orca', '.', 'get_table', '(', 'table_name', ')', '.', 'column_type', '(', 'col_name', ')', 'if', 'col_type', '!=', "'function'", ':', 'return', 'jsonify', '(', 'type', '=', 'col_type', ')', 'filename', ',', 'lineno', ',', 'source', '=', 'orca', '.', 'get_raw_column', '(', 'table_name', ',', 'col_name', ')', '.', 'func_source_data', '(', ')', 'html', '=', 'highlight', '(', 'source', ',', 'PythonLexer', '(', ')', ',', 'HtmlFormatter', '(', ')', ')', 'return', 'jsonify', '(', 'type', '=', "'function'", ',', 'filename', '=', 'filename', ',', 'lineno', '=', 'lineno', ',', 'text', '=', 'source', ',', 'html', '=', 'html', ')']
Get the source of a column function. If a column is a registered Series and not a function then all that is returned is {'type': 'series'}. If the column is a registered function then the JSON returned has keys "type", "filename", "lineno", "text", and "html". "text" is the raw text of the function, "html" has been marked up by Pygments.
['Get', 'the', 'source', 'of', 'a', 'column', 'function', '.']
train
https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/server/server.py#L290-L314
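The Pygments call in the middle of column_definition is the part worth seeing in isolation. Below is a minimal sketch of just that step, assuming Pygments is installed; the sample source snippet is made up for illustration.

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# Render a small Python snippet as HTML, the same way the endpoint marks up
# a registered column function before returning it as the "html" field.
source = "def answer():\n    return 42\n"
html = highlight(source, PythonLexer(), HtmlFormatter())
print(html[:40])  # opening of the '<div class="highlight">...' markup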
5,441
thombashi/SimpleSQLite
simplesqlite/sqlquery.py
SqlQuery.make_insert
def make_insert(cls, table, insert_tuple): """ [Deprecated] Make INSERT query. :param str table: Table name of executing the query. :param list/tuple insert_tuple: Insertion data. :return: Query of SQLite. :rtype: str :raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|. :raises simplesqlite.NameValidationError: |raises_validate_table_name| """ validate_table_name(table) table = Table(table) if typepy.is_empty_sequence(insert_tuple): raise ValueError("empty insert list/tuple") return "INSERT INTO {:s} VALUES ({:s})".format( table, ",".join(["?" for _i in insert_tuple]) )
python
def make_insert(cls, table, insert_tuple): """ [Deprecated] Make INSERT query. :param str table: Table name of executing the query. :param list/tuple insert_tuple: Insertion data. :return: Query of SQLite. :rtype: str :raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|. :raises simplesqlite.NameValidationError: |raises_validate_table_name| """ validate_table_name(table) table = Table(table) if typepy.is_empty_sequence(insert_tuple): raise ValueError("empty insert list/tuple") return "INSERT INTO {:s} VALUES ({:s})".format( table, ",".join(["?" for _i in insert_tuple]) )
['def', 'make_insert', '(', 'cls', ',', 'table', ',', 'insert_tuple', ')', ':', 'validate_table_name', '(', 'table', ')', 'table', '=', 'Table', '(', 'table', ')', 'if', 'typepy', '.', 'is_empty_sequence', '(', 'insert_tuple', ')', ':', 'raise', 'ValueError', '(', '"empty insert list/tuple"', ')', 'return', '"INSERT INTO {:s} VALUES ({:s})"', '.', 'format', '(', 'table', ',', '","', '.', 'join', '(', '[', '"?"', 'for', '_i', 'in', 'insert_tuple', ']', ')', ')']
[Deprecated] Make INSERT query. :param str table: Table name of executing the query. :param list/tuple insert_tuple: Insertion data. :return: Query of SQLite. :rtype: str :raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|. :raises simplesqlite.NameValidationError: |raises_validate_table_name|
['[', 'Deprecated', ']', 'Make', 'INSERT', 'query', '.']
train
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/sqlquery.py#L22-L44
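To make the generated query shape concrete, here is a standalone sketch of the same placeholder logic; build_insert and the sample arguments are hypothetical and not part of SimpleSQLite.

def build_insert(table, insert_tuple):
    # One "?" placeholder is emitted per value in the insertion tuple.
    if not insert_tuple:
        raise ValueError("empty insert list/tuple")
    placeholders = ",".join("?" for _ in insert_tuple)
    return "INSERT INTO {:s} VALUES ({:s})".format(table, placeholders)

# A three-column row yields three placeholders.
print(build_insert("sample_table", ("a", 1, 2.5)))
# INSERT INTO sample_table VALUES (?,?,?)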
5,442
materialsproject/pymatgen
pymatgen/io/babel.py
BabelMolAdaptor.from_string
def from_string(string_data, file_format="xyz"): """ Uses OpenBabel to read a molecule from a string in all supported formats. Args: string_data: String containing molecule data. file_format: String specifying any OpenBabel supported formats. Returns: BabelMolAdaptor object """ mols = pb.readstring(str(file_format), str(string_data)) return BabelMolAdaptor(mols.OBMol)
python
def from_string(string_data, file_format="xyz"): """ Uses OpenBabel to read a molecule from a string in all supported formats. Args: string_data: String containing molecule data. file_format: String specifying any OpenBabel supported formats. Returns: BabelMolAdaptor object """ mols = pb.readstring(str(file_format), str(string_data)) return BabelMolAdaptor(mols.OBMol)
['def', 'from_string', '(', 'string_data', ',', 'file_format', '=', '"xyz"', ')', ':', 'mols', '=', 'pb', '.', 'readstring', '(', 'str', '(', 'file_format', ')', ',', 'str', '(', 'string_data', ')', ')', 'return', 'BabelMolAdaptor', '(', 'mols', '.', 'OBMol', ')']
Uses OpenBabel to read a molecule from a string in all supported formats. Args: string_data: String containing molecule data. file_format: String specifying any OpenBabel supported formats. Returns: BabelMolAdaptor object
['Uses', 'OpenBabel', 'to', 'read', 'a', 'molecule', 'from', 'a', 'string', 'in', 'all', 'supported', 'formats', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/babel.py#L332-L345
5,443
mozilla/treeherder
treeherder/services/elasticsearch/helpers.py
search
def search(query, index=INDEX_NAME, doc_type=DOC_TYPE): """ Thin wrapper of the main query function to provide just the resulting objects """ results = raw_query(query, index=index, doc_type=doc_type) return [r['_source'] for r in results]
python
def search(query, index=INDEX_NAME, doc_type=DOC_TYPE): """ Thin wrapper of the main query function to provide just the resulting objects """ results = raw_query(query, index=index, doc_type=doc_type) return [r['_source'] for r in results]
['def', 'search', '(', 'query', ',', 'index', '=', 'INDEX_NAME', ',', 'doc_type', '=', 'DOC_TYPE', ')', ':', 'results', '=', 'raw_query', '(', 'query', ',', 'index', '=', 'index', ',', 'doc_type', '=', 'doc_type', ')', 'return', '[', 'r', '[', "'_source'", ']', 'for', 'r', 'in', 'results', ']']
Thin wrapper of the main query function to provide just the resulting objects
['Thin', 'wrapper', 'of', 'the', 'main', 'query', 'function', 'to', 'provide', 'just', 'the', 'resulting', 'objects']
train
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/helpers.py#L147-L152
5,444
PlaidWeb/Publ
publ/queries.py
where_entry_tag
def where_entry_tag(query, tag): """ Generate a where clause for entries with the given tag """ if isinstance(tag, (list, set, tuple)): tags = [t.lower() for t in tag] return orm.select(e for e in query for t in e.tags if t.key in tags) return orm.select(e for e in query for t in e.tags if t.key == tag.lower())
python
def where_entry_tag(query, tag): """ Generate a where clause for entries with the given tag """ if isinstance(tag, (list, set, tuple)): tags = [t.lower() for t in tag] return orm.select(e for e in query for t in e.tags if t.key in tags) return orm.select(e for e in query for t in e.tags if t.key == tag.lower())
['def', 'where_entry_tag', '(', 'query', ',', 'tag', ')', ':', 'if', 'isinstance', '(', 'tag', ',', '(', 'list', ',', 'set', ',', 'tuple', ')', ')', ':', 'tags', '=', '[', 't', '.', 'lower', '(', ')', 'for', 't', 'in', 'tag', ']', 'return', 'orm', '.', 'select', '(', 'e', 'for', 'e', 'in', 'query', 'for', 't', 'in', 'e', '.', 'tags', 'if', 't', '.', 'key', 'in', 'tags', ')', 'return', 'orm', '.', 'select', '(', 'e', 'for', 'e', 'in', 'query', 'for', 't', 'in', 'e', '.', 'tags', 'if', 't', '.', 'key', '==', 'tag', '.', 'lower', '(', ')', ')']
Generate a where clause for entries with the given tag
['Generate', 'a', 'where', 'clause', 'for', 'entries', 'with', 'the', 'given', 'tag']
train
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/queries.py#L137-L142
5,445
SBRG/ssbio
ssbio/databases/uniprot.py
uniprot_ec
def uniprot_ec(uniprot_id): """Retrieve the EC number annotation for a UniProt ID. Args: uniprot_id: Valid UniProt ID Returns: """ r = requests.post('http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab' % uniprot_id) ec = r.content.decode('utf-8').splitlines()[1] if len(ec) == 0: ec = None return ec
python
def uniprot_ec(uniprot_id): """Retrieve the EC number annotation for a UniProt ID. Args: uniprot_id: Valid UniProt ID Returns: """ r = requests.post('http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab' % uniprot_id) ec = r.content.decode('utf-8').splitlines()[1] if len(ec) == 0: ec = None return ec
['def', 'uniprot_ec', '(', 'uniprot_id', ')', ':', 'r', '=', 'requests', '.', 'post', '(', "'http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab'", '%', 'uniprot_id', ')', 'ec', '=', 'r', '.', 'content', '.', 'decode', '(', "'utf-8'", ')', '.', 'splitlines', '(', ')', '[', '1', ']', 'if', 'len', '(', 'ec', ')', '==', '0', ':', 'ec', '=', 'None', 'return', 'ec']
Retrieve the EC number annotation for a UniProt ID. Args: uniprot_id: Valid UniProt ID Returns:
['Retrieve', 'the', 'EC', 'number', 'annotation', 'for', 'a', 'UniProt', 'ID', '.']
train
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L409-L424
5,446
dotzero/tilda-api-python
tilda/client.py
Client.get_page_full
def get_page_full(self, page_id): """ Get full page info and full html code """ try: result = self._request('/getpagefull/', {'pageid': page_id}) return TildaPage(**result) except NetworkError: return []
python
def get_page_full(self, page_id): """ Get full page info and full html code """ try: result = self._request('/getpagefull/', {'pageid': page_id}) return TildaPage(**result) except NetworkError: return []
['def', 'get_page_full', '(', 'self', ',', 'page_id', ')', ':', 'try', ':', 'result', '=', 'self', '.', '_request', '(', "'/getpagefull/'", ',', '{', "'pageid'", ':', 'page_id', '}', ')', 'return', 'TildaPage', '(', '*', '*', 'result', ')', 'except', 'NetworkError', ':', 'return', '[', ']']
Get full page info and full html code
['Get', 'full', 'page', 'info', 'and', 'full', 'html', 'code']
train
https://github.com/dotzero/tilda-api-python/blob/0ab984e0236cbfb676b0fbddc1ab37202d92e0a8/tilda/client.py#L119-L126
5,447
T-002/pycast
pycast/errors/medianabsolutepercentageerror.py
MedianAbsolutePercentageError._calculate
def _calculate(self, startingPercentage, endPercentage, startDate, endDate): """This is the error calculation function that gets called by :py:meth:`BaseErrorMeasure.get_error`. Both parameters will be correct at this time. :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0]. It represents the value, where the error calculation should be started. 25.0 for example means that the first 25% of all calculated errors will be ignored. :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0]. It represents the value, after which all error values will be ignored. 90.0 for example means that the last 10% of all local errors will be ignored. :param float startDate: Epoch representing the start date used for error calculation. :param float endDate: Epoch representing the end date used in the error calculation. :return: Returns a float representing the error. :rtype: float """ # get the defined subset of error values errorValues = self._get_error_values(startingPercentage, endPercentage, startDate, endDate) errorValues = filter(lambda item: item is not None, errorValues) return sorted(errorValues)[len(errorValues)//2]
python
def _calculate(self, startingPercentage, endPercentage, startDate, endDate): """This is the error calculation function that gets called by :py:meth:`BaseErrorMeasure.get_error`. Both parameters will be correct at this time. :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0]. It represents the value, where the error calculation should be started. 25.0 for example means that the first 25% of all calculated errors will be ignored. :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0]. It represents the value, after which all error values will be ignored. 90.0 for example means that the last 10% of all local errors will be ignored. :param float startDate: Epoch representing the start date used for error calculation. :param float endDate: Epoch representing the end date used in the error calculation. :return: Returns a float representing the error. :rtype: float """ # get the defined subset of error values errorValues = self._get_error_values(startingPercentage, endPercentage, startDate, endDate) errorValues = filter(lambda item: item is not None, errorValues) return sorted(errorValues)[len(errorValues)//2]
['def', '_calculate', '(', 'self', ',', 'startingPercentage', ',', 'endPercentage', ',', 'startDate', ',', 'endDate', ')', ':', '# get the defined subset of error values', 'errorValues', '=', 'self', '.', '_get_error_values', '(', 'startingPercentage', ',', 'endPercentage', ',', 'startDate', ',', 'endDate', ')', 'errorValues', '=', 'filter', '(', 'lambda', 'item', ':', 'item', 'is', 'not', 'None', ',', 'errorValues', ')', 'return', 'sorted', '(', 'errorValues', ')', '[', 'len', '(', 'errorValues', ')', '//', '2', ']']
This is the error calculation function that gets called by :py:meth:`BaseErrorMeasure.get_error`. Both parameters will be correct at this time. :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0]. It represents the value, where the error calculation should be started. 25.0 for example means that the first 25% of all calculated errors will be ignored. :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0]. It represents the value, after which all error values will be ignored. 90.0 for example means that the last 10% of all local errors will be ignored. :param float startDate: Epoch representing the start date used for error calculation. :param float endDate: Epoch representing the end date used in the error calculation. :return: Returns a float representing the error. :rtype: float
['This', 'is', 'the', 'error', 'calculation', 'function', 'that', 'gets', 'called', 'by', ':', 'py', ':', 'meth', ':', 'BaseErrorMeasure', '.', 'get_error', '.']
train
https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/errors/medianabsolutepercentageerror.py#L31-L52
5,448
xolox/python-vcs-repo-mgr
vcs_repo_mgr/__init__.py
Repository.ensure_hexadecimal_string
def ensure_hexadecimal_string(self, value, command=None): """ Make sure the given value is a hexadecimal string. :param value: The value to check (a string). :param command: The command that produced the value (a string or :data:`None`). :returns: The validated hexadecimal string. :raises: :exc:`~exceptions.ValueError` when `value` is not a hexadecimal string. """ if not HEX_PATTERN.match(value): msg = "Expected a hexadecimal string, got '%s' instead!" if command: msg += " ('%s' gave unexpected output)" msg %= (value, command) else: msg %= value raise ValueError(msg) return value
python
def ensure_hexadecimal_string(self, value, command=None): """ Make sure the given value is a hexadecimal string. :param value: The value to check (a string). :param command: The command that produced the value (a string or :data:`None`). :returns: The validated hexadecimal string. :raises: :exc:`~exceptions.ValueError` when `value` is not a hexadecimal string. """ if not HEX_PATTERN.match(value): msg = "Expected a hexadecimal string, got '%s' instead!" if command: msg += " ('%s' gave unexpected output)" msg %= (value, command) else: msg %= value raise ValueError(msg) return value
['def', 'ensure_hexadecimal_string', '(', 'self', ',', 'value', ',', 'command', '=', 'None', ')', ':', 'if', 'not', 'HEX_PATTERN', '.', 'match', '(', 'value', ')', ':', 'msg', '=', '"Expected a hexadecimal string, got \'%s\' instead!"', 'if', 'command', ':', 'msg', '+=', '" (\'%s\' gave unexpected output)"', 'msg', '%=', '(', 'value', ',', 'command', ')', 'else', ':', 'msg', '%=', 'value', 'raise', 'ValueError', '(', 'msg', ')', 'return', 'value']
Make sure the given value is a hexadecimal string. :param value: The value to check (a string). :param command: The command that produced the value (a string or :data:`None`). :returns: The validated hexadecimal string. :raises: :exc:`~exceptions.ValueError` when `value` is not a hexadecimal string.
['Make', 'sure', 'the', 'given', 'value', 'is', 'a', 'hexadecimal', 'string', '.']
train
https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/__init__.py#L1224-L1241
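A small self-contained sketch of the same validation idea; the HEX_PATTERN defined here is an assumption standing in for the module-level pattern in vcs_repo_mgr, and check_hex is a hypothetical name.

import re

# Stand-in for vcs_repo_mgr's module-level HEX_PATTERN: one or more hex digits.
HEX_PATTERN = re.compile(r'^[0-9a-fA-F]+$')

def check_hex(value, command=None):
    # Reject anything that is not a purely hexadecimal string, naming the
    # offending command in the error message when one is given.
    if not HEX_PATTERN.match(value):
        msg = "Expected a hexadecimal string, got %r instead!" % value
        if command:
            msg += " (%r gave unexpected output)" % command
        raise ValueError(msg)
    return value

print(check_hex("fdad2441a3e7"))  # accepted and returned unchanged
# check_hex("not-hex")            # would raise ValueError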
5,449
bcbnz/python-rofi
rofi.py
Rofi.integer_entry
def integer_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs): """Prompt the user to enter an integer. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. min, max: integer, optional Minimum and maximum values to allow. If None, no limit is imposed. Returns ------- integer, or None if the dialog is cancelled. """ # Sanity check. if (min is not None) and (max is not None) and not (max > min): raise ValueError("Maximum limit has to be more than the minimum limit.") def integer_validator(text): error = None # Attempt to convert to integer. try: value = int(text) except ValueError: return None, "Please enter an integer value." # Check its within limits. if (min is not None) and (value < min): return None, "The minimum allowable value is {0:d}.".format(min) if (max is not None) and (value > max): return None, "The maximum allowable value is {0:d}.".format(max) return value, None return self.generic_entry(prompt, integer_validator, message, rofi_args, **kwargs)
python
def integer_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs): """Prompt the user to enter an integer. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. min, max: integer, optional Minimum and maximum values to allow. If None, no limit is imposed. Returns ------- integer, or None if the dialog is cancelled. """ # Sanity check. if (min is not None) and (max is not None) and not (max > min): raise ValueError("Maximum limit has to be more than the minimum limit.") def integer_validator(text): error = None # Attempt to convert to integer. try: value = int(text) except ValueError: return None, "Please enter an integer value." # Check its within limits. if (min is not None) and (value < min): return None, "The minimum allowable value is {0:d}.".format(min) if (max is not None) and (value > max): return None, "The maximum allowable value is {0:d}.".format(max) return value, None return self.generic_entry(prompt, integer_validator, message, rofi_args, **kwargs)
['def', 'integer_entry', '(', 'self', ',', 'prompt', ',', 'message', '=', 'None', ',', 'min', '=', 'None', ',', 'max', '=', 'None', ',', 'rofi_args', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', '# Sanity check.', 'if', '(', 'min', 'is', 'not', 'None', ')', 'and', '(', 'max', 'is', 'not', 'None', ')', 'and', 'not', '(', 'max', '>', 'min', ')', ':', 'raise', 'ValueError', '(', '"Maximum limit has to be more than the minimum limit."', ')', 'def', 'integer_validator', '(', 'text', ')', ':', 'error', '=', 'None', '# Attempt to convert to integer.', 'try', ':', 'value', '=', 'int', '(', 'text', ')', 'except', 'ValueError', ':', 'return', 'None', ',', '"Please enter an integer value."', '# Check its within limits.', 'if', '(', 'min', 'is', 'not', 'None', ')', 'and', '(', 'value', '<', 'min', ')', ':', 'return', 'None', ',', '"The minimum allowable value is {0:d}."', '.', 'format', '(', 'min', ')', 'if', '(', 'max', 'is', 'not', 'None', ')', 'and', '(', 'value', '>', 'max', ')', ':', 'return', 'None', ',', '"The maximum allowable value is {0:d}."', '.', 'format', '(', 'max', ')', 'return', 'value', ',', 'None', 'return', 'self', '.', 'generic_entry', '(', 'prompt', ',', 'integer_validator', ',', 'message', ',', 'rofi_args', ',', '*', '*', 'kwargs', ')']
Prompt the user to enter an integer. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. min, max: integer, optional Minimum and maximum values to allow. If None, no limit is imposed. Returns ------- integer, or None if the dialog is cancelled.
['Prompt', 'the', 'user', 'to', 'enter', 'an', 'integer', '.']
train
https://github.com/bcbnz/python-rofi/blob/d20b3a2ba4ba1b294b002f25a8fb526c5115d0d4/rofi.py#L569-L607
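The inner validator carries the actual logic; this is a minimal sketch of the same bounds-checked parsing outside of Rofi, under the hypothetical name parse_bounded_int.

def parse_bounded_int(text, min=None, max=None):
    # Returns (value, error) with exactly one of the two set, mirroring the
    # contract the Rofi validator hands to generic_entry.
    try:
        value = int(text)
    except ValueError:
        return None, "Please enter an integer value."
    if (min is not None) and (value < min):
        return None, "The minimum allowable value is {0:d}.".format(min)
    if (max is not None) and (value > max):
        return None, "The maximum allowable value is {0:d}.".format(max)
    return value, None

print(parse_bounded_int("7", min=1, max=10))  # (7, None)
print(parse_bounded_int("42", max=10))        # (None, 'The maximum allowable value is 10.')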
5,450
n1analytics/python-paillier
phe/util.py
powmod
def powmod(a, b, c): """ Uses GMP, if available, to do a^b mod c where a, b, c are integers. :return int: (a ** b) % c """ if a == 1: return 1 if not HAVE_GMP or max(a, b, c) < _USE_MOD_FROM_GMP_SIZE: return pow(a, b, c) else: return int(gmpy2.powmod(a, b, c))
python
def powmod(a, b, c): """ Uses GMP, if available, to do a^b mod c where a, b, c are integers. :return int: (a ** b) % c """ if a == 1: return 1 if not HAVE_GMP or max(a, b, c) < _USE_MOD_FROM_GMP_SIZE: return pow(a, b, c) else: return int(gmpy2.powmod(a, b, c))
['def', 'powmod', '(', 'a', ',', 'b', ',', 'c', ')', ':', 'if', 'a', '==', '1', ':', 'return', '1', 'if', 'not', 'HAVE_GMP', 'or', 'max', '(', 'a', ',', 'b', ',', 'c', ')', '<', '_USE_MOD_FROM_GMP_SIZE', ':', 'return', 'pow', '(', 'a', ',', 'b', ',', 'c', ')', 'else', ':', 'return', 'int', '(', 'gmpy2', '.', 'powmod', '(', 'a', ',', 'b', ',', 'c', ')', ')']
Uses GMP, if available, to do a^b mod c where a, b, c are integers. :return int: (a ** b) % c
['Uses', 'GMP', 'if', 'available', 'to', 'do', 'a^b', 'mod', 'c', 'where', 'a', 'b', 'c', 'are', 'integers', '.']
train
https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/phe/util.py#L38-L50
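A quick sketch of what the non-GMP branch computes, using only the standard library; the particular numbers are arbitrary.

# Without gmpy2, powmod(a, b, c) reduces to Python's built-in three-argument
# pow, which avoids materialising the huge intermediate a ** b.
a, b, c = 7, 560, 561
assert pow(a, b, c) == (a ** b) % c
print(pow(a, b, c))  # 1 for these particular numbers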
5,451
vertexproject/synapse
synapse/lib/reflect.py
getClsNames
def getClsNames(item): ''' Return a list of "fully qualified" class names for an instance. Example: for name in getClsNames(foo): print(name) ''' mro = inspect.getmro(item.__class__) mro = [c for c in mro if c not in clsskip] return ['%s.%s' % (c.__module__, c.__name__) for c in mro]
python
def getClsNames(item): ''' Return a list of "fully qualified" class names for an instance. Example: for name in getClsNames(foo): print(name) ''' mro = inspect.getmro(item.__class__) mro = [c for c in mro if c not in clsskip] return ['%s.%s' % (c.__module__, c.__name__) for c in mro]
['def', 'getClsNames', '(', 'item', ')', ':', 'mro', '=', 'inspect', '.', 'getmro', '(', 'item', '.', '__class__', ')', 'mro', '=', '[', 'c', 'for', 'c', 'in', 'mro', 'if', 'c', 'not', 'in', 'clsskip', ']', 'return', '[', "'%s.%s'", '%', '(', 'c', '.', '__module__', ',', 'c', '.', '__name__', ')', 'for', 'c', 'in', 'mro', ']']
Return a list of "fully qualified" class names for an instance. Example: for name in getClsNames(foo): print(name)
['Return', 'a', 'list', 'of', 'fully', 'qualified', 'class', 'names', 'for', 'an', 'instance', '.']
train
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/reflect.py#L11-L23
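A runnable sketch of the same MRO walk on a throwaway class hierarchy; the clsskip filtering from synapse is approximated here by dropping the object base class.

import inspect

def qualified_class_names(item):
    # Walk the MRO of the instance's class and emit "module.ClassName" strings.
    mro = [c for c in inspect.getmro(item.__class__) if c is not object]
    return ['%s.%s' % (c.__module__, c.__name__) for c in mro]

class Base:
    pass

class Foo(Base):
    pass

print(qualified_class_names(Foo()))  # ['__main__.Foo', '__main__.Base']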
5,452
sdispater/orator
orator/query/builder.py
QueryBuilder.chunk
def chunk(self, count): """ Chunk the results of the query :param count: The chunk size :type count: int :return: The current chunk :rtype: list """ for chunk in self._connection.select_many( count, self.to_sql(), self.get_bindings(), not self._use_write_connection ): yield chunk
python
def chunk(self, count): """ Chunk the results of the query :param count: The chunk size :type count: int :return: The current chunk :rtype: list """ for chunk in self._connection.select_many( count, self.to_sql(), self.get_bindings(), not self._use_write_connection ): yield chunk
['def', 'chunk', '(', 'self', ',', 'count', ')', ':', 'for', 'chunk', 'in', 'self', '.', '_connection', '.', 'select_many', '(', 'count', ',', 'self', '.', 'to_sql', '(', ')', ',', 'self', '.', 'get_bindings', '(', ')', ',', 'not', 'self', '.', '_use_write_connection', ')', ':', 'yield', 'chunk']
Chunk the results of the query :param count: The chunk size :type count: int :return: The current chunk :rtype: list
['Chunk', 'the', 'results', 'of', 'the', 'query']
train
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/builder.py#L1146-L1159
5,453
Azure/azure-cosmos-python
azure/cosmos/resource_throttle_retry_policy.py
_ResourceThrottleRetryPolicy.ShouldRetry
def ShouldRetry(self, exception): """Returns true if should retry based on the passed-in exception. :param (errors.HTTPFailure instance) exception: :rtype: boolean """ if self.current_retry_attempt_count < self._max_retry_attempt_count: self.current_retry_attempt_count += 1 self.retry_after_in_milliseconds = 0 if self._fixed_retry_interval_in_milliseconds: self.retry_after_in_milliseconds = self._fixed_retry_interval_in_milliseconds elif http_constants.HttpHeaders.RetryAfterInMilliseconds in exception.headers: self.retry_after_in_milliseconds = int(exception.headers[http_constants.HttpHeaders.RetryAfterInMilliseconds]) if self.cummulative_wait_time_in_milliseconds < self._max_wait_time_in_milliseconds: self.cummulative_wait_time_in_milliseconds += self.retry_after_in_milliseconds return True return False
python
def ShouldRetry(self, exception): """Returns true if should retry based on the passed-in exception. :param (errors.HTTPFailure instance) exception: :rtype: boolean """ if self.current_retry_attempt_count < self._max_retry_attempt_count: self.current_retry_attempt_count += 1 self.retry_after_in_milliseconds = 0 if self._fixed_retry_interval_in_milliseconds: self.retry_after_in_milliseconds = self._fixed_retry_interval_in_milliseconds elif http_constants.HttpHeaders.RetryAfterInMilliseconds in exception.headers: self.retry_after_in_milliseconds = int(exception.headers[http_constants.HttpHeaders.RetryAfterInMilliseconds]) if self.cummulative_wait_time_in_milliseconds < self._max_wait_time_in_milliseconds: self.cummulative_wait_time_in_milliseconds += self.retry_after_in_milliseconds return True return False
['def', 'ShouldRetry', '(', 'self', ',', 'exception', ')', ':', 'if', 'self', '.', 'current_retry_attempt_count', '<', 'self', '.', '_max_retry_attempt_count', ':', 'self', '.', 'current_retry_attempt_count', '+=', '1', 'self', '.', 'retry_after_in_milliseconds', '=', '0', 'if', 'self', '.', '_fixed_retry_interval_in_milliseconds', ':', 'self', '.', 'retry_after_in_milliseconds', '=', 'self', '.', '_fixed_retry_interval_in_milliseconds', 'elif', 'http_constants', '.', 'HttpHeaders', '.', 'RetryAfterInMilliseconds', 'in', 'exception', '.', 'headers', ':', 'self', '.', 'retry_after_in_milliseconds', '=', 'int', '(', 'exception', '.', 'headers', '[', 'http_constants', '.', 'HttpHeaders', '.', 'RetryAfterInMilliseconds', ']', ')', 'if', 'self', '.', 'cummulative_wait_time_in_milliseconds', '<', 'self', '.', '_max_wait_time_in_milliseconds', ':', 'self', '.', 'cummulative_wait_time_in_milliseconds', '+=', 'self', '.', 'retry_after_in_milliseconds', 'return', 'True', 'return', 'False']
Returns true if should retry based on the passed-in exception. :param (errors.HTTPFailure instance) exception: :rtype: boolean
['Returns', 'true', 'if', 'should', 'retry', 'based', 'on', 'the', 'passed', '-', 'in', 'exception', '.']
train
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/resource_throttle_retry_policy.py#L36-L58
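A simplified, self-contained sketch of the same bookkeeping; the class name, the default budgets and the plain retry_after_ms argument (standing in for the retry-after response header) are assumptions, not the SDK's API.

class ThrottleRetryState:
    # Retry while both the attempt budget and the cumulative wait budget last.
    def __init__(self, max_attempts=9, max_wait_ms=30000, fixed_interval_ms=None):
        self.max_attempts = max_attempts
        self.max_wait_ms = max_wait_ms
        self.fixed_interval_ms = fixed_interval_ms
        self.attempts = 0
        self.cumulative_wait_ms = 0

    def should_retry(self, retry_after_ms):
        if self.attempts >= self.max_attempts:
            return False
        self.attempts += 1
        wait_ms = self.fixed_interval_ms or retry_after_ms
        if self.cumulative_wait_ms >= self.max_wait_ms:
            return False
        self.cumulative_wait_ms += wait_ms
        return True

state = ThrottleRetryState(max_attempts=2)
print([state.should_retry(100) for _ in range(3)])  # [True, True, False]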
5,454
mediaburst/clockwork-python
clockwork/clockwork.py
API.get_balance
def get_balance(self): """Check the balance fot this account. Returns a dictionary containing: account_type: The account type balance: The balance remaining on the account currency: The currency used for the account balance. Assume GBP in not set""" xml_root = self.__init_xml('Balance') response = clockwork_http.request(BALANCE_URL, etree.tostring(xml_root, encoding='utf-8')) data_etree = etree.fromstring(response['data']) err_desc = data_etree.find('ErrDesc') if err_desc is not None: raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text) result = {} result['account_type'] = data_etree.find('AccountType').text result['balance'] = data_etree.find('Balance').text result['currency'] = data_etree.find('Currency').text return result
python
def get_balance(self): """Check the balance fot this account. Returns a dictionary containing: account_type: The account type balance: The balance remaining on the account currency: The currency used for the account balance. Assume GBP in not set""" xml_root = self.__init_xml('Balance') response = clockwork_http.request(BALANCE_URL, etree.tostring(xml_root, encoding='utf-8')) data_etree = etree.fromstring(response['data']) err_desc = data_etree.find('ErrDesc') if err_desc is not None: raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text) result = {} result['account_type'] = data_etree.find('AccountType').text result['balance'] = data_etree.find('Balance').text result['currency'] = data_etree.find('Currency').text return result
['def', 'get_balance', '(', 'self', ')', ':', 'xml_root', '=', 'self', '.', '__init_xml', '(', "'Balance'", ')', 'response', '=', 'clockwork_http', '.', 'request', '(', 'BALANCE_URL', ',', 'etree', '.', 'tostring', '(', 'xml_root', ',', 'encoding', '=', "'utf-8'", ')', ')', 'data_etree', '=', 'etree', '.', 'fromstring', '(', 'response', '[', "'data'", ']', ')', 'err_desc', '=', 'data_etree', '.', 'find', '(', "'ErrDesc'", ')', 'if', 'err_desc', 'is', 'not', 'None', ':', 'raise', 'clockwork_exceptions', '.', 'ApiException', '(', 'err_desc', '.', 'text', ',', 'data_etree', '.', 'find', '(', "'ErrNo'", ')', '.', 'text', ')', 'result', '=', '{', '}', 'result', '[', "'account_type'", ']', '=', 'data_etree', '.', 'find', '(', "'AccountType'", ')', '.', 'text', 'result', '[', "'balance'", ']', '=', 'data_etree', '.', 'find', '(', "'Balance'", ')', '.', 'text', 'result', '[', "'currency'", ']', '=', 'data_etree', '.', 'find', '(', "'Currency'", ')', '.', 'text', 'return', 'result']
Check the balance fot this account. Returns a dictionary containing: account_type: The account type balance: The balance remaining on the account currency: The currency used for the account balance. Assume GBP in not set
['Check', 'the', 'balance', 'fot', 'this', 'account', '.', 'Returns', 'a', 'dictionary', 'containing', ':', 'account_type', ':', 'The', 'account', 'type', 'balance', ':', 'The', 'balance', 'remaining', 'on', 'the', 'account', 'currency', ':', 'The', 'currency', 'used', 'for', 'the', 'account', 'balance', '.', 'Assume', 'GBP', 'in', 'not', 'set']
train
https://github.com/mediaburst/clockwork-python/blob/7f8368bbed1fcb5218584fbc5094d93c6aa365d1/clockwork/clockwork.py#L47-L67
5,455
cloudtools/stacker
stacker/blueprints/raw.py
get_template_path
def get_template_path(filename): """Find raw template in working directory or in sys.path. template_path from config may refer to templates colocated with the Stacker config, or files in remote package_sources. Here, we emulate python module loading to find the path to the template. Args: filename (str): Template filename. Returns: Optional[str]: Path to file, or None if no file found """ if os.path.isfile(filename): return os.path.abspath(filename) for i in sys.path: if os.path.isfile(os.path.join(i, filename)): return os.path.abspath(os.path.join(i, filename)) return None
python
def get_template_path(filename): """Find raw template in working directory or in sys.path. template_path from config may refer to templates colocated with the Stacker config, or files in remote package_sources. Here, we emulate python module loading to find the path to the template. Args: filename (str): Template filename. Returns: Optional[str]: Path to file, or None if no file found """ if os.path.isfile(filename): return os.path.abspath(filename) for i in sys.path: if os.path.isfile(os.path.join(i, filename)): return os.path.abspath(os.path.join(i, filename)) return None
['def', 'get_template_path', '(', 'filename', ')', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'filename', ')', ':', 'return', 'os', '.', 'path', '.', 'abspath', '(', 'filename', ')', 'for', 'i', 'in', 'sys', '.', 'path', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'os', '.', 'path', '.', 'join', '(', 'i', ',', 'filename', ')', ')', ':', 'return', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'i', ',', 'filename', ')', ')', 'return', 'None']
Find raw template in working directory or in sys.path. template_path from config may refer to templates colocated with the Stacker config, or files in remote package_sources. Here, we emulate python module loading to find the path to the template. Args: filename (str): Template filename. Returns: Optional[str]: Path to file, or None if no file found
['Find', 'raw', 'template', 'in', 'working', 'directory', 'or', 'in', 'sys', '.', 'path', '.']
train
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/blueprints/raw.py#L18-L38
5,456
Microsoft/nni
tools/nni_cmd/updater.py
import_data
def import_data(args): '''import additional data to the experiment''' validate_file(args.filename) validate_dispatcher(args) content = load_search_space(args.filename) args.port = get_experiment_port(args) if args.port is not None: if import_data_to_restful_server(args, content): pass else: print_error('Import data failed!')
python
def import_data(args): '''import additional data to the experiment''' validate_file(args.filename) validate_dispatcher(args) content = load_search_space(args.filename) args.port = get_experiment_port(args) if args.port is not None: if import_data_to_restful_server(args, content): pass else: print_error('Import data failed!')
['def', 'import_data', '(', 'args', ')', ':', 'validate_file', '(', 'args', '.', 'filename', ')', 'validate_dispatcher', '(', 'args', ')', 'content', '=', 'load_search_space', '(', 'args', '.', 'filename', ')', 'args', '.', 'port', '=', 'get_experiment_port', '(', 'args', ')', 'if', 'args', '.', 'port', 'is', 'not', 'None', ':', 'if', 'import_data_to_restful_server', '(', 'args', ',', 'content', ')', ':', 'pass', 'else', ':', 'print_error', '(', "'Import data failed!'", ')']
import additional data to the experiment
['import', 'additional', 'data', 'to', 'the', 'experiment']
train
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_cmd/updater.py#L131-L141
5,457
galaxy-genome-annotation/python-apollo
arrow/commands/annotations/set_symbol.py
cli
def cli(ctx, feature_id, symbol, organism="", sequence=""): """Set a feature's description Output: A standard apollo feature dictionary ({"features": [{...}]}) """ return ctx.gi.annotations.set_symbol(feature_id, symbol, organism=organism, sequence=sequence)
python
def cli(ctx, feature_id, symbol, organism="", sequence=""): """Set a feature's description Output: A standard apollo feature dictionary ({"features": [{...}]}) """ return ctx.gi.annotations.set_symbol(feature_id, symbol, organism=organism, sequence=sequence)
['def', 'cli', '(', 'ctx', ',', 'feature_id', ',', 'symbol', ',', 'organism', '=', '""', ',', 'sequence', '=', '""', ')', ':', 'return', 'ctx', '.', 'gi', '.', 'annotations', '.', 'set_symbol', '(', 'feature_id', ',', 'symbol', ',', 'organism', '=', 'organism', ',', 'sequence', '=', 'sequence', ')']
Set a feature's description Output: A standard apollo feature dictionary ({"features": [{...}]})
['Set', 'a', 'feature', 's', 'description']
train
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/arrow/commands/annotations/set_symbol.py#L22-L29
5,458
mollie/mollie-api-python
mollie/api/objects/payment.py
Payment.order
def order(self): """Return the order for this payment. """ from ..resources.orders import Order url = self._get_link('order') if url: resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url) return Order(resp, self.client)
python
def order(self): """Return the order for this payment. """ from ..resources.orders import Order url = self._get_link('order') if url: resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url) return Order(resp, self.client)
['def', 'order', '(', 'self', ')', ':', 'from', '.', '.', 'resources', '.', 'orders', 'import', 'Order', 'url', '=', 'self', '.', '_get_link', '(', "'order'", ')', 'if', 'url', ':', 'resp', '=', 'self', '.', 'client', '.', 'orders', '.', 'perform_api_call', '(', 'self', '.', 'client', '.', 'orders', '.', 'REST_READ', ',', 'url', ')', 'return', 'Order', '(', 'resp', ',', 'self', '.', 'client', ')']
Return the order for this payment.
['Return', 'the', 'order', 'for', 'this', 'payment', '.']
train
https://github.com/mollie/mollie-api-python/blob/307836b70f0439c066718f1e375fa333dc6e5d77/mollie/api/objects/payment.py#L193-L199
5,459
AtteqCom/zsl
src/zsl/resource/json_server_resource.py
JsonServerResource._create_filter_by
def _create_filter_by(self): """Transform the json-server filter arguments to model-resource ones.""" filter_by = [] for name, values in request.args.copy().lists(): # copy.lists works in py2 and py3 if name not in _SKIPPED_ARGUMENTS: column = _re_column_name.search(name).group(1) if column not in self._model_columns: continue for value in values: if name.endswith('_ne'): filter_by.append(name[:-3] + '!=' + value) elif name.endswith('_lte'): filter_by.append(name[:-4] + '<=' + value) elif name.endswith('_gte'): filter_by.append(name[:-4] + '>=' + value) elif name.endswith('_like'): filter_by.append(name[:-5] + '::like::%' + value + '%') else: filter_by.append(name.replace('__', '.') + '==' + value) filter_by += self._create_fulltext_query() return ','.join(filter_by)
python
def _create_filter_by(self): """Transform the json-server filter arguments to model-resource ones.""" filter_by = [] for name, values in request.args.copy().lists(): # copy.lists works in py2 and py3 if name not in _SKIPPED_ARGUMENTS: column = _re_column_name.search(name).group(1) if column not in self._model_columns: continue for value in values: if name.endswith('_ne'): filter_by.append(name[:-3] + '!=' + value) elif name.endswith('_lte'): filter_by.append(name[:-4] + '<=' + value) elif name.endswith('_gte'): filter_by.append(name[:-4] + '>=' + value) elif name.endswith('_like'): filter_by.append(name[:-5] + '::like::%' + value + '%') else: filter_by.append(name.replace('__', '.') + '==' + value) filter_by += self._create_fulltext_query() return ','.join(filter_by)
['def', '_create_filter_by', '(', 'self', ')', ':', 'filter_by', '=', '[', ']', 'for', 'name', ',', 'values', 'in', 'request', '.', 'args', '.', 'copy', '(', ')', '.', 'lists', '(', ')', ':', '# copy.lists works in py2 and py3', 'if', 'name', 'not', 'in', '_SKIPPED_ARGUMENTS', ':', 'column', '=', '_re_column_name', '.', 'search', '(', 'name', ')', '.', 'group', '(', '1', ')', 'if', 'column', 'not', 'in', 'self', '.', '_model_columns', ':', 'continue', 'for', 'value', 'in', 'values', ':', 'if', 'name', '.', 'endswith', '(', "'_ne'", ')', ':', 'filter_by', '.', 'append', '(', 'name', '[', ':', '-', '3', ']', '+', "'!='", '+', 'value', ')', 'elif', 'name', '.', 'endswith', '(', "'_lte'", ')', ':', 'filter_by', '.', 'append', '(', 'name', '[', ':', '-', '4', ']', '+', "'<='", '+', 'value', ')', 'elif', 'name', '.', 'endswith', '(', "'_gte'", ')', ':', 'filter_by', '.', 'append', '(', 'name', '[', ':', '-', '4', ']', '+', "'>='", '+', 'value', ')', 'elif', 'name', '.', 'endswith', '(', "'_like'", ')', ':', 'filter_by', '.', 'append', '(', 'name', '[', ':', '-', '5', ']', '+', "'::like::%'", '+', 'value', '+', "'%'", ')', 'else', ':', 'filter_by', '.', 'append', '(', 'name', '.', 'replace', '(', "'__'", ',', "'.'", ')', '+', "'=='", '+', 'value', ')', 'filter_by', '+=', 'self', '.', '_create_fulltext_query', '(', ')', 'return', "','", '.', 'join', '(', 'filter_by', ')']
Transform the json-server filter arguments to model-resource ones.
['Transform', 'the', 'json', '-', 'server', 'filter', 'arguments', 'to', 'model', '-', 'resource', 'ones', '.']
train
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/resource/json_server_resource.py#L95-L120
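The suffix rewriting is easier to follow outside of Flask; this sketch applies the same mapping to a plain dict (translate_filters is a hypothetical name, and the model-column check and fulltext query are omitted).

def translate_filters(args):
    # Map json-server style suffixes onto model-resource comparison operators.
    filter_by = []
    for name, value in args.items():
        if name.endswith('_ne'):
            filter_by.append(name[:-3] + '!=' + value)
        elif name.endswith('_lte'):
            filter_by.append(name[:-4] + '<=' + value)
        elif name.endswith('_gte'):
            filter_by.append(name[:-4] + '>=' + value)
        elif name.endswith('_like'):
            filter_by.append(name[:-5] + '::like::%' + value + '%')
        else:
            filter_by.append(name.replace('__', '.') + '==' + value)
    return ','.join(filter_by)

print(translate_filters({'age_gte': '30', 'name_like': 'smith', 'role__id': '2'}))
# age>=30,name::like::%smith%,role.id==2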
5,460
NickMonzillo/SmartCloud
SmartCloud/__init__.py
Cloud.collides
def collides(self,position,size): '''Returns True if the word collides with another plotted word.''' word_rect = pygame.Rect(position,self.word_size) if word_rect.collidelistall(self.used_pos) == []: return False else: return True
python
def collides(self,position,size): '''Returns True if the word collides with another plotted word.''' word_rect = pygame.Rect(position,self.word_size) if word_rect.collidelistall(self.used_pos) == []: return False else: return True
['def', 'collides', '(', 'self', ',', 'position', ',', 'size', ')', ':', 'word_rect', '=', 'pygame', '.', 'Rect', '(', 'position', ',', 'self', '.', 'word_size', ')', 'if', 'word_rect', '.', 'collidelistall', '(', 'self', '.', 'used_pos', ')', '==', '[', ']', ':', 'return', 'False', 'else', ':', 'return', 'True']
Returns True if the word collides with another plotted word.
['Returns', 'True', 'if', 'the', 'word', 'collides', 'with', 'another', 'plotted', 'word', '.']
train
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/__init__.py#L29-L35
5,461
jkenlooper/chill
src/chill/public.py
PageView.post
def post(self, uri=''): "For sql queries that start with 'INSERT ...'" # get node... (node, rule_kw) = node_from_uri(uri, method=request.method) rule_kw.update( node ) values = rule_kw xhr_data = request.get_json() if xhr_data: values.update( xhr_data ) values.update( request.form.to_dict(flat=True) ) values.update( request.args.to_dict(flat=True) ) values['method'] = request.method # Execute the sql query with the data _query(node['id'], **values) response = make_response('ok', 201) return response
python
def post(self, uri=''): "For sql queries that start with 'INSERT ...'" # get node... (node, rule_kw) = node_from_uri(uri, method=request.method) rule_kw.update( node ) values = rule_kw xhr_data = request.get_json() if xhr_data: values.update( xhr_data ) values.update( request.form.to_dict(flat=True) ) values.update( request.args.to_dict(flat=True) ) values['method'] = request.method # Execute the sql query with the data _query(node['id'], **values) response = make_response('ok', 201) return response
['def', 'post', '(', 'self', ',', 'uri', '=', "''", ')', ':', '# get node...', '(', 'node', ',', 'rule_kw', ')', '=', 'node_from_uri', '(', 'uri', ',', 'method', '=', 'request', '.', 'method', ')', 'rule_kw', '.', 'update', '(', 'node', ')', 'values', '=', 'rule_kw', 'xhr_data', '=', 'request', '.', 'get_json', '(', ')', 'if', 'xhr_data', ':', 'values', '.', 'update', '(', 'xhr_data', ')', 'values', '.', 'update', '(', 'request', '.', 'form', '.', 'to_dict', '(', 'flat', '=', 'True', ')', ')', 'values', '.', 'update', '(', 'request', '.', 'args', '.', 'to_dict', '(', 'flat', '=', 'True', ')', ')', 'values', '[', "'method'", ']', '=', 'request', '.', 'method', '# Execute the sql query with the data', '_query', '(', 'node', '[', "'id'", ']', ',', '*', '*', 'values', ')', 'response', '=', 'make_response', '(', "'ok'", ',', '201', ')', 'return', 'response']
For sql queries that start with 'INSERT ...
['For', 'sql', 'queries', 'that', 'start', 'with', 'INSERT', '...']
train
https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/public.py#L156-L175
5,462
SatelliteQE/nailgun
nailgun/entity_mixins.py
Entity.get_values
def get_values(self): """Return a copy of field values on the current object. This method is almost identical to ``vars(self).copy()``. However, only instance attributes that correspond to a field are included in the returned dict. :return: A dict mapping field names to user-provided values. """ attrs = vars(self).copy() attrs.pop('_server_config') attrs.pop('_fields') attrs.pop('_meta') if '_path_fields' in attrs: attrs.pop('_path_fields') return attrs
python
def get_values(self): """Return a copy of field values on the current object. This method is almost identical to ``vars(self).copy()``. However, only instance attributes that correspond to a field are included in the returned dict. :return: A dict mapping field names to user-provided values. """ attrs = vars(self).copy() attrs.pop('_server_config') attrs.pop('_fields') attrs.pop('_meta') if '_path_fields' in attrs: attrs.pop('_path_fields') return attrs
['def', 'get_values', '(', 'self', ')', ':', 'attrs', '=', 'vars', '(', 'self', ')', '.', 'copy', '(', ')', 'attrs', '.', 'pop', '(', "'_server_config'", ')', 'attrs', '.', 'pop', '(', "'_fields'", ')', 'attrs', '.', 'pop', '(', "'_meta'", ')', 'if', "'_path_fields'", 'in', 'attrs', ':', 'attrs', '.', 'pop', '(', "'_path_fields'", ')', 'return', 'attrs']
Return a copy of field values on the current object. This method is almost identical to ``vars(self).copy()``. However, only instance attributes that correspond to a field are included in the returned dict. :return: A dict mapping field names to user-provided values.
['Return', 'a', 'copy', 'of', 'field', 'values', 'on', 'the', 'current', 'object', '.']
train
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entity_mixins.py#L501-L516
5,463
kubernetes-client/python
kubernetes/client/apis/policy_v1beta1_api.py
PolicyV1beta1Api.list_pod_disruption_budget_for_all_namespaces
def list_pod_disruption_budget_for_all_namespaces(self, **kwargs): """ list or watch objects of kind PodDisruptionBudget This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_pod_disruption_budget_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1PodDisruptionBudgetList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_pod_disruption_budget_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_pod_disruption_budget_for_all_namespaces_with_http_info(**kwargs) return data
python
def list_pod_disruption_budget_for_all_namespaces(self, **kwargs): """ list or watch objects of kind PodDisruptionBudget This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_pod_disruption_budget_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1PodDisruptionBudgetList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_pod_disruption_budget_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_pod_disruption_budget_for_all_namespaces_with_http_info(**kwargs) return data
['def', 'list_pod_disruption_budget_for_all_namespaces', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async_req'", ')', ':', 'return', 'self', '.', 'list_pod_disruption_budget_for_all_namespaces_with_http_info', '(', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'list_pod_disruption_budget_for_all_namespaces_with_http_info', '(', '*', '*', 'kwargs', ')', 'return', 'data']
list or watch objects of kind PodDisruptionBudget This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_pod_disruption_budget_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1PodDisruptionBudgetList If the method is called asynchronously, returns the request thread.
['list', 'or', 'watch', 'objects', 'of', 'kind', 'PodDisruptionBudget', 'This', 'method', 'makes', 'a', 'synchronous', 'HTTP', 'request', 'by', 'default', '.', 'To', 'make', 'an', 'asynchronous', 'HTTP', 'request', 'please', 'pass', 'async_req', '=', 'True', '>>>', 'thread', '=', 'api', '.', 'list_pod_disruption_budget_for_all_namespaces', '(', 'async_req', '=', 'True', ')', '>>>', 'result', '=', 'thread', '.', 'get', '()']
train
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/policy_v1beta1_api.py#L973-L999
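A minimal usage sketch for list_pod_disruption_budget_for_all_namespaces, assuming a local kubeconfig is available; the label selector and limit values are placeholders rather than anything taken from the record above.

    from kubernetes import client, config

    config.load_kube_config()                      # or config.load_incluster_config() inside a pod
    api = client.PolicyV1beta1Api()
    # List PodDisruptionBudgets across all namespaces, filtered by a hypothetical label.
    pdbs = api.list_pod_disruption_budget_for_all_namespaces(label_selector='app=web', limit=50)
    for pdb in pdbs.items:
        print(pdb.metadata.namespace, pdb.metadata.name)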
5,464
smdabdoub/phylotoast
phylotoast/biom_calc.py
transform_raw_abundance
def transform_raw_abundance(biomf, fn=math.log10, sampleIDs=None, sample_abd=True): """ Function to transform the total abundance calculation for each sample ID to another format based on user given transformation function. :type biomf: A BIOM file. :param biomf: OTU table format. :param fn: Mathematical function which is used to transform smax to another format. By default, the function has been given as base 10 logarithm. :rtype: dict :return: Returns a dictionary similar to output of raw_abundance function but with the abundance values modified by the mathematical operation. By default, the operation performed on the abundances is base 10 logarithm. """ totals = raw_abundance(biomf, sampleIDs, sample_abd) return {sid: fn(abd) for sid, abd in totals.items()}
python
def transform_raw_abundance(biomf, fn=math.log10, sampleIDs=None, sample_abd=True): """ Function to transform the total abundance calculation for each sample ID to another format based on user given transformation function. :type biomf: A BIOM file. :param biomf: OTU table format. :param fn: Mathematical function which is used to transform smax to another format. By default, the function has been given as base 10 logarithm. :rtype: dict :return: Returns a dictionary similar to output of raw_abundance function but with the abundance values modified by the mathematical operation. By default, the operation performed on the abundances is base 10 logarithm. """ totals = raw_abundance(biomf, sampleIDs, sample_abd) return {sid: fn(abd) for sid, abd in totals.items()}
['def', 'transform_raw_abundance', '(', 'biomf', ',', 'fn', '=', 'math', '.', 'log10', ',', 'sampleIDs', '=', 'None', ',', 'sample_abd', '=', 'True', ')', ':', 'totals', '=', 'raw_abundance', '(', 'biomf', ',', 'sampleIDs', ',', 'sample_abd', ')', 'return', '{', 'sid', ':', 'fn', '(', 'abd', ')', 'for', 'sid', ',', 'abd', 'in', 'totals', '.', 'items', '(', ')', '}']
Function to transform the total abundance calculation for each sample ID to another format based on user given transformation function. :type biomf: A BIOM file. :param biomf: OTU table format. :param fn: Mathematical function which is used to transform smax to another format. By default, the function has been given as base 10 logarithm. :rtype: dict :return: Returns a dictionary similar to output of raw_abundance function but with the abundance values modified by the mathematical operation. By default, the operation performed on the abundances is base 10 logarithm.
['Function', 'to', 'transform', 'the', 'total', 'abundance', 'calculation', 'for', 'each', 'sample', 'ID', 'to', 'another', 'format', 'based', 'on', 'user', 'given', 'transformation', 'function', '.']
train
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L138-L155
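A minimal usage sketch for transform_raw_abundance, assuming the OTU table is loaded with biom.load_table from the biom-format package; the file name and sample IDs are placeholders.

    import math
    import biom
    from phylotoast.biom_calc import transform_raw_abundance

    biomf = biom.load_table('otu_table.biom')                 # hypothetical BIOM file
    log10_totals = transform_raw_abundance(biomf)             # default: log10 of per-sample totals
    log2_totals = transform_raw_abundance(biomf, fn=math.log2,
                                          sampleIDs=['S1', 'S2'])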
5,465
oasiswork/zimsoap
zimsoap/client.py
ZimbraAccountClient.get_share_info
def get_share_info(self, grantee_type=None, grantee_id=None, grantee_name=None, owner=None, owner_type='name'): """ :returns: list of dict representing shares informations """ params = {} if grantee_type: if 'grantee' not in params.keys(): params['grantee'] = {} params['grantee'].update({'type': grantee_type}) if grantee_id: if 'grantee' not in params.keys(): params['grantee'] = {} params['grantee'].update({'id': grantee_id}) if grantee_name: if 'grantee' not in params.keys(): params['grantee'] = {} params['grantee'].update({'name': grantee_name}) if owner: params['owner'] = {'by': owner_type, '_content': owner} try: resp = self.request('GetShareInfo', params) # if user never logged in, no mailbox was created except ZimbraSoapServerError as e: if 'mailbox not found for account' in str(e): return [] else: raise e if resp and isinstance(resp['share'], list): return resp['share'] elif resp and isinstance(resp['share'], dict): return [resp['share']] else: return []
python
def get_share_info(self, grantee_type=None, grantee_id=None, grantee_name=None, owner=None, owner_type='name'): """ :returns: list of dict representing shares informations """ params = {} if grantee_type: if 'grantee' not in params.keys(): params['grantee'] = {} params['grantee'].update({'type': grantee_type}) if grantee_id: if 'grantee' not in params.keys(): params['grantee'] = {} params['grantee'].update({'id': grantee_id}) if grantee_name: if 'grantee' not in params.keys(): params['grantee'] = {} params['grantee'].update({'name': grantee_name}) if owner: params['owner'] = {'by': owner_type, '_content': owner} try: resp = self.request('GetShareInfo', params) # if user never logged in, no mailbox was created except ZimbraSoapServerError as e: if 'mailbox not found for account' in str(e): return [] else: raise e if resp and isinstance(resp['share'], list): return resp['share'] elif resp and isinstance(resp['share'], dict): return [resp['share']] else: return []
['def', 'get_share_info', '(', 'self', ',', 'grantee_type', '=', 'None', ',', 'grantee_id', '=', 'None', ',', 'grantee_name', '=', 'None', ',', 'owner', '=', 'None', ',', 'owner_type', '=', "'name'", ')', ':', 'params', '=', '{', '}', 'if', 'grantee_type', ':', 'if', "'grantee'", 'not', 'in', 'params', '.', 'keys', '(', ')', ':', 'params', '[', "'grantee'", ']', '=', '{', '}', 'params', '[', "'grantee'", ']', '.', 'update', '(', '{', "'type'", ':', 'grantee_type', '}', ')', 'if', 'grantee_id', ':', 'if', "'grantee'", 'not', 'in', 'params', '.', 'keys', '(', ')', ':', 'params', '[', "'grantee'", ']', '=', '{', '}', 'params', '[', "'grantee'", ']', '.', 'update', '(', '{', "'id'", ':', 'grantee_id', '}', ')', 'if', 'grantee_name', ':', 'if', "'grantee'", 'not', 'in', 'params', '.', 'keys', '(', ')', ':', 'params', '[', "'grantee'", ']', '=', '{', '}', 'params', '[', "'grantee'", ']', '.', 'update', '(', '{', "'name'", ':', 'grantee_name', '}', ')', 'if', 'owner', ':', 'params', '[', "'owner'", ']', '=', '{', "'by'", ':', 'owner_type', ',', "'_content'", ':', 'owner', '}', 'try', ':', 'resp', '=', 'self', '.', 'request', '(', "'GetShareInfo'", ',', 'params', ')', '# if user never logged in, no mailbox was created', 'except', 'ZimbraSoapServerError', 'as', 'e', ':', 'if', "'mailbox not found for account'", 'in', 'str', '(', 'e', ')', ':', 'return', '[', ']', 'else', ':', 'raise', 'e', 'if', 'resp', 'and', 'isinstance', '(', 'resp', '[', "'share'", ']', ',', 'list', ')', ':', 'return', 'resp', '[', "'share'", ']', 'elif', 'resp', 'and', 'isinstance', '(', 'resp', '[', "'share'", ']', ',', 'dict', ')', ':', 'return', '[', 'resp', '[', "'share'", ']', ']', 'else', ':', 'return', '[', ']']
:returns: list of dict representing shares informations
[':', 'returns', ':', 'list', 'of', 'dict', 'representing', 'shares', 'informations']
train
https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/client.py#L333-L367
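A minimal usage sketch for get_share_info, assuming 'client' is an already-authenticated ZimbraAccountClient (connection setup is outside this record); the owner address and the keys read from each share dict are assumptions about Zimbra's GetShareInfo response, not values from the record above.

    # 'client' is assumed to be an authenticated zimsoap ZimbraAccountClient.
    shares = client.get_share_info(owner='alice@example.com', owner_type='name')
    for share in shares:
        # 'folderPath' and 'rights' follow Zimbra's shareInfo attributes (assumed).
        print(share.get('folderPath'), share.get('rights'))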
5,466
meraki-analytics/datapipelines-python
datapipelines/pipelines.py
_SourceHandler.get
def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T: """Gets a query from the data source. 1) Extracts the query from the data source. 2) Inserts the result into any data sinks. 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object. """ result = self._source.get(self._source_type, deepcopy(query), context) LOGGER.info("Got result \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source)) LOGGER.info("Sending result \"{result}\" to sinks before converting".format(result=result)) for sink in self._before_transform: sink.put(result, context) LOGGER.info("Converting result \"{result}\" to request type".format(result=result)) result = self._transform(data=result, context=context) LOGGER.info("Sending result \"{result}\" to sinks after converting".format(result=result)) for sink in self._after_transform: sink.put(result, context) return result
python
def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T: """Gets a query from the data source. 1) Extracts the query from the data source. 2) Inserts the result into any data sinks. 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object. """ result = self._source.get(self._source_type, deepcopy(query), context) LOGGER.info("Got result \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source)) LOGGER.info("Sending result \"{result}\" to sinks before converting".format(result=result)) for sink in self._before_transform: sink.put(result, context) LOGGER.info("Converting result \"{result}\" to request type".format(result=result)) result = self._transform(data=result, context=context) LOGGER.info("Sending result \"{result}\" to sinks after converting".format(result=result)) for sink in self._after_transform: sink.put(result, context) return result
['def', 'get', '(', 'self', ',', 'query', ':', 'Mapping', '[', 'str', ',', 'Any', ']', ',', 'context', ':', 'PipelineContext', '=', 'None', ')', '->', 'T', ':', 'result', '=', 'self', '.', '_source', '.', 'get', '(', 'self', '.', '_source_type', ',', 'deepcopy', '(', 'query', ')', ',', 'context', ')', 'LOGGER', '.', 'info', '(', '"Got result \\"{result}\\" from query \\"{query}\\" of source \\"{source}\\""', '.', 'format', '(', 'result', '=', 'result', ',', 'query', '=', 'query', ',', 'source', '=', 'self', '.', '_source', ')', ')', 'LOGGER', '.', 'info', '(', '"Sending result \\"{result}\\" to sinks before converting"', '.', 'format', '(', 'result', '=', 'result', ')', ')', 'for', 'sink', 'in', 'self', '.', '_before_transform', ':', 'sink', '.', 'put', '(', 'result', ',', 'context', ')', 'LOGGER', '.', 'info', '(', '"Converting result \\"{result}\\" to request type"', '.', 'format', '(', 'result', '=', 'result', ')', ')', 'result', '=', 'self', '.', '_transform', '(', 'data', '=', 'result', ',', 'context', '=', 'context', ')', 'LOGGER', '.', 'info', '(', '"Sending result \\"{result}\\" to sinks after converting"', '.', 'format', '(', 'result', '=', 'result', ')', ')', 'for', 'sink', 'in', 'self', '.', '_after_transform', ':', 'sink', '.', 'put', '(', 'result', ',', 'context', ')', 'return', 'result']
Gets a query from the data source. 1) Extracts the query from the data source. 2) Inserts the result into any data sinks. 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object.
['Gets', 'a', 'query', 'from', 'the', 'data', 'source', '.']
train
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L170-L199
5,467
ejeschke/ginga
ginga/BaseImage.py
BaseImage.cutout_data
def cutout_data(self, x1, y1, x2, y2, xstep=1, ystep=1, astype=None): """cut out data area based on coords. """ view = np.s_[y1:y2:ystep, x1:x2:xstep] data = self._slice(view) if astype: data = data.astype(astype, copy=False) return data
python
def cutout_data(self, x1, y1, x2, y2, xstep=1, ystep=1, astype=None): """cut out data area based on coords. """ view = np.s_[y1:y2:ystep, x1:x2:xstep] data = self._slice(view) if astype: data = data.astype(astype, copy=False) return data
['def', 'cutout_data', '(', 'self', ',', 'x1', ',', 'y1', ',', 'x2', ',', 'y2', ',', 'xstep', '=', '1', ',', 'ystep', '=', '1', ',', 'astype', '=', 'None', ')', ':', 'view', '=', 'np', '.', 's_', '[', 'y1', ':', 'y2', ':', 'ystep', ',', 'x1', ':', 'x2', ':', 'xstep', ']', 'data', '=', 'self', '.', '_slice', '(', 'view', ')', 'if', 'astype', ':', 'data', '=', 'data', '.', 'astype', '(', 'astype', ',', 'copy', '=', 'False', ')', 'return', 'data']
cut out data area based on coords.
['cut', 'out', 'data', 'area', 'based', 'on', 'coords', '.']
train
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/BaseImage.py#L293-L300
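A minimal usage sketch for cutout_data, assuming the image is a ginga AstroImage loaded from a FITS file; the file name and coordinates are placeholders.

    import numpy as np
    from ginga.AstroImage import AstroImage

    image = AstroImage()
    image.load_file('frame.fits')                  # hypothetical FITS file
    # Every 2nd pixel of a 100x100 region, returned as float32.
    cut = image.cutout_data(50, 50, 150, 150, xstep=2, ystep=2, astype=np.float32)
    print(cut.shape)                               # (50, 50)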
5,468
inveniosoftware/invenio-github
invenio_github/ext.py
InvenioGitHub.init_app
def init_app(self, app): """Flask application initialization.""" self.init_config(app) app.extensions['invenio-github'] = self @app.before_first_request def connect_signals(): """Connect OAuthClient signals.""" from invenio_oauthclient.models import RemoteAccount from invenio_oauthclient.signals import account_setup_committed from .api import GitHubAPI from .handlers import account_post_init account_setup_committed.connect( account_post_init, sender=GitHubAPI.remote._get_current_object() ) @event.listens_for(RemoteAccount, 'before_delete') def receive_before_delete(mapper, connection, target): """Listen for the 'before_delete' event."""
python
def init_app(self, app): """Flask application initialization.""" self.init_config(app) app.extensions['invenio-github'] = self @app.before_first_request def connect_signals(): """Connect OAuthClient signals.""" from invenio_oauthclient.models import RemoteAccount from invenio_oauthclient.signals import account_setup_committed from .api import GitHubAPI from .handlers import account_post_init account_setup_committed.connect( account_post_init, sender=GitHubAPI.remote._get_current_object() ) @event.listens_for(RemoteAccount, 'before_delete') def receive_before_delete(mapper, connection, target): """Listen for the 'before_delete' event."""
['def', 'init_app', '(', 'self', ',', 'app', ')', ':', 'self', '.', 'init_config', '(', 'app', ')', 'app', '.', 'extensions', '[', "'invenio-github'", ']', '=', 'self', '@', 'app', '.', 'before_first_request', 'def', 'connect_signals', '(', ')', ':', '"""Connect OAuthClient signals."""', 'from', 'invenio_oauthclient', '.', 'models', 'import', 'RemoteAccount', 'from', 'invenio_oauthclient', '.', 'signals', 'import', 'account_setup_committed', 'from', '.', 'api', 'import', 'GitHubAPI', 'from', '.', 'handlers', 'import', 'account_post_init', 'account_setup_committed', '.', 'connect', '(', 'account_post_init', ',', 'sender', '=', 'GitHubAPI', '.', 'remote', '.', '_get_current_object', '(', ')', ')', '@', 'event', '.', 'listens_for', '(', 'RemoteAccount', ',', "'before_delete'", ')', 'def', 'receive_before_delete', '(', 'mapper', ',', 'connection', ',', 'target', ')', ':', '"""Listen for the \'before_delete\' event."""']
Flask application initialization.
['Flask', 'application', 'initialization', '.']
train
https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/ext.py#L63-L84
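A minimal wiring sketch for init_app following the usual Flask extension pattern; it assumes InvenioGitHub is importable from invenio_github.ext and that invenio-oauthclient is installed by the time the first request is served.

    from flask import Flask
    from invenio_github.ext import InvenioGitHub

    app = Flask(__name__)
    ext = InvenioGitHub()
    ext.init_app(app)                              # registers config and signal wiring
    assert 'invenio-github' in app.extensions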
5,469
inveniosoftware/invenio-collections
invenio_collections/views.py
collection
def collection(name=None): """Render the collection page. It renders it either with a collection specific template (aka collection_{collection_name}.html) or with the default collection template (collection.html). """ if name is None: collection = Collection.query.get_or_404(1) else: collection = Collection.query.filter( Collection.name == name).first_or_404() # TODO add breadcrumbs # breadcrumbs = current_breadcrumbs + collection.breadcrumbs(ln=g.ln)[1:] return render_template([ 'invenio_collections/collection_{0}.html'.format(collection.id), 'invenio_collections/collection_{0}.html'.format(slugify(name, '_')), current_app.config['COLLECTIONS_DEFAULT_TEMPLATE'] ], collection=collection)
python
def collection(name=None): """Render the collection page. It renders it either with a collection specific template (aka collection_{collection_name}.html) or with the default collection template (collection.html). """ if name is None: collection = Collection.query.get_or_404(1) else: collection = Collection.query.filter( Collection.name == name).first_or_404() # TODO add breadcrumbs # breadcrumbs = current_breadcrumbs + collection.breadcrumbs(ln=g.ln)[1:] return render_template([ 'invenio_collections/collection_{0}.html'.format(collection.id), 'invenio_collections/collection_{0}.html'.format(slugify(name, '_')), current_app.config['COLLECTIONS_DEFAULT_TEMPLATE'] ], collection=collection)
['def', 'collection', '(', 'name', '=', 'None', ')', ':', 'if', 'name', 'is', 'None', ':', 'collection', '=', 'Collection', '.', 'query', '.', 'get_or_404', '(', '1', ')', 'else', ':', 'collection', '=', 'Collection', '.', 'query', '.', 'filter', '(', 'Collection', '.', 'name', '==', 'name', ')', '.', 'first_or_404', '(', ')', '# TODO add breadcrumbs', '# breadcrumbs = current_breadcrumbs + collection.breadcrumbs(ln=g.ln)[1:]', 'return', 'render_template', '(', '[', "'invenio_collections/collection_{0}.html'", '.', 'format', '(', 'collection', '.', 'id', ')', ',', "'invenio_collections/collection_{0}.html'", '.', 'format', '(', 'slugify', '(', 'name', ',', "'_'", ')', ')', ',', 'current_app', '.', 'config', '[', "'COLLECTIONS_DEFAULT_TEMPLATE'", ']', ']', ',', 'collection', '=', 'collection', ')']
Render the collection page. It renders it either with a collection specific template (aka collection_{collection_name}.html) or with the default collection template (collection.html).
['Render', 'the', 'collection', 'page', '.']
train
https://github.com/inveniosoftware/invenio-collections/blob/f3adca45c6d00a4dbf1f48fd501e8a68fe347f2f/invenio_collections/views.py#L44-L63
5,470
bcbio/bcbio-nextgen
bcbio/structural/prioritize.py
_combine_files
def _combine_files(tsv_files, work_dir, data): """Combine multiple priority tsv files into a final sorted output. """ header = "\t".join(["caller", "sample", "chrom", "start", "end", "svtype", "lof", "annotation", "split_read_support", "paired_support_PE", "paired_support_PR"]) sample = dd.get_sample_name(data) out_file = os.path.join(work_dir, "%s-prioritize.tsv" % (sample)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: tmpdir = os.path.dirname(tx_out_file) input_files = " ".join(tsv_files) sort_cmd = bedutils.get_sort_cmd(tmpdir) cmd = "{{ echo '{header}'; cat {input_files} | {sort_cmd} -k3,3 -k4,4n; }} > {tx_out_file}" do.run(cmd.format(**locals()), "Combine prioritized from multiple callers") return out_file
python
def _combine_files(tsv_files, work_dir, data): """Combine multiple priority tsv files into a final sorted output. """ header = "\t".join(["caller", "sample", "chrom", "start", "end", "svtype", "lof", "annotation", "split_read_support", "paired_support_PE", "paired_support_PR"]) sample = dd.get_sample_name(data) out_file = os.path.join(work_dir, "%s-prioritize.tsv" % (sample)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: tmpdir = os.path.dirname(tx_out_file) input_files = " ".join(tsv_files) sort_cmd = bedutils.get_sort_cmd(tmpdir) cmd = "{{ echo '{header}'; cat {input_files} | {sort_cmd} -k3,3 -k4,4n; }} > {tx_out_file}" do.run(cmd.format(**locals()), "Combine prioritized from multiple callers") return out_file
['def', '_combine_files', '(', 'tsv_files', ',', 'work_dir', ',', 'data', ')', ':', 'header', '=', '"\\t"', '.', 'join', '(', '[', '"caller"', ',', '"sample"', ',', '"chrom"', ',', '"start"', ',', '"end"', ',', '"svtype"', ',', '"lof"', ',', '"annotation"', ',', '"split_read_support"', ',', '"paired_support_PE"', ',', '"paired_support_PR"', ']', ')', 'sample', '=', 'dd', '.', 'get_sample_name', '(', 'data', ')', 'out_file', '=', 'os', '.', 'path', '.', 'join', '(', 'work_dir', ',', '"%s-prioritize.tsv"', '%', '(', 'sample', ')', ')', 'if', 'not', 'utils', '.', 'file_exists', '(', 'out_file', ')', ':', 'with', 'file_transaction', '(', 'data', ',', 'out_file', ')', 'as', 'tx_out_file', ':', 'tmpdir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'tx_out_file', ')', 'input_files', '=', '" "', '.', 'join', '(', 'tsv_files', ')', 'sort_cmd', '=', 'bedutils', '.', 'get_sort_cmd', '(', 'tmpdir', ')', 'cmd', '=', '"{{ echo \'{header}\'; cat {input_files} | {sort_cmd} -k3,3 -k4,4n; }} > {tx_out_file}"', 'do', '.', 'run', '(', 'cmd', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')', ',', '"Combine prioritized from multiple callers"', ')', 'return', 'out_file']
Combine multiple priority tsv files into a final sorted output.
['Combine', 'multiple', 'priority', 'tsv', 'files', 'into', 'a', 'final', 'sorted', 'output', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L145-L159
5,471
pysathq/pysat
examples/rc2.py
parse_options
def parse_options(): """ Parses command-line option """ try: opts, args = getopt.getopt(sys.argv[1:], 'ac:e:hilms:t:vx', ['adapt', 'comp=', 'enum=', 'exhaust', 'help', 'incr', 'blo', 'minimize', 'solver=', 'trim=', 'verbose']) except getopt.GetoptError as err: sys.stderr.write(str(err).capitalize()) usage() sys.exit(1) adapt = False exhaust = False cmode = None to_enum = 1 incr = False blo = False minz = False solver = 'g3' trim = 0 verbose = 1 for opt, arg in opts: if opt in ('-a', '--adapt'): adapt = True elif opt in ('-c', '--comp'): cmode = str(arg) elif opt in ('-e', '--enum'): to_enum = str(arg) if to_enum != 'all': to_enum = int(to_enum) else: to_enum = 0 elif opt in ('-h', '--help'): usage() sys.exit(0) elif opt in ('-i', '--incr'): incr = True elif opt in ('-l', '--blo'): blo = True elif opt in ('-m', '--minimize'): minz = True elif opt in ('-s', '--solver'): solver = str(arg) elif opt in ('-t', '--trim'): trim = int(arg) elif opt in ('-v', '--verbose'): verbose += 1 elif opt in ('-x', '--exhaust'): exhaust = True else: assert False, 'Unhandled option: {0} {1}'.format(opt, arg) return adapt, blo, cmode, to_enum, exhaust, incr, minz, solver, trim, \ verbose, args
python
def parse_options(): """ Parses command-line option """ try: opts, args = getopt.getopt(sys.argv[1:], 'ac:e:hilms:t:vx', ['adapt', 'comp=', 'enum=', 'exhaust', 'help', 'incr', 'blo', 'minimize', 'solver=', 'trim=', 'verbose']) except getopt.GetoptError as err: sys.stderr.write(str(err).capitalize()) usage() sys.exit(1) adapt = False exhaust = False cmode = None to_enum = 1 incr = False blo = False minz = False solver = 'g3' trim = 0 verbose = 1 for opt, arg in opts: if opt in ('-a', '--adapt'): adapt = True elif opt in ('-c', '--comp'): cmode = str(arg) elif opt in ('-e', '--enum'): to_enum = str(arg) if to_enum != 'all': to_enum = int(to_enum) else: to_enum = 0 elif opt in ('-h', '--help'): usage() sys.exit(0) elif opt in ('-i', '--incr'): incr = True elif opt in ('-l', '--blo'): blo = True elif opt in ('-m', '--minimize'): minz = True elif opt in ('-s', '--solver'): solver = str(arg) elif opt in ('-t', '--trim'): trim = int(arg) elif opt in ('-v', '--verbose'): verbose += 1 elif opt in ('-x', '--exhaust'): exhaust = True else: assert False, 'Unhandled option: {0} {1}'.format(opt, arg) return adapt, blo, cmode, to_enum, exhaust, incr, minz, solver, trim, \ verbose, args
['def', 'parse_options', '(', ')', ':', 'try', ':', 'opts', ',', 'args', '=', 'getopt', '.', 'getopt', '(', 'sys', '.', 'argv', '[', '1', ':', ']', ',', "'ac:e:hilms:t:vx'", ',', '[', "'adapt'", ',', "'comp='", ',', "'enum='", ',', "'exhaust'", ',', "'help'", ',', "'incr'", ',', "'blo'", ',', "'minimize'", ',', "'solver='", ',', "'trim='", ',', "'verbose'", ']', ')', 'except', 'getopt', '.', 'GetoptError', 'as', 'err', ':', 'sys', '.', 'stderr', '.', 'write', '(', 'str', '(', 'err', ')', '.', 'capitalize', '(', ')', ')', 'usage', '(', ')', 'sys', '.', 'exit', '(', '1', ')', 'adapt', '=', 'False', 'exhaust', '=', 'False', 'cmode', '=', 'None', 'to_enum', '=', '1', 'incr', '=', 'False', 'blo', '=', 'False', 'minz', '=', 'False', 'solver', '=', "'g3'", 'trim', '=', '0', 'verbose', '=', '1', 'for', 'opt', ',', 'arg', 'in', 'opts', ':', 'if', 'opt', 'in', '(', "'-a'", ',', "'--adapt'", ')', ':', 'adapt', '=', 'True', 'elif', 'opt', 'in', '(', "'-c'", ',', "'--comp'", ')', ':', 'cmode', '=', 'str', '(', 'arg', ')', 'elif', 'opt', 'in', '(', "'-e'", ',', "'--enum'", ')', ':', 'to_enum', '=', 'str', '(', 'arg', ')', 'if', 'to_enum', '!=', "'all'", ':', 'to_enum', '=', 'int', '(', 'to_enum', ')', 'else', ':', 'to_enum', '=', '0', 'elif', 'opt', 'in', '(', "'-h'", ',', "'--help'", ')', ':', 'usage', '(', ')', 'sys', '.', 'exit', '(', '0', ')', 'elif', 'opt', 'in', '(', "'-i'", ',', "'--incr'", ')', ':', 'incr', '=', 'True', 'elif', 'opt', 'in', '(', "'-l'", ',', "'--blo'", ')', ':', 'blo', '=', 'True', 'elif', 'opt', 'in', '(', "'-m'", ',', "'--minimize'", ')', ':', 'minz', '=', 'True', 'elif', 'opt', 'in', '(', "'-s'", ',', "'--solver'", ')', ':', 'solver', '=', 'str', '(', 'arg', ')', 'elif', 'opt', 'in', '(', "'-t'", ',', "'--trim'", ')', ':', 'trim', '=', 'int', '(', 'arg', ')', 'elif', 'opt', 'in', '(', "'-v'", ',', "'--verbose'", ')', ':', 'verbose', '+=', '1', 'elif', 'opt', 'in', '(', "'-x'", ',', "'--exhaust'", ')', ':', 'exhaust', '=', 'True', 'else', ':', 'assert', 'False', ',', "'Unhandled option: {0} {1}'", '.', 'format', '(', 'opt', ',', 'arg', ')', 'return', 'adapt', ',', 'blo', ',', 'cmode', ',', 'to_enum', ',', 'exhaust', ',', 'incr', ',', 'minz', ',', 'solver', ',', 'trim', ',', 'verbose', ',', 'args']
Parses command-line option
['Parses', 'command', '-', 'line', 'option']
train
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/rc2.py#L1463-L1520
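A sketch of how the return tuple of parse_options unpacks inside the rc2.py example script; the command line and formula file name are placeholders.

    # Hypothetical invocation:  python rc2.py -a -x -v -s g3 -t 2 formula.wcnf
    adapt, blo, cmode, to_enum, exhaust, incr, minz, solver, trim, verbose, files = parse_options()
    print(solver, trim, verbose, files)            # g3 2 2 ['formula.wcnf']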
5,472
jplusplus/statscraper
statscraper/base_scraper.py
ResultSet.append
def append(self, val): """Connect any new results to the resultset. This is where all the heavy lifting is done for creating results: - We add a datatype here, so that each result can handle validation etc independently. This is so that scraper authors don't need to worry about creating and passing around datatype objects. - As the scraper author yields result objects, we append them to a resultset. - This is also where we normalize dialects. """ val.resultset = self val.dataset = self.dataset # Check result dimensions against available dimensions for this dataset if val.dataset: dataset_dimensions = self.dataset.dimensions for k, v in val.raw_dimensions.items(): if k not in dataset_dimensions: d = Dimension(k) else: d = dataset_dimensions[k] # Normalize if we have a datatype and a foreign dialect normalized_value = unicode(v) if d.dialect and d.datatype: if d.dialect in d.datatype.dialects: for av in d.allowed_values: # Not all allowed_value have all dialects if unicode(v) in av.dialects.get(d.dialect, []): normalized_value = av.value # Use first match # We do not support multiple matches # This is by design. break # Create DimensionValue object if isinstance(v, DimensionValue): dim = v v.value = normalized_value else: if k in dataset_dimensions: dim = DimensionValue(normalized_value, d) else: dim = DimensionValue(normalized_value, Dimension()) val.dimensionvalues.append(dim) # Add last list of dimension values to the ResultSet # They will usually be the same for each result self.dimensionvalues = val.dimensionvalues super(ResultSet, self).append(val)
python
def append(self, val): """Connect any new results to the resultset. This is where all the heavy lifting is done for creating results: - We add a datatype here, so that each result can handle validation etc independently. This is so that scraper authors don't need to worry about creating and passing around datatype objects. - As the scraper author yields result objects, we append them to a resultset. - This is also where we normalize dialects. """ val.resultset = self val.dataset = self.dataset # Check result dimensions against available dimensions for this dataset if val.dataset: dataset_dimensions = self.dataset.dimensions for k, v in val.raw_dimensions.items(): if k not in dataset_dimensions: d = Dimension(k) else: d = dataset_dimensions[k] # Normalize if we have a datatype and a foreign dialect normalized_value = unicode(v) if d.dialect and d.datatype: if d.dialect in d.datatype.dialects: for av in d.allowed_values: # Not all allowed_value have all dialects if unicode(v) in av.dialects.get(d.dialect, []): normalized_value = av.value # Use first match # We do not support multiple matches # This is by design. break # Create DimensionValue object if isinstance(v, DimensionValue): dim = v v.value = normalized_value else: if k in dataset_dimensions: dim = DimensionValue(normalized_value, d) else: dim = DimensionValue(normalized_value, Dimension()) val.dimensionvalues.append(dim) # Add last list of dimension values to the ResultSet # They will usually be the same for each result self.dimensionvalues = val.dimensionvalues super(ResultSet, self).append(val)
['def', 'append', '(', 'self', ',', 'val', ')', ':', 'val', '.', 'resultset', '=', 'self', 'val', '.', 'dataset', '=', 'self', '.', 'dataset', '# Check result dimensions against available dimensions for this dataset', 'if', 'val', '.', 'dataset', ':', 'dataset_dimensions', '=', 'self', '.', 'dataset', '.', 'dimensions', 'for', 'k', ',', 'v', 'in', 'val', '.', 'raw_dimensions', '.', 'items', '(', ')', ':', 'if', 'k', 'not', 'in', 'dataset_dimensions', ':', 'd', '=', 'Dimension', '(', 'k', ')', 'else', ':', 'd', '=', 'dataset_dimensions', '[', 'k', ']', '# Normalize if we have a datatype and a foreign dialect', 'normalized_value', '=', 'unicode', '(', 'v', ')', 'if', 'd', '.', 'dialect', 'and', 'd', '.', 'datatype', ':', 'if', 'd', '.', 'dialect', 'in', 'd', '.', 'datatype', '.', 'dialects', ':', 'for', 'av', 'in', 'd', '.', 'allowed_values', ':', '# Not all allowed_value have all dialects', 'if', 'unicode', '(', 'v', ')', 'in', 'av', '.', 'dialects', '.', 'get', '(', 'd', '.', 'dialect', ',', '[', ']', ')', ':', 'normalized_value', '=', 'av', '.', 'value', '# Use first match', '# We do not support multiple matches', '# This is by design.', 'break', '# Create DimensionValue object', 'if', 'isinstance', '(', 'v', ',', 'DimensionValue', ')', ':', 'dim', '=', 'v', 'v', '.', 'value', '=', 'normalized_value', 'else', ':', 'if', 'k', 'in', 'dataset_dimensions', ':', 'dim', '=', 'DimensionValue', '(', 'normalized_value', ',', 'd', ')', 'else', ':', 'dim', '=', 'DimensionValue', '(', 'normalized_value', ',', 'Dimension', '(', ')', ')', 'val', '.', 'dimensionvalues', '.', 'append', '(', 'dim', ')', '# Add last list of dimension values to the ResultSet', '# They will usually be the same for each result', 'self', '.', 'dimensionvalues', '=', 'val', '.', 'dimensionvalues', 'super', '(', 'ResultSet', ',', 'self', ')', '.', 'append', '(', 'val', ')']
Connect any new results to the resultset. This is where all the heavy lifting is done for creating results: - We add a datatype here, so that each result can handle validation etc independently. This is so that scraper authors don't need to worry about creating and passing around datatype objects. - As the scraper author yields result objects, we append them to a resultset. - This is also where we normalize dialects.
['Connect', 'any', 'new', 'results', 'to', 'the', 'resultset', '.']
train
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/base_scraper.py#L88-L140
5,473
jjjake/internetarchive
internetarchive/session.py
ArchiveSession.set_file_logger
def set_file_logger(self, log_level, path, logger_name='internetarchive'): """Convenience function to quickly configure any level of logging to a file. :type log_level: str :param log_level: A log level as specified in the `logging` module. :type path: string :param path: Path to the log file. The file will be created if it doesn't already exist. :type logger_name: str :param logger_name: (optional) The name of the logger. """ _log_level = { 'CRITICAL': 50, 'ERROR': 40, 'WARNING': 30, 'INFO': 20, 'DEBUG': 10, 'NOTSET': 0, } log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' _log = logging.getLogger(logger_name) _log.setLevel(logging.DEBUG) fh = logging.FileHandler(path, encoding='utf-8') fh.setLevel(_log_level[log_level]) formatter = logging.Formatter(log_format) fh.setFormatter(formatter) _log.addHandler(fh)
python
def set_file_logger(self, log_level, path, logger_name='internetarchive'): """Convenience function to quickly configure any level of logging to a file. :type log_level: str :param log_level: A log level as specified in the `logging` module. :type path: string :param path: Path to the log file. The file will be created if it doesn't already exist. :type logger_name: str :param logger_name: (optional) The name of the logger. """ _log_level = { 'CRITICAL': 50, 'ERROR': 40, 'WARNING': 30, 'INFO': 20, 'DEBUG': 10, 'NOTSET': 0, } log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' _log = logging.getLogger(logger_name) _log.setLevel(logging.DEBUG) fh = logging.FileHandler(path, encoding='utf-8') fh.setLevel(_log_level[log_level]) formatter = logging.Formatter(log_format) fh.setFormatter(formatter) _log.addHandler(fh)
['def', 'set_file_logger', '(', 'self', ',', 'log_level', ',', 'path', ',', 'logger_name', '=', "'internetarchive'", ')', ':', '_log_level', '=', '{', "'CRITICAL'", ':', '50', ',', "'ERROR'", ':', '40', ',', "'WARNING'", ':', '30', ',', "'INFO'", ':', '20', ',', "'DEBUG'", ':', '10', ',', "'NOTSET'", ':', '0', ',', '}', 'log_format', '=', "'%(asctime)s - %(name)s - %(levelname)s - %(message)s'", '_log', '=', 'logging', '.', 'getLogger', '(', 'logger_name', ')', '_log', '.', 'setLevel', '(', 'logging', '.', 'DEBUG', ')', 'fh', '=', 'logging', '.', 'FileHandler', '(', 'path', ',', 'encoding', '=', "'utf-8'", ')', 'fh', '.', 'setLevel', '(', '_log_level', '[', 'log_level', ']', ')', 'formatter', '=', 'logging', '.', 'Formatter', '(', 'log_format', ')', 'fh', '.', 'setFormatter', '(', 'formatter', ')', '_log', '.', 'addHandler', '(', 'fh', ')']
Convenience function to quickly configure any level of logging to a file. :type log_level: str :param log_level: A log level as specified in the `logging` module. :type path: string :param path: Path to the log file. The file will be created if it doesn't already exist. :type logger_name: str :param logger_name: (optional) The name of the logger.
['Convenience', 'function', 'to', 'quickly', 'configure', 'any', 'level', 'of', 'logging', 'to', 'a', 'file', '.']
train
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/session.py#L183-L217
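A minimal usage sketch for set_file_logger on a session obtained from internetarchive.get_session; the log file paths are placeholders.

    from internetarchive import get_session

    session = get_session()                                        # ArchiveSession with default config
    session.set_file_logger('DEBUG', '/tmp/internetarchive.log')   # package logger -> file
    session.set_file_logger('INFO', '/tmp/urllib3.log', logger_name='urllib3')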
5,474
fopina/tgbotplug
tgbot/tgbot.py
TGBot.print_commands
def print_commands(self, out=sys.stdout): ''' utility method to print commands and descriptions for @BotFather ''' cmds = self.list_commands() for ck in cmds: if ck.printable: out.write('%s\n' % ck)
python
def print_commands(self, out=sys.stdout): ''' utility method to print commands and descriptions for @BotFather ''' cmds = self.list_commands() for ck in cmds: if ck.printable: out.write('%s\n' % ck)
['def', 'print_commands', '(', 'self', ',', 'out', '=', 'sys', '.', 'stdout', ')', ':', 'cmds', '=', 'self', '.', 'list_commands', '(', ')', 'for', 'ck', 'in', 'cmds', ':', 'if', 'ck', '.', 'printable', ':', 'out', '.', 'write', '(', "'%s\\n'", '%', 'ck', ')']
utility method to print commands and descriptions for @BotFather
['utility', 'method', 'to', 'print', 'commands', 'and', 'descriptions', 'for']
train
https://github.com/fopina/tgbotplug/blob/c115733b03f2e23ddcdecfce588d1a6a1e5bde91/tgbot/tgbot.py#L161-L169
5,475
devopshq/youtrack
youtrack/connection.py
Connection.import_links
def import_links(self, links): """ Import links, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Links) Accepts result of getLinks() Example: importLinks([{'login':'vadim', 'fullName':'vadim', 'email':'[email protected]', 'jabber':'[email protected]'}, {'login':'maxim', 'fullName':'maxim', 'email':'[email protected]', 'jabber':'[email protected]'}]) """ xml = '<list>\n' for l in links: # ignore typeOutward and typeInward returned by getLinks() xml += ' <link ' + "".join(attr + '=' + quoteattr(l[attr]) + ' ' for attr in l if attr not in ['typeInward', 'typeOutward']) + '/>\n' xml += '</list>' # TODO: convert response xml into python objects res = self._req_xml('PUT', '/import/links', xml, 400) return res.toxml() if hasattr(res, "toxml") else res
python
def import_links(self, links): """ Import links, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Links) Accepts result of getLinks() Example: importLinks([{'login':'vadim', 'fullName':'vadim', 'email':'[email protected]', 'jabber':'[email protected]'}, {'login':'maxim', 'fullName':'maxim', 'email':'[email protected]', 'jabber':'[email protected]'}]) """ xml = '<list>\n' for l in links: # ignore typeOutward and typeInward returned by getLinks() xml += ' <link ' + "".join(attr + '=' + quoteattr(l[attr]) + ' ' for attr in l if attr not in ['typeInward', 'typeOutward']) + '/>\n' xml += '</list>' # TODO: convert response xml into python objects res = self._req_xml('PUT', '/import/links', xml, 400) return res.toxml() if hasattr(res, "toxml") else res
['def', 'import_links', '(', 'self', ',', 'links', ')', ':', 'xml', '=', "'<list>\\n'", 'for', 'l', 'in', 'links', ':', '# ignore typeOutward and typeInward returned by getLinks()', 'xml', '+=', "' <link '", '+', '""', '.', 'join', '(', 'attr', '+', "'='", '+', 'quoteattr', '(', 'l', '[', 'attr', ']', ')', '+', "' '", 'for', 'attr', 'in', 'l', 'if', 'attr', 'not', 'in', '[', "'typeInward'", ',', "'typeOutward'", ']', ')', '+', "'/>\\n'", 'xml', '+=', "'</list>'", '# TODO: convert response xml into python objects', 'res', '=', 'self', '.', '_req_xml', '(', "'PUT'", ',', "'/import/links'", ',', 'xml', ',', '400', ')', 'return', 'res', '.', 'toxml', '(', ')', 'if', 'hasattr', '(', 'res', ',', '"toxml"', ')', 'else', 'res']
Import links, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Links) Accepts result of getLinks() Example: importLinks([{'login':'vadim', 'fullName':'vadim', 'email':'[email protected]', 'jabber':'[email protected]'}, {'login':'maxim', 'fullName':'maxim', 'email':'[email protected]', 'jabber':'[email protected]'}])
['Import', 'links', 'returns', 'import', 'result', '(', 'http', ':', '//', 'confluence', '.', 'jetbrains', '.', 'net', '/', 'display', '/', 'YTD2', '/', 'Import', '+', 'Links', ')', 'Accepts', 'result', 'of', 'getLinks', '()', 'Example', ':', 'importLinks', '(', '[', '{', 'login', ':', 'vadim', 'fullName', ':', 'vadim', 'email', ':', 'eee']
train
https://github.com/devopshq/youtrack/blob/c4ec19aca253ae30ac8eee7976a2f330e480a73b/youtrack/connection.py#L356-L370
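The Example block in the docstring above shows user dictionaries (it appears to be copied from the user-import helper); below is a hedged sketch of what link dictionaries could look like. The typeName/source/target keys follow YouTrack's Import Links format and are assumptions, as is the already-authenticated 'connection' object.

    # 'connection' is assumed to be an authenticated youtrack Connection object.
    links = [
        {'typeName': 'Depend', 'source': 'PRJ-2', 'target': 'PRJ-1'},
        {'typeName': 'Duplicate', 'source': 'PRJ-5', 'target': 'PRJ-3'},
    ]
    result = connection.import_links(links)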
5,476
abingham/docopt-subcommands
docopt_subcommands/subcommands.py
Subcommands.add_command
def add_command(self, handler, name=None): """Add a subcommand `name` which invokes `handler`. """ if name is None: name = docstring_to_subcommand(handler.__doc__) # TODO: Prevent overwriting 'help'? self._commands[name] = handler
python
def add_command(self, handler, name=None): """Add a subcommand `name` which invokes `handler`. """ if name is None: name = docstring_to_subcommand(handler.__doc__) # TODO: Prevent overwriting 'help'? self._commands[name] = handler
['def', 'add_command', '(', 'self', ',', 'handler', ',', 'name', '=', 'None', ')', ':', 'if', 'name', 'is', 'None', ':', 'name', '=', 'docstring_to_subcommand', '(', 'handler', '.', '__doc__', ')', "# TODO: Prevent overwriting 'help'?", 'self', '.', '_commands', '[', 'name', ']', '=', 'handler']
Add a subcommand `name` which invokes `handler`.
['Add', 'a', 'subcommand', 'name', 'which', 'invokes', 'handler', '.']
train
https://github.com/abingham/docopt-subcommands/blob/4b5cd75bb8eed01f9405345446ca58e9a29d67ad/docopt_subcommands/subcommands.py#L98-L105
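A minimal sketch of registering a handler with add_command; it assumes 'sc' is an existing Subcommands instance (its constructor arguments are not shown here) and that the usage line is what docstring_to_subcommand derives the name from.

    def fetch(args):
        """usage: {program} fetch <url>

        Fetch a resource and print its URL.
        """
        print(args['<url>'])

    sc.add_command(fetch, name='fetch')    # explicit name
    sc.add_command(fetch)                  # name derived from the handler docstring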
5,477
avinassh/haxor
hackernews/__init__.py
HackerNews.get_item
def get_item(self, item_id, expand=False): """Returns Hacker News `Item` object. Fetches the data from url: https://hacker-news.firebaseio.com/v0/item/<item_id>.json e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json Args: item_id (int or string): Unique item id of Hacker News story, comment etc. expand (bool): expand (bool): Flag to indicate whether to transform all IDs into objects. Returns: `Item` object representing Hacker News item. Raises: InvalidItemID: If corresponding Hacker News story does not exist. """ url = urljoin(self.item_url, F"{item_id}.json") response = self._get_sync(url) if not response: raise InvalidItemID item = Item(response) if expand: item.by = self.get_user(item.by) item.kids = self.get_items_by_ids(item.kids) if item.kids else None item.parent = self.get_item(item.parent) if item.parent else None item.poll = self.get_item(item.poll) if item.poll else None item.parts = ( self.get_items_by_ids(item.parts) if item.parts else None ) return item
python
def get_item(self, item_id, expand=False): """Returns Hacker News `Item` object. Fetches the data from url: https://hacker-news.firebaseio.com/v0/item/<item_id>.json e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json Args: item_id (int or string): Unique item id of Hacker News story, comment etc. expand (bool): expand (bool): Flag to indicate whether to transform all IDs into objects. Returns: `Item` object representing Hacker News item. Raises: InvalidItemID: If corresponding Hacker News story does not exist. """ url = urljoin(self.item_url, F"{item_id}.json") response = self._get_sync(url) if not response: raise InvalidItemID item = Item(response) if expand: item.by = self.get_user(item.by) item.kids = self.get_items_by_ids(item.kids) if item.kids else None item.parent = self.get_item(item.parent) if item.parent else None item.poll = self.get_item(item.poll) if item.poll else None item.parts = ( self.get_items_by_ids(item.parts) if item.parts else None ) return item
['def', 'get_item', '(', 'self', ',', 'item_id', ',', 'expand', '=', 'False', ')', ':', 'url', '=', 'urljoin', '(', 'self', '.', 'item_url', ',', 'F"{item_id}.json"', ')', 'response', '=', 'self', '.', '_get_sync', '(', 'url', ')', 'if', 'not', 'response', ':', 'raise', 'InvalidItemID', 'item', '=', 'Item', '(', 'response', ')', 'if', 'expand', ':', 'item', '.', 'by', '=', 'self', '.', 'get_user', '(', 'item', '.', 'by', ')', 'item', '.', 'kids', '=', 'self', '.', 'get_items_by_ids', '(', 'item', '.', 'kids', ')', 'if', 'item', '.', 'kids', 'else', 'None', 'item', '.', 'parent', '=', 'self', '.', 'get_item', '(', 'item', '.', 'parent', ')', 'if', 'item', '.', 'parent', 'else', 'None', 'item', '.', 'poll', '=', 'self', '.', 'get_item', '(', 'item', '.', 'poll', ')', 'if', 'item', '.', 'poll', 'else', 'None', 'item', '.', 'parts', '=', '(', 'self', '.', 'get_items_by_ids', '(', 'item', '.', 'parts', ')', 'if', 'item', '.', 'parts', 'else', 'None', ')', 'return', 'item']
Returns Hacker News `Item` object. Fetches the data from url: https://hacker-news.firebaseio.com/v0/item/<item_id>.json e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json Args: item_id (int or string): Unique item id of Hacker News story, comment etc. expand (bool): expand (bool): Flag to indicate whether to transform all IDs into objects. Returns: `Item` object representing Hacker News item. Raises: InvalidItemID: If corresponding Hacker News story does not exist.
['Returns', 'Hacker', 'News', 'Item', 'object', '.']
train
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L165-L202
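A minimal usage sketch for get_item; item id 8863 is the sample id used in the Hacker News API docs and serves only as an example.

    from hackernews import HackerNews

    hn = HackerNews()
    story = hn.get_item(8863)                   # 'by', 'kids', 'parent' stay as raw ids
    expanded = hn.get_item(8863, expand=True)   # nested ids resolved into objects
    print(story.by)                             # plain username string
    print(expanded.by)                          # User object after expansion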
5,478
pmorissette/bt
bt/core.py
SecurityBase.allocate
def allocate(self, amount, update=True): """ This allocates capital to the Security. This is the method used to buy/sell the security. A given amount of shares will be determined on the current price, a commission will be calculated based on the parent's commission fn, and any remaining capital will be passed back up to parent as an adjustment. Args: * amount (float): Amount of adjustment. * update (bool): Force update? """ # will need to update if this has been idle for a while... # update if needupdate or if now is stale # fetch parent's now since our now is stale if self._needupdate or self.now != self.parent.now: self.update(self.parent.now) # ignore 0 alloc # Note that if the price of security has dropped to zero, then it # should never be selected by SelectAll, SelectN etc. I.e. we should # not open the position at zero price. At the same time, we are able # to close it at zero price, because at that point amount=0. # Note also that we don't erase the position in an asset which price # has dropped to zero (though the weight will indeed be = 0) if amount == 0: return if self.parent is self or self.parent is None: raise Exception( 'Cannot allocate capital to a parentless security') if self._price == 0 or np.isnan(self._price): raise Exception( 'Cannot allocate capital to ' '%s because price is %s as of %s' % (self.name, self._price, self.parent.now)) # buy/sell # determine quantity - must also factor in commission # closing out? if amount == -self._value: q = -self._position else: q = amount / (self._price * self.multiplier) if self.integer_positions: if (self._position > 0) or ((self._position == 0) and ( amount > 0)): # if we're going long or changing long position q = math.floor(q) else: # if we're going short or changing short position q = math.ceil(q) # if q is 0 nothing to do if q == 0 or np.isnan(q): return # unless we are closing out a position (q == -position) # we want to ensure that # # - In the event of a positive amount, this indicates the maximum # amount a given security can use up for a purchase. Therefore, if # commissions push us above this amount, we cannot buy `q`, and must # decrease its value # # - In the event of a negative amount, we want to 'raise' at least the # amount indicated, no less. Therefore, if we have commission, we must # sell additional units to fund this requirement. As such, q must once # again decrease. # if not q == -self._position: full_outlay, _, _ = self.outlay(q) # if full outlay > amount, we must decrease the magnitude of `q` # this can potentially lead to an infinite loop if the commission # per share > price per share. However, we cannot really detect # that in advance since the function can be non-linear (say a fn # like max(1, abs(q) * 0.01). Nevertheless, we want to avoid these # situations. # cap the maximum number of iterations to 1e4 and raise exception # if we get there # if integer positions then we know we are stuck if q doesn't change # if integer positions is false then we want full_outlay == amount # if integer positions is true then we want to be at the q where # if we bought 1 more then we wouldn't have enough cash i = 0 last_q = q last_amount_short = full_outlay - amount while not np.isclose(full_outlay, amount, rtol=0.) 
and q != 0: dq_wout_considering_tx_costs = (full_outlay - amount)/(self._price * self.multiplier) q = q - dq_wout_considering_tx_costs if self.integer_positions: q = math.floor(q) full_outlay, _, _ = self.outlay(q) # if our q is too low and we have integer positions # then we know that the correct quantity is the one where # the outlay of q + 1 < amount. i.e. if we bought one more # position then we wouldn't have enough cash if self.integer_positions: full_outlay_of_1_more, _, _ = self.outlay(q + 1) if full_outlay < amount and full_outlay_of_1_more > amount: break # if not integer positions then we should keep going until # full_outlay == amount or is close enough i = i + 1 if i > 1e4: raise Exception( 'Potentially infinite loop detected. This occurred ' 'while trying to reduce the amount of shares purchased' ' to respect the outlay <= amount rule. This is most ' 'likely due to a commission function that outputs a ' 'commission that is greater than the amount of cash ' 'a short sale can raise.') if self.integer_positions and last_q == q: raise Exception( 'Newton Method like root search for quantity is stuck!' ' q did not change in iterations so it is probably a bug' ' but we are not entirely sure it is wrong! Consider ' ' changing to warning.' ) last_q = q if np.abs(full_outlay - amount) > np.abs(last_amount_short): raise Exception( 'The difference between what we have raised with q and' ' the amount we are trying to raise has gotten bigger since' ' last iteration! full_outlay should always be approaching' ' amount! There may be a case where the commission fn is' ' not smooth' ) last_amount_short = full_outlay - amount # if last step led to q == 0, then we can return just like above if q == 0: return # this security will need an update, even if pos is 0 (for example if # we close the positions, value and pos is 0, but still need to do that # last update) self._needupdate = True # adjust position & value self._position += q # calculate proper adjustment for parent # parent passed down amount so we want to pass # -outlay back up to parent to adjust for capital # used full_outlay, outlay, fee = self.outlay(q) # store outlay for future reference self._outlay += outlay # call parent self.parent.adjust(-full_outlay, update=update, flow=False, fee=fee)
python
def allocate(self, amount, update=True): """ This allocates capital to the Security. This is the method used to buy/sell the security. A given amount of shares will be determined on the current price, a commission will be calculated based on the parent's commission fn, and any remaining capital will be passed back up to parent as an adjustment. Args: * amount (float): Amount of adjustment. * update (bool): Force update? """ # will need to update if this has been idle for a while... # update if needupdate or if now is stale # fetch parent's now since our now is stale if self._needupdate or self.now != self.parent.now: self.update(self.parent.now) # ignore 0 alloc # Note that if the price of security has dropped to zero, then it # should never be selected by SelectAll, SelectN etc. I.e. we should # not open the position at zero price. At the same time, we are able # to close it at zero price, because at that point amount=0. # Note also that we don't erase the position in an asset which price # has dropped to zero (though the weight will indeed be = 0) if amount == 0: return if self.parent is self or self.parent is None: raise Exception( 'Cannot allocate capital to a parentless security') if self._price == 0 or np.isnan(self._price): raise Exception( 'Cannot allocate capital to ' '%s because price is %s as of %s' % (self.name, self._price, self.parent.now)) # buy/sell # determine quantity - must also factor in commission # closing out? if amount == -self._value: q = -self._position else: q = amount / (self._price * self.multiplier) if self.integer_positions: if (self._position > 0) or ((self._position == 0) and ( amount > 0)): # if we're going long or changing long position q = math.floor(q) else: # if we're going short or changing short position q = math.ceil(q) # if q is 0 nothing to do if q == 0 or np.isnan(q): return # unless we are closing out a position (q == -position) # we want to ensure that # # - In the event of a positive amount, this indicates the maximum # amount a given security can use up for a purchase. Therefore, if # commissions push us above this amount, we cannot buy `q`, and must # decrease its value # # - In the event of a negative amount, we want to 'raise' at least the # amount indicated, no less. Therefore, if we have commission, we must # sell additional units to fund this requirement. As such, q must once # again decrease. # if not q == -self._position: full_outlay, _, _ = self.outlay(q) # if full outlay > amount, we must decrease the magnitude of `q` # this can potentially lead to an infinite loop if the commission # per share > price per share. However, we cannot really detect # that in advance since the function can be non-linear (say a fn # like max(1, abs(q) * 0.01). Nevertheless, we want to avoid these # situations. # cap the maximum number of iterations to 1e4 and raise exception # if we get there # if integer positions then we know we are stuck if q doesn't change # if integer positions is false then we want full_outlay == amount # if integer positions is true then we want to be at the q where # if we bought 1 more then we wouldn't have enough cash i = 0 last_q = q last_amount_short = full_outlay - amount while not np.isclose(full_outlay, amount, rtol=0.) 
and q != 0: dq_wout_considering_tx_costs = (full_outlay - amount)/(self._price * self.multiplier) q = q - dq_wout_considering_tx_costs if self.integer_positions: q = math.floor(q) full_outlay, _, _ = self.outlay(q) # if our q is too low and we have integer positions # then we know that the correct quantity is the one where # the outlay of q + 1 < amount. i.e. if we bought one more # position then we wouldn't have enough cash if self.integer_positions: full_outlay_of_1_more, _, _ = self.outlay(q + 1) if full_outlay < amount and full_outlay_of_1_more > amount: break # if not integer positions then we should keep going until # full_outlay == amount or is close enough i = i + 1 if i > 1e4: raise Exception( 'Potentially infinite loop detected. This occurred ' 'while trying to reduce the amount of shares purchased' ' to respect the outlay <= amount rule. This is most ' 'likely due to a commission function that outputs a ' 'commission that is greater than the amount of cash ' 'a short sale can raise.') if self.integer_positions and last_q == q: raise Exception( 'Newton Method like root search for quantity is stuck!' ' q did not change in iterations so it is probably a bug' ' but we are not entirely sure it is wrong! Consider ' ' changing to warning.' ) last_q = q if np.abs(full_outlay - amount) > np.abs(last_amount_short): raise Exception( 'The difference between what we have raised with q and' ' the amount we are trying to raise has gotten bigger since' ' last iteration! full_outlay should always be approaching' ' amount! There may be a case where the commission fn is' ' not smooth' ) last_amount_short = full_outlay - amount # if last step led to q == 0, then we can return just like above if q == 0: return # this security will need an update, even if pos is 0 (for example if # we close the positions, value and pos is 0, but still need to do that # last update) self._needupdate = True # adjust position & value self._position += q # calculate proper adjustment for parent # parent passed down amount so we want to pass # -outlay back up to parent to adjust for capital # used full_outlay, outlay, fee = self.outlay(q) # store outlay for future reference self._outlay += outlay # call parent self.parent.adjust(-full_outlay, update=update, flow=False, fee=fee)
['def', 'allocate', '(', 'self', ',', 'amount', ',', 'update', '=', 'True', ')', ':', '# will need to update if this has been idle for a while...', '# update if needupdate or if now is stale', "# fetch parent's now since our now is stale", 'if', 'self', '.', '_needupdate', 'or', 'self', '.', 'now', '!=', 'self', '.', 'parent', '.', 'now', ':', 'self', '.', 'update', '(', 'self', '.', 'parent', '.', 'now', ')', '# ignore 0 alloc', '# Note that if the price of security has dropped to zero, then it', '# should never be selected by SelectAll, SelectN etc. I.e. we should', '# not open the position at zero price. At the same time, we are able', '# to close it at zero price, because at that point amount=0.', "# Note also that we don't erase the position in an asset which price", '# has dropped to zero (though the weight will indeed be = 0)', 'if', 'amount', '==', '0', ':', 'return', 'if', 'self', '.', 'parent', 'is', 'self', 'or', 'self', '.', 'parent', 'is', 'None', ':', 'raise', 'Exception', '(', "'Cannot allocate capital to a parentless security'", ')', 'if', 'self', '.', '_price', '==', '0', 'or', 'np', '.', 'isnan', '(', 'self', '.', '_price', ')', ':', 'raise', 'Exception', '(', "'Cannot allocate capital to '", "'%s because price is %s as of %s'", '%', '(', 'self', '.', 'name', ',', 'self', '.', '_price', ',', 'self', '.', 'parent', '.', 'now', ')', ')', '# buy/sell', '# determine quantity - must also factor in commission', '# closing out?', 'if', 'amount', '==', '-', 'self', '.', '_value', ':', 'q', '=', '-', 'self', '.', '_position', 'else', ':', 'q', '=', 'amount', '/', '(', 'self', '.', '_price', '*', 'self', '.', 'multiplier', ')', 'if', 'self', '.', 'integer_positions', ':', 'if', '(', 'self', '.', '_position', '>', '0', ')', 'or', '(', '(', 'self', '.', '_position', '==', '0', ')', 'and', '(', 'amount', '>', '0', ')', ')', ':', "# if we're going long or changing long position", 'q', '=', 'math', '.', 'floor', '(', 'q', ')', 'else', ':', "# if we're going short or changing short position", 'q', '=', 'math', '.', 'ceil', '(', 'q', ')', '# if q is 0 nothing to do', 'if', 'q', '==', '0', 'or', 'np', '.', 'isnan', '(', 'q', ')', ':', 'return', '# unless we are closing out a position (q == -position)', '# we want to ensure that', '#', '# - In the event of a positive amount, this indicates the maximum', '# amount a given security can use up for a purchase. Therefore, if', '# commissions push us above this amount, we cannot buy `q`, and must', '# decrease its value', '#', "# - In the event of a negative amount, we want to 'raise' at least the", '# amount indicated, no less. Therefore, if we have commission, we must', '# sell additional units to fund this requirement. As such, q must once', '# again decrease.', '#', 'if', 'not', 'q', '==', '-', 'self', '.', '_position', ':', 'full_outlay', ',', '_', ',', '_', '=', 'self', '.', 'outlay', '(', 'q', ')', '# if full outlay > amount, we must decrease the magnitude of `q`', '# this can potentially lead to an infinite loop if the commission', '# per share > price per share. However, we cannot really detect', '# that in advance since the function can be non-linear (say a fn', '# like max(1, abs(q) * 0.01). 
Nevertheless, we want to avoid these', '# situations.', '# cap the maximum number of iterations to 1e4 and raise exception', '# if we get there', "# if integer positions then we know we are stuck if q doesn't change", '# if integer positions is false then we want full_outlay == amount', '# if integer positions is true then we want to be at the q where', "# if we bought 1 more then we wouldn't have enough cash", 'i', '=', '0', 'last_q', '=', 'q', 'last_amount_short', '=', 'full_outlay', '-', 'amount', 'while', 'not', 'np', '.', 'isclose', '(', 'full_outlay', ',', 'amount', ',', 'rtol', '=', '0.', ')', 'and', 'q', '!=', '0', ':', 'dq_wout_considering_tx_costs', '=', '(', 'full_outlay', '-', 'amount', ')', '/', '(', 'self', '.', '_price', '*', 'self', '.', 'multiplier', ')', 'q', '=', 'q', '-', 'dq_wout_considering_tx_costs', 'if', 'self', '.', 'integer_positions', ':', 'q', '=', 'math', '.', 'floor', '(', 'q', ')', 'full_outlay', ',', '_', ',', '_', '=', 'self', '.', 'outlay', '(', 'q', ')', '# if our q is too low and we have integer positions', '# then we know that the correct quantity is the one where', '# the outlay of q + 1 < amount. i.e. if we bought one more', "# position then we wouldn't have enough cash", 'if', 'self', '.', 'integer_positions', ':', 'full_outlay_of_1_more', ',', '_', ',', '_', '=', 'self', '.', 'outlay', '(', 'q', '+', '1', ')', 'if', 'full_outlay', '<', 'amount', 'and', 'full_outlay_of_1_more', '>', 'amount', ':', 'break', '# if not integer positions then we should keep going until', '# full_outlay == amount or is close enough', 'i', '=', 'i', '+', '1', 'if', 'i', '>', '1e4', ':', 'raise', 'Exception', '(', "'Potentially infinite loop detected. This occurred '", "'while trying to reduce the amount of shares purchased'", "' to respect the outlay <= amount rule. This is most '", "'likely due to a commission function that outputs a '", "'commission that is greater than the amount of cash '", "'a short sale can raise.'", ')', 'if', 'self', '.', 'integer_positions', 'and', 'last_q', '==', 'q', ':', 'raise', 'Exception', '(', "'Newton Method like root search for quantity is stuck!'", "' q did not change in iterations so it is probably a bug'", "' but we are not entirely sure it is wrong! Consider '", "' changing to warning.'", ')', 'last_q', '=', 'q', 'if', 'np', '.', 'abs', '(', 'full_outlay', '-', 'amount', ')', '>', 'np', '.', 'abs', '(', 'last_amount_short', ')', ':', 'raise', 'Exception', '(', "'The difference between what we have raised with q and'", "' the amount we are trying to raise has gotten bigger since'", "' last iteration! full_outlay should always be approaching'", "' amount! 
There may be a case where the commission fn is'", "' not smooth'", ')', 'last_amount_short', '=', 'full_outlay', '-', 'amount', '# if last step led to q == 0, then we can return just like above', 'if', 'q', '==', '0', ':', 'return', '# this security will need an update, even if pos is 0 (for example if', '# we close the positions, value and pos is 0, but still need to do that', '# last update)', 'self', '.', '_needupdate', '=', 'True', '# adjust position & value', 'self', '.', '_position', '+=', 'q', '# calculate proper adjustment for parent', '# parent passed down amount so we want to pass', '# -outlay back up to parent to adjust for capital', '# used', 'full_outlay', ',', 'outlay', ',', 'fee', '=', 'self', '.', 'outlay', '(', 'q', ')', '# store outlay for future reference', 'self', '.', '_outlay', '+=', 'outlay', '# call parent', 'self', '.', 'parent', '.', 'adjust', '(', '-', 'full_outlay', ',', 'update', '=', 'update', ',', 'flow', '=', 'False', ',', 'fee', '=', 'fee', ')']
This allocates capital to the Security. This is the method used to buy/sell the security. A given amount of shares will be determined on the current price, a commission will be calculated based on the parent's commission fn, and any remaining capital will be passed back up to parent as an adjustment. Args: * amount (float): Amount of adjustment. * update (bool): Force update?
['This', 'allocates', 'capital', 'to', 'the', 'Security', '.', 'This', 'is', 'the', 'method', 'used', 'to', 'buy', '/', 'sell', 'the', 'security', '.']
train
https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L987-L1156
5,479
hydraplatform/hydra-base
hydra_base/lib/project.py
get_project_by_network_id
def get_project_by_network_id(network_id,**kwargs): """ get a project complexmodel by a network_id """ user_id = kwargs.get('user_id') projects_i = db.DBSession.query(Project).join(ProjectOwner).join(Network, Project.id==Network.project_id).filter( Network.id==network_id, ProjectOwner.user_id==user_id).order_by('name').all() ret_project = None for project_i in projects_i: try: project_i.check_read_permission(user_id) ret_project = project_i except: log.info("Can't return project %s. User %s does not have permission to read it.", project_i.id, user_id) return ret_project
python
def get_project_by_network_id(network_id,**kwargs): """ get a project complexmodel by a network_id """ user_id = kwargs.get('user_id') projects_i = db.DBSession.query(Project).join(ProjectOwner).join(Network, Project.id==Network.project_id).filter( Network.id==network_id, ProjectOwner.user_id==user_id).order_by('name').all() ret_project = None for project_i in projects_i: try: project_i.check_read_permission(user_id) ret_project = project_i except: log.info("Can't return project %s. User %s does not have permission to read it.", project_i.id, user_id) return ret_project
['def', 'get_project_by_network_id', '(', 'network_id', ',', '*', '*', 'kwargs', ')', ':', 'user_id', '=', 'kwargs', '.', 'get', '(', "'user_id'", ')', 'projects_i', '=', 'db', '.', 'DBSession', '.', 'query', '(', 'Project', ')', '.', 'join', '(', 'ProjectOwner', ')', '.', 'join', '(', 'Network', ',', 'Project', '.', 'id', '==', 'Network', '.', 'project_id', ')', '.', 'filter', '(', 'Network', '.', 'id', '==', 'network_id', ',', 'ProjectOwner', '.', 'user_id', '==', 'user_id', ')', '.', 'order_by', '(', "'name'", ')', '.', 'all', '(', ')', 'ret_project', '=', 'None', 'for', 'project_i', 'in', 'projects_i', ':', 'try', ':', 'project_i', '.', 'check_read_permission', '(', 'user_id', ')', 'ret_project', '=', 'project_i', 'except', ':', 'log', '.', 'info', '(', '"Can\'t return project %s. User %s does not have permission to read it."', ',', 'project_i', '.', 'id', ',', 'user_id', ')', 'return', 'ret_project']
get a project complexmodel by a network_id
['get', 'a', 'project', 'complexmodel', 'by', 'a', 'network_id']
train
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/project.py#L149-L166
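A usage sketch based on the signature in the record above; it assumes a configured hydra-base database session and existing user/network ids (the ids shown are placeholders):

from hydra_base.lib.project import get_project_by_network_id

# user_id is read from **kwargs and drives the ownership/permission check
project = get_project_by_network_id(network_id=42, user_id=1)
if project is None:
    print("No readable project found for that network")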
5,480
bpsmith/tia
tia/rlab/builder.py
PdfBuilder.new_title_bar
def new_title_bar(self, title, color=None): """Return an array of Pdf Objects which constitute a Header""" # Build a title bar for top of page w, t, c = '100%', 2, color or HexColor('#404040') title = '<b>{0}</b>'.format(title) if 'TitleBar' not in self.stylesheet: tb = ParagraphStyle('TitleBar', parent=self.stylesheet['Normal'], fontName='Helvetica-Bold', fontSize=10, leading=10, alignment=TA_CENTER) self.stylesheet.add(tb) return [HRFlowable(width=w, thickness=t, color=c, spaceAfter=2, vAlign='MIDDLE', lineCap='square'), self.new_paragraph(title, 'TitleBar'), HRFlowable(width=w, thickness=t, color=c, spaceBefore=2, vAlign='MIDDLE', lineCap='square')]
python
def new_title_bar(self, title, color=None): """Return an array of Pdf Objects which constitute a Header""" # Build a title bar for top of page w, t, c = '100%', 2, color or HexColor('#404040') title = '<b>{0}</b>'.format(title) if 'TitleBar' not in self.stylesheet: tb = ParagraphStyle('TitleBar', parent=self.stylesheet['Normal'], fontName='Helvetica-Bold', fontSize=10, leading=10, alignment=TA_CENTER) self.stylesheet.add(tb) return [HRFlowable(width=w, thickness=t, color=c, spaceAfter=2, vAlign='MIDDLE', lineCap='square'), self.new_paragraph(title, 'TitleBar'), HRFlowable(width=w, thickness=t, color=c, spaceBefore=2, vAlign='MIDDLE', lineCap='square')]
['def', 'new_title_bar', '(', 'self', ',', 'title', ',', 'color', '=', 'None', ')', ':', '# Build a title bar for top of page', 'w', ',', 't', ',', 'c', '=', "'100%'", ',', '2', ',', 'color', 'or', 'HexColor', '(', "'#404040'", ')', 'title', '=', "'<b>{0}</b>'", '.', 'format', '(', 'title', ')', 'if', "'TitleBar'", 'not', 'in', 'self', '.', 'stylesheet', ':', 'tb', '=', 'ParagraphStyle', '(', "'TitleBar'", ',', 'parent', '=', 'self', '.', 'stylesheet', '[', "'Normal'", ']', ',', 'fontName', '=', "'Helvetica-Bold'", ',', 'fontSize', '=', '10', ',', 'leading', '=', '10', ',', 'alignment', '=', 'TA_CENTER', ')', 'self', '.', 'stylesheet', '.', 'add', '(', 'tb', ')', 'return', '[', 'HRFlowable', '(', 'width', '=', 'w', ',', 'thickness', '=', 't', ',', 'color', '=', 'c', ',', 'spaceAfter', '=', '2', ',', 'vAlign', '=', "'MIDDLE'", ',', 'lineCap', '=', "'square'", ')', ',', 'self', '.', 'new_paragraph', '(', 'title', ',', "'TitleBar'", ')', ',', 'HRFlowable', '(', 'width', '=', 'w', ',', 'thickness', '=', 't', ',', 'color', '=', 'c', ',', 'spaceBefore', '=', '2', ',', 'vAlign', '=', "'MIDDLE'", ',', 'lineCap', '=', "'square'", ')', ']']
Return an array of Pdf Objects which constitute a Header
['Return', 'an', 'array', 'of', 'Pdf', 'Objects', 'which', 'constitute', 'a', 'Header']
train
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/builder.py#L174-L185
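A sketch of using the method above while assembling a reportlab story; it assumes a `PdfBuilder` instance named `builder` has already been created elsewhere (its constructor arguments are not shown in this record):

from reportlab.lib.colors import HexColor

# new_title_bar returns [HRFlowable, Paragraph, HRFlowable]; extend the story with all three.
story = []
story.extend(builder.new_title_bar('Quarterly Performance', color=HexColor('#003366')))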
5,481
Yelp/detect-secrets
detect_secrets/core/potential_secret.py
PotentialSecret.json
def json(self): """Custom JSON encoder""" attributes = { 'type': self.type, 'filename': self.filename, 'line_number': self.lineno, 'hashed_secret': self.secret_hash, } if self.is_secret is not None: attributes['is_secret'] = self.is_secret return attributes
python
def json(self): """Custom JSON encoder""" attributes = { 'type': self.type, 'filename': self.filename, 'line_number': self.lineno, 'hashed_secret': self.secret_hash, } if self.is_secret is not None: attributes['is_secret'] = self.is_secret return attributes
['def', 'json', '(', 'self', ')', ':', 'attributes', '=', '{', "'type'", ':', 'self', '.', 'type', ',', "'filename'", ':', 'self', '.', 'filename', ',', "'line_number'", ':', 'self', '.', 'lineno', ',', "'hashed_secret'", ':', 'self', '.', 'secret_hash', ',', '}', 'if', 'self', '.', 'is_secret', 'is', 'not', 'None', ':', 'attributes', '[', "'is_secret'", ']', '=', 'self', '.', 'is_secret', 'return', 'attributes']
Custom JSON encoder
['Custom', 'JSON', 'encoder']
train
https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/core/potential_secret.py#L65-L77
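A small sketch of serializing a finding; the positional constructor arguments (type label, filename, raw secret) are an assumption based on typical usage of this class and are not shown in the record:

from detect_secrets.core.potential_secret import PotentialSecret

secret = PotentialSecret('Hex High Entropy String', 'config/settings.py', 'deadbeefcafe1234', lineno=12)
# Emits type, filename, line_number and hashed_secret; is_secret only when it has been set.
print(secret.json())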
5,482
pvlib/pvlib-python
pvlib/iotools/surfrad.py
read_surfrad
def read_surfrad(filename, map_variables=True): """Read in a daily NOAA SURFRAD[1] file. Parameters ---------- filename: str Filepath or url. map_variables: bool When true, renames columns of the Dataframe to pvlib variable names where applicable. See variable SURFRAD_COLUMNS. Returns ------- Tuple of the form (data, metadata). data: Dataframe Dataframe with the fields found below. metadata: dict Site metadata included in the file. Notes ----- Metadata dictionary includes the following fields: =============== ====== =============== Key Format Description =============== ====== =============== station String site name latitude Float site latitude longitude Float site longitude elevation Int site elevation surfrad_version Int surfrad version tz String Timezone (UTC) =============== ====== =============== Dataframe includes the following fields: ======================= ====== ========================================== raw, mapped Format Description ======================= ====== ========================================== **Mapped field names are returned when the map_variables argument is True** --------------------------------------------------------------------------- year int year as 4 digit int jday int day of year 1-365(or 366) month int month (1-12) day int day of month(1-31) hour int hour (0-23) minute int minute (0-59) dt float decimal time i.e. 23.5 = 2330 zen, solar_zenith float solar zenith angle (deg) **Fields below have associated qc flags labeled <field>_flag.** --------------------------------------------------------------------------- dw_solar, ghi float downwelling global solar(W/m^2) uw_solar float updownwelling global solar(W/m^2) direct_n, dni float direct normal solar (W/m^2) diffuse, dhi float downwelling diffuse solar (W/m^2) dw_ir float downwelling thermal infrared (W/m^2) dw_casetemp float downwelling IR case temp (K) dw_dometemp float downwelling IR dome temp (K) uw_ir float upwelling thermal infrared (W/m^2) uw_casetemp float upwelling IR case temp (K) uw_dometemp float upwelling IR case temp (K) uvb float global uvb (miliWatts/m^2) par float photosynthetically active radiation(W/m^2) netsolar float net solar (dw_solar - uw_solar) (W/m^2) netir float net infrared (dw_ir - uw_ir) (W/m^2) totalnet float net radiation (netsolar+netir) (W/m^2) temp, temp_air float 10-meter air temperature (?C) rh, relative_humidity float relative humidity (%) windspd, wind_speed float wind speed (m/s) winddir, wind_direction float wind direction (deg, clockwise from north) pressure float station pressure (mb) ======================= ====== ========================================== See README files located in the station directories in the SURFRAD data archives[2] for details on SURFRAD daily data files. References ---------- [1] NOAA Earth System Research Laboratory Surface Radiation Budget Network `SURFRAD Homepage <https://www.esrl.noaa.gov/gmd/grad/surfrad/>`_ [2] NOAA SURFRAD Data Archive `SURFRAD Archive <ftp://aftp.cmdl.noaa.gov/data/radiation/surfrad/>`_ """ if filename.startswith('ftp'): req = Request(filename) response = urlopen(req) file_buffer = io.StringIO(response.read().decode(errors='ignore')) else: file_buffer = open(filename, 'r') # Read and parse the first two lines to build the metadata dict. 
station = file_buffer.readline() file_metadata = file_buffer.readline() metadata_list = file_metadata.split() metadata = {} metadata['name'] = station.strip() metadata['latitude'] = float(metadata_list[0]) metadata['longitude'] = float(metadata_list[1]) metadata['elevation'] = float(metadata_list[2]) metadata['surfrad_version'] = int(metadata_list[-1]) metadata['tz'] = 'UTC' data = pd.read_csv(file_buffer, delim_whitespace=True, header=None, names=SURFRAD_COLUMNS) file_buffer.close() data = format_index(data) missing = data == -9999.9 data = data.where(~missing, np.NaN) if map_variables: data.rename(columns=VARIABLE_MAP, inplace=True) return data, metadata
python
def read_surfrad(filename, map_variables=True): """Read in a daily NOAA SURFRAD[1] file. Parameters ---------- filename: str Filepath or url. map_variables: bool When true, renames columns of the Dataframe to pvlib variable names where applicable. See variable SURFRAD_COLUMNS. Returns ------- Tuple of the form (data, metadata). data: Dataframe Dataframe with the fields found below. metadata: dict Site metadata included in the file. Notes ----- Metadata dictionary includes the following fields: =============== ====== =============== Key Format Description =============== ====== =============== station String site name latitude Float site latitude longitude Float site longitude elevation Int site elevation surfrad_version Int surfrad version tz String Timezone (UTC) =============== ====== =============== Dataframe includes the following fields: ======================= ====== ========================================== raw, mapped Format Description ======================= ====== ========================================== **Mapped field names are returned when the map_variables argument is True** --------------------------------------------------------------------------- year int year as 4 digit int jday int day of year 1-365(or 366) month int month (1-12) day int day of month(1-31) hour int hour (0-23) minute int minute (0-59) dt float decimal time i.e. 23.5 = 2330 zen, solar_zenith float solar zenith angle (deg) **Fields below have associated qc flags labeled <field>_flag.** --------------------------------------------------------------------------- dw_solar, ghi float downwelling global solar(W/m^2) uw_solar float updownwelling global solar(W/m^2) direct_n, dni float direct normal solar (W/m^2) diffuse, dhi float downwelling diffuse solar (W/m^2) dw_ir float downwelling thermal infrared (W/m^2) dw_casetemp float downwelling IR case temp (K) dw_dometemp float downwelling IR dome temp (K) uw_ir float upwelling thermal infrared (W/m^2) uw_casetemp float upwelling IR case temp (K) uw_dometemp float upwelling IR case temp (K) uvb float global uvb (miliWatts/m^2) par float photosynthetically active radiation(W/m^2) netsolar float net solar (dw_solar - uw_solar) (W/m^2) netir float net infrared (dw_ir - uw_ir) (W/m^2) totalnet float net radiation (netsolar+netir) (W/m^2) temp, temp_air float 10-meter air temperature (?C) rh, relative_humidity float relative humidity (%) windspd, wind_speed float wind speed (m/s) winddir, wind_direction float wind direction (deg, clockwise from north) pressure float station pressure (mb) ======================= ====== ========================================== See README files located in the station directories in the SURFRAD data archives[2] for details on SURFRAD daily data files. References ---------- [1] NOAA Earth System Research Laboratory Surface Radiation Budget Network `SURFRAD Homepage <https://www.esrl.noaa.gov/gmd/grad/surfrad/>`_ [2] NOAA SURFRAD Data Archive `SURFRAD Archive <ftp://aftp.cmdl.noaa.gov/data/radiation/surfrad/>`_ """ if filename.startswith('ftp'): req = Request(filename) response = urlopen(req) file_buffer = io.StringIO(response.read().decode(errors='ignore')) else: file_buffer = open(filename, 'r') # Read and parse the first two lines to build the metadata dict. 
station = file_buffer.readline() file_metadata = file_buffer.readline() metadata_list = file_metadata.split() metadata = {} metadata['name'] = station.strip() metadata['latitude'] = float(metadata_list[0]) metadata['longitude'] = float(metadata_list[1]) metadata['elevation'] = float(metadata_list[2]) metadata['surfrad_version'] = int(metadata_list[-1]) metadata['tz'] = 'UTC' data = pd.read_csv(file_buffer, delim_whitespace=True, header=None, names=SURFRAD_COLUMNS) file_buffer.close() data = format_index(data) missing = data == -9999.9 data = data.where(~missing, np.NaN) if map_variables: data.rename(columns=VARIABLE_MAP, inplace=True) return data, metadata
['def', 'read_surfrad', '(', 'filename', ',', 'map_variables', '=', 'True', ')', ':', 'if', 'filename', '.', 'startswith', '(', "'ftp'", ')', ':', 'req', '=', 'Request', '(', 'filename', ')', 'response', '=', 'urlopen', '(', 'req', ')', 'file_buffer', '=', 'io', '.', 'StringIO', '(', 'response', '.', 'read', '(', ')', '.', 'decode', '(', 'errors', '=', "'ignore'", ')', ')', 'else', ':', 'file_buffer', '=', 'open', '(', 'filename', ',', "'r'", ')', '# Read and parse the first two lines to build the metadata dict.', 'station', '=', 'file_buffer', '.', 'readline', '(', ')', 'file_metadata', '=', 'file_buffer', '.', 'readline', '(', ')', 'metadata_list', '=', 'file_metadata', '.', 'split', '(', ')', 'metadata', '=', '{', '}', 'metadata', '[', "'name'", ']', '=', 'station', '.', 'strip', '(', ')', 'metadata', '[', "'latitude'", ']', '=', 'float', '(', 'metadata_list', '[', '0', ']', ')', 'metadata', '[', "'longitude'", ']', '=', 'float', '(', 'metadata_list', '[', '1', ']', ')', 'metadata', '[', "'elevation'", ']', '=', 'float', '(', 'metadata_list', '[', '2', ']', ')', 'metadata', '[', "'surfrad_version'", ']', '=', 'int', '(', 'metadata_list', '[', '-', '1', ']', ')', 'metadata', '[', "'tz'", ']', '=', "'UTC'", 'data', '=', 'pd', '.', 'read_csv', '(', 'file_buffer', ',', 'delim_whitespace', '=', 'True', ',', 'header', '=', 'None', ',', 'names', '=', 'SURFRAD_COLUMNS', ')', 'file_buffer', '.', 'close', '(', ')', 'data', '=', 'format_index', '(', 'data', ')', 'missing', '=', 'data', '==', '-', '9999.9', 'data', '=', 'data', '.', 'where', '(', '~', 'missing', ',', 'np', '.', 'NaN', ')', 'if', 'map_variables', ':', 'data', '.', 'rename', '(', 'columns', '=', 'VARIABLE_MAP', ',', 'inplace', '=', 'True', ')', 'return', 'data', ',', 'metadata']
Read in a daily NOAA SURFRAD[1] file. Parameters ---------- filename: str Filepath or url. map_variables: bool When true, renames columns of the Dataframe to pvlib variable names where applicable. See variable SURFRAD_COLUMNS. Returns ------- Tuple of the form (data, metadata). data: Dataframe Dataframe with the fields found below. metadata: dict Site metadata included in the file. Notes ----- Metadata dictionary includes the following fields: =============== ====== =============== Key Format Description =============== ====== =============== station String site name latitude Float site latitude longitude Float site longitude elevation Int site elevation surfrad_version Int surfrad version tz String Timezone (UTC) =============== ====== =============== Dataframe includes the following fields: ======================= ====== ========================================== raw, mapped Format Description ======================= ====== ========================================== **Mapped field names are returned when the map_variables argument is True** --------------------------------------------------------------------------- year int year as 4 digit int jday int day of year 1-365(or 366) month int month (1-12) day int day of month(1-31) hour int hour (0-23) minute int minute (0-59) dt float decimal time i.e. 23.5 = 2330 zen, solar_zenith float solar zenith angle (deg) **Fields below have associated qc flags labeled <field>_flag.** --------------------------------------------------------------------------- dw_solar, ghi float downwelling global solar(W/m^2) uw_solar float updownwelling global solar(W/m^2) direct_n, dni float direct normal solar (W/m^2) diffuse, dhi float downwelling diffuse solar (W/m^2) dw_ir float downwelling thermal infrared (W/m^2) dw_casetemp float downwelling IR case temp (K) dw_dometemp float downwelling IR dome temp (K) uw_ir float upwelling thermal infrared (W/m^2) uw_casetemp float upwelling IR case temp (K) uw_dometemp float upwelling IR case temp (K) uvb float global uvb (miliWatts/m^2) par float photosynthetically active radiation(W/m^2) netsolar float net solar (dw_solar - uw_solar) (W/m^2) netir float net infrared (dw_ir - uw_ir) (W/m^2) totalnet float net radiation (netsolar+netir) (W/m^2) temp, temp_air float 10-meter air temperature (?C) rh, relative_humidity float relative humidity (%) windspd, wind_speed float wind speed (m/s) winddir, wind_direction float wind direction (deg, clockwise from north) pressure float station pressure (mb) ======================= ====== ========================================== See README files located in the station directories in the SURFRAD data archives[2] for details on SURFRAD daily data files. References ---------- [1] NOAA Earth System Research Laboratory Surface Radiation Budget Network `SURFRAD Homepage <https://www.esrl.noaa.gov/gmd/grad/surfrad/>`_ [2] NOAA SURFRAD Data Archive `SURFRAD Archive <ftp://aftp.cmdl.noaa.gov/data/radiation/surfrad/>`_
['Read', 'in', 'a', 'daily', 'NOAA', 'SURFRAD', '[', '1', ']', 'file', '.']
train
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/iotools/surfrad.py#L46-L160
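Typical use of the reader above; the file name is illustrative and can also be an ftp:// URL into the SURFRAD archive:

from pvlib.iotools import read_surfrad

data, metadata = read_surfrad('bon19001.dat')   # a locally downloaded daily SURFRAD file
print(metadata['name'], metadata['latitude'])
ghi = data['ghi']                               # mapped column name, since map_variables=True by default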
5,483
DataDog/integrations-core
hdfs_namenode/datadog_checks/hdfs_namenode/hdfs_namenode.py
HDFSNameNode._set_metric
def _set_metric(self, metric_name, metric_type, value, tags=None): """ Set a metric """ if metric_type == self.GAUGE: self.gauge(metric_name, value, tags=tags) else: self.log.error('Metric type "{}" unknown'.format(metric_type))
python
def _set_metric(self, metric_name, metric_type, value, tags=None): """ Set a metric """ if metric_type == self.GAUGE: self.gauge(metric_name, value, tags=tags) else: self.log.error('Metric type "{}" unknown'.format(metric_type))
['def', '_set_metric', '(', 'self', ',', 'metric_name', ',', 'metric_type', ',', 'value', ',', 'tags', '=', 'None', ')', ':', 'if', 'metric_type', '==', 'self', '.', 'GAUGE', ':', 'self', '.', 'gauge', '(', 'metric_name', ',', 'value', ',', 'tags', '=', 'tags', ')', 'else', ':', 'self', '.', 'log', '.', 'error', '(', '\'Metric type "{}" unknown\'', '.', 'format', '(', 'metric_type', ')', ')']
Set a metric
['Set', 'a', 'metric']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/hdfs_namenode/datadog_checks/hdfs_namenode/hdfs_namenode.py#L131-L138
5,484
nwilming/ocupy
ocupy/datamat.py
Datamat.parameter_to_field
def parameter_to_field(self, name): """ Promotes a parameter to a field by creating a new array of same size as the other existing fields, filling it with the current value of the parameter, and then removing that parameter. """ if name not in self._parameters: raise ValueError("no '%s' parameter found" % (name)) if self._fields.count(name) > 0: raise ValueError("field with name '%s' already exists" % (name)) data = np.array([self._parameters[name]]*self._num_fix) self.rm_parameter(name) self.add_field(name, data)
python
def parameter_to_field(self, name): """ Promotes a parameter to a field by creating a new array of same size as the other existing fields, filling it with the current value of the parameter, and then removing that parameter. """ if name not in self._parameters: raise ValueError("no '%s' parameter found" % (name)) if self._fields.count(name) > 0: raise ValueError("field with name '%s' already exists" % (name)) data = np.array([self._parameters[name]]*self._num_fix) self.rm_parameter(name) self.add_field(name, data)
['def', 'parameter_to_field', '(', 'self', ',', 'name', ')', ':', 'if', 'name', 'not', 'in', 'self', '.', '_parameters', ':', 'raise', 'ValueError', '(', '"no \'%s\' parameter found"', '%', '(', 'name', ')', ')', 'if', 'self', '.', '_fields', '.', 'count', '(', 'name', ')', '>', '0', ':', 'raise', 'ValueError', '(', '"field with name \'%s\' already exists"', '%', '(', 'name', ')', ')', 'data', '=', 'np', '.', 'array', '(', '[', 'self', '.', '_parameters', '[', 'name', ']', ']', '*', 'self', '.', '_num_fix', ')', 'self', '.', 'rm_parameter', '(', 'name', ')', 'self', '.', 'add_field', '(', 'name', ',', 'data', ')']
Promotes a parameter to a field by creating a new array of same size as the other existing fields, filling it with the current value of the parameter, and then removing that parameter.
['Promotes', 'a', 'parameter', 'to', 'a', 'field', 'by', 'creating', 'a', 'new', 'array', 'of', 'same', 'size', 'as', 'the', 'other', 'existing', 'fields', 'filling', 'it', 'with', 'the', 'current', 'value', 'of', 'the', 'parameter', 'and', 'then', 'removing', 'that', 'parameter', '.']
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L450-L464
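A sketch of the call, assuming a Datamat instance `dm` loaded elsewhere that carries a per-datamat parameter named 'subject_id' (a hypothetical name):

# Before: dm.subject_id is a single scalar parameter shared by all samples.
dm.parameter_to_field('subject_id')
# After: dm.subject_id is an array field with one (identical) entry per sample.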
5,485
boriel/zxbasic
asm.py
AsmInstruction.bytes
def bytes(self): """ Returns a t-uple with instruction bytes (integers) """ result = [] op = self.opcode.split(' ') argi = 0 while op: q = op.pop(0) if q == 'XX': for k in range(self.argbytes[argi] - 1): op.pop(0) result.extend(num2bytes(self.argval()[argi], self.argbytes[argi])) argi += 1 else: result.append(int(q, 16)) # Add opcode if len(result) != self.size: raise InternalMismatchSizeError(len(result), self) return result
python
def bytes(self): """ Returns a t-uple with instruction bytes (integers) """ result = [] op = self.opcode.split(' ') argi = 0 while op: q = op.pop(0) if q == 'XX': for k in range(self.argbytes[argi] - 1): op.pop(0) result.extend(num2bytes(self.argval()[argi], self.argbytes[argi])) argi += 1 else: result.append(int(q, 16)) # Add opcode if len(result) != self.size: raise InternalMismatchSizeError(len(result), self) return result
['def', 'bytes', '(', 'self', ')', ':', 'result', '=', '[', ']', 'op', '=', 'self', '.', 'opcode', '.', 'split', '(', "' '", ')', 'argi', '=', '0', 'while', 'op', ':', 'q', '=', 'op', '.', 'pop', '(', '0', ')', 'if', 'q', '==', "'XX'", ':', 'for', 'k', 'in', 'range', '(', 'self', '.', 'argbytes', '[', 'argi', ']', '-', '1', ')', ':', 'op', '.', 'pop', '(', '0', ')', 'result', '.', 'extend', '(', 'num2bytes', '(', 'self', '.', 'argval', '(', ')', '[', 'argi', ']', ',', 'self', '.', 'argbytes', '[', 'argi', ']', ')', ')', 'argi', '+=', '1', 'else', ':', 'result', '.', 'append', '(', 'int', '(', 'q', ',', '16', ')', ')', '# Add opcode', 'if', 'len', '(', 'result', ')', '!=', 'self', '.', 'size', ':', 'raise', 'InternalMismatchSizeError', '(', 'len', '(', 'result', ')', ',', 'self', ')', 'return', 'result']
Returns a t-uple with instruction bytes (integers)
['Returns', 'a', 't', '-', 'uple', 'with', 'instruction', 'bytes', '(', 'integers', ')']
train
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asm.py#L128-L150
5,486
yyuu/botornado
boto/ec2/elb/__init__.py
ELBConnection.attach_lb_to_subnets
def attach_lb_to_subnets(self, name, subnets): """ Attaches load balancer to one or more subnets. Attaching subnets that are already registered with the Load Balancer has no effect. :type name: string :param name: The name of the Load Balancer :type subnets: List of strings :param subnets: The name of the subnet(s) to add. :rtype: List of strings :return: An updated list of subnets for this Load Balancer. """ params = {'LoadBalancerName' : name} self.build_list_params(params, subnets, 'Subnets.member.%d') return self.get_list('AttachLoadBalancerToSubnets', params, None)
python
def attach_lb_to_subnets(self, name, subnets): """ Attaches load balancer to one or more subnets. Attaching subnets that are already registered with the Load Balancer has no effect. :type name: string :param name: The name of the Load Balancer :type subnets: List of strings :param subnets: The name of the subnet(s) to add. :rtype: List of strings :return: An updated list of subnets for this Load Balancer. """ params = {'LoadBalancerName' : name} self.build_list_params(params, subnets, 'Subnets.member.%d') return self.get_list('AttachLoadBalancerToSubnets', params, None)
['def', 'attach_lb_to_subnets', '(', 'self', ',', 'name', ',', 'subnets', ')', ':', 'params', '=', '{', "'LoadBalancerName'", ':', 'name', '}', 'self', '.', 'build_list_params', '(', 'params', ',', 'subnets', ',', "'Subnets.member.%d'", ')', 'return', 'self', '.', 'get_list', '(', "'AttachLoadBalancerToSubnets'", ',', 'params', ',', 'None', ')']
Attaches load balancer to one or more subnets. Attaching subnets that are already registered with the Load Balancer has no effect. :type name: string :param name: The name of the Load Balancer :type subnets: List of strings :param subnets: The name of the subnet(s) to add. :rtype: List of strings :return: An updated list of subnets for this Load Balancer.
['Attaches', 'load', 'balancer', 'to', 'one', 'or', 'more', 'subnets', '.', 'Attaching', 'subnets', 'that', 'are', 'already', 'registered', 'with', 'the', 'Load', 'Balancer', 'has', 'no', 'effect', '.']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/elb/__init__.py#L484-L505
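A usage sketch with the boto 2 ELB connection; the load balancer name and subnet id are placeholders:

import boto

elb = boto.connect_elb()   # credentials/region resolved from the usual boto configuration
updated = elb.attach_lb_to_subnets('my-lb', ['subnet-0a1b2c3d'])
print(updated)             # the updated list of subnets for this load balancer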
5,487
scrapinghub/flatson
flatson/flatson.py
Flatson.flatten_dict
def flatten_dict(self, obj): """Return an OrderedDict dict preserving order of keys in fieldnames """ return OrderedDict(zip(self.fieldnames, self.flatten(obj)))
python
def flatten_dict(self, obj): """Return an OrderedDict dict preserving order of keys in fieldnames """ return OrderedDict(zip(self.fieldnames, self.flatten(obj)))
['def', 'flatten_dict', '(', 'self', ',', 'obj', ')', ':', 'return', 'OrderedDict', '(', 'zip', '(', 'self', '.', 'fieldnames', ',', 'self', '.', 'flatten', '(', 'obj', ')', ')', ')']
Return an OrderedDict dict preserving order of keys in fieldnames
['Return', 'an', 'OrderedDict', 'dict', 'preserving', 'order', 'of', 'keys', 'in', 'fieldnames']
train
https://github.com/scrapinghub/flatson/blob/dcbcea32ad6d4df1df85fff8366bce40438d469a/flatson/flatson.py#L136-L139
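A small end-to-end sketch; the schema is illustrative and the constructor call assumes Flatson accepts a JSON schema dict:

from flatson import Flatson

schema = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'address': {'type': 'object', 'properties': {'city': {'type': 'string'}}},
    },
}
f = Flatson(schema)
flat = f.flatten_dict({'name': 'Ada', 'address': {'city': 'London'}})
# OrderedDict with keys in fieldname order, e.g. ('name', 'Ada'), ('address.city', 'London')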
5,488
mdiener/grace
grace/py27/pyjsdoc.py
make_index
def make_index(css_class, entities): """ Generate the HTML index (a short description and a link to the full documentation) for a list of FunctionDocs or ClassDocs. """ def make_entry(entity): return ('<dt><a href = "%(url)s">%(name)s</a></dt>\n' + '<dd>%(doc)s</dd>') % { 'name': entity.name, 'url': entity.url, 'doc': first_sentence(entity.doc) } entry_text = '\n'.join(make_entry(val) for val in entities) if entry_text: return '<dl class = "%s">\n%s\n</dl>' % (css_class, entry_text) else: return ''
python
def make_index(css_class, entities): """ Generate the HTML index (a short description and a link to the full documentation) for a list of FunctionDocs or ClassDocs. """ def make_entry(entity): return ('<dt><a href = "%(url)s">%(name)s</a></dt>\n' + '<dd>%(doc)s</dd>') % { 'name': entity.name, 'url': entity.url, 'doc': first_sentence(entity.doc) } entry_text = '\n'.join(make_entry(val) for val in entities) if entry_text: return '<dl class = "%s">\n%s\n</dl>' % (css_class, entry_text) else: return ''
['def', 'make_index', '(', 'css_class', ',', 'entities', ')', ':', 'def', 'make_entry', '(', 'entity', ')', ':', 'return', '(', '\'<dt><a href = "%(url)s">%(name)s</a></dt>\\n\'', '+', "'<dd>%(doc)s</dd>'", ')', '%', '{', "'name'", ':', 'entity', '.', 'name', ',', "'url'", ':', 'entity', '.', 'url', ',', "'doc'", ':', 'first_sentence', '(', 'entity', '.', 'doc', ')', '}', 'entry_text', '=', "'\\n'", '.', 'join', '(', 'make_entry', '(', 'val', ')', 'for', 'val', 'in', 'entities', ')', 'if', 'entry_text', ':', 'return', '\'<dl class = "%s">\\n%s\\n</dl>\'', '%', '(', 'css_class', ',', 'entry_text', ')', 'else', ':', 'return', "''"]
Generate the HTML index (a short description and a link to the full documentation) for a list of FunctionDocs or ClassDocs.
['Generate', 'the', 'HTML', 'index', '(', 'a', 'short', 'description', 'and', 'a', 'link', 'to', 'the', 'full', 'documentation', ')', 'for', 'a', 'list', 'of', 'FunctionDocs', 'or', 'ClassDocs', '.']
train
https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/pyjsdoc.py#L1442-L1458
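The function only needs objects exposing .name, .url and .doc, so a stand-in record type is enough to illustrate it (FunctionDoc/ClassDoc construction is not shown in this record):

from collections import namedtuple

Doc = namedtuple('Doc', 'name url doc')   # hypothetical stand-in for FunctionDoc/ClassDoc
entries = [Doc('parseArgs', 'parseArgs.html', 'Parse command-line arguments. Returns an options dict.')]
print(make_index('function-index', entries))
# -> <dl class = "function-index"> with one <dt>/<dd> pair per entry, or '' for an empty list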
5,489
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.remove_overlaps
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)): """Remove overlaps in UFOs' glyphs' contours.""" from booleanOperations import union, BooleanOperationsError for ufo in ufos: font_name = self._font_name(ufo) logger.info("Removing overlaps for " + font_name) for glyph in ufo: if not glyph_filter(glyph): continue contours = list(glyph) glyph.clearContours() try: union(contours, glyph.getPointPen()) except BooleanOperationsError: logger.error( "Failed to remove overlaps for %s: %r", font_name, glyph.name ) raise
python
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)): """Remove overlaps in UFOs' glyphs' contours.""" from booleanOperations import union, BooleanOperationsError for ufo in ufos: font_name = self._font_name(ufo) logger.info("Removing overlaps for " + font_name) for glyph in ufo: if not glyph_filter(glyph): continue contours = list(glyph) glyph.clearContours() try: union(contours, glyph.getPointPen()) except BooleanOperationsError: logger.error( "Failed to remove overlaps for %s: %r", font_name, glyph.name ) raise
['def', 'remove_overlaps', '(', 'self', ',', 'ufos', ',', 'glyph_filter', '=', 'lambda', 'g', ':', 'len', '(', 'g', ')', ')', ':', 'from', 'booleanOperations', 'import', 'union', ',', 'BooleanOperationsError', 'for', 'ufo', 'in', 'ufos', ':', 'font_name', '=', 'self', '.', '_font_name', '(', 'ufo', ')', 'logger', '.', 'info', '(', '"Removing overlaps for "', '+', 'font_name', ')', 'for', 'glyph', 'in', 'ufo', ':', 'if', 'not', 'glyph_filter', '(', 'glyph', ')', ':', 'continue', 'contours', '=', 'list', '(', 'glyph', ')', 'glyph', '.', 'clearContours', '(', ')', 'try', ':', 'union', '(', 'contours', ',', 'glyph', '.', 'getPointPen', '(', ')', ')', 'except', 'BooleanOperationsError', ':', 'logger', '.', 'error', '(', '"Failed to remove overlaps for %s: %r"', ',', 'font_name', ',', 'glyph', '.', 'name', ')', 'raise']
Remove overlaps in UFOs' glyphs' contours.
['Remove', 'overlaps', 'in', 'UFOs', 'glyphs', 'contours', '.']
train
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L187-L205
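A hedged sketch; it assumes `ufos` is a list of UFO font objects already opened elsewhere (for example with defcon or ufoLib2), and that FontProject can be constructed with its defaults:

from fontmake.font_project import FontProject

project = FontProject()
# Remove overlapping contours in every non-empty glyph (the default glyph_filter).
project.remove_overlaps(ufos)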
5,490
nion-software/nionswift-instrumentation-kit
nion/instrumentation/camera_base.py
CameraSettings.set_frame_parameters
def set_frame_parameters(self, profile_index: int, frame_parameters) -> None: """Set the frame parameters with the settings index and fire the frame parameters changed event. If the settings index matches the current settings index, call set current frame parameters. If the settings index matches the record settings index, call set record frame parameters. """ self.frame_parameters_changed_event.fire(profile_index, frame_parameters)
python
def set_frame_parameters(self, profile_index: int, frame_parameters) -> None: """Set the frame parameters with the settings index and fire the frame parameters changed event. If the settings index matches the current settings index, call set current frame parameters. If the settings index matches the record settings index, call set record frame parameters. """ self.frame_parameters_changed_event.fire(profile_index, frame_parameters)
['def', 'set_frame_parameters', '(', 'self', ',', 'profile_index', ':', 'int', ',', 'frame_parameters', ')', '->', 'None', ':', 'self', '.', 'frame_parameters_changed_event', '.', 'fire', '(', 'profile_index', ',', 'frame_parameters', ')']
Set the frame parameters with the settings index and fire the frame parameters changed event. If the settings index matches the current settings index, call set current frame parameters. If the settings index matches the record settings index, call set record frame parameters.
['Set', 'the', 'frame', 'parameters', 'with', 'the', 'settings', 'index', 'and', 'fire', 'the', 'frame', 'parameters', 'changed', 'event', '.']
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nion/instrumentation/camera_base.py#L504-L511
5,491
log2timeline/dfvfs
dfvfs/lib/data_format.py
DataFormat._ReadStructure
def _ReadStructure( self, file_object, file_offset, data_size, data_type_map, description): """Reads a structure. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_size (int): data size of the structure. data_type_map (dtfabric.DataTypeMap): data type map of the structure. description (str): description of the structure. Returns: object: structure values object. Raises: FileFormatError: if the structure cannot be read. ValueError: if file-like object or date type map are invalid. """ data = self._ReadData(file_object, file_offset, data_size, description) return self._ReadStructureFromByteStream( data, file_offset, data_type_map, description)
python
def _ReadStructure( self, file_object, file_offset, data_size, data_type_map, description): """Reads a structure. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_size (int): data size of the structure. data_type_map (dtfabric.DataTypeMap): data type map of the structure. description (str): description of the structure. Returns: object: structure values object. Raises: FileFormatError: if the structure cannot be read. ValueError: if file-like object or date type map are invalid. """ data = self._ReadData(file_object, file_offset, data_size, description) return self._ReadStructureFromByteStream( data, file_offset, data_type_map, description)
['def', '_ReadStructure', '(', 'self', ',', 'file_object', ',', 'file_offset', ',', 'data_size', ',', 'data_type_map', ',', 'description', ')', ':', 'data', '=', 'self', '.', '_ReadData', '(', 'file_object', ',', 'file_offset', ',', 'data_size', ',', 'description', ')', 'return', 'self', '.', '_ReadStructureFromByteStream', '(', 'data', ',', 'file_offset', ',', 'data_type_map', ',', 'description', ')']
Reads a structure. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_size (int): data size of the structure. data_type_map (dtfabric.DataTypeMap): data type map of the structure. description (str): description of the structure. Returns: object: structure values object. Raises: FileFormatError: if the structure cannot be read. ValueError: if file-like object or date type map are invalid.
['Reads', 'a', 'structure', '.']
train
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/lib/data_format.py#L93-L115
5,492
DataBiosphere/toil
src/toil/common.py
Toil.createBatchSystem
def createBatchSystem(config): """ Creates an instance of the batch system specified in the given config. :param toil.common.Config config: the current configuration :rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem :return: an instance of a concrete subclass of AbstractBatchSystem """ kwargs = dict(config=config, maxCores=config.maxCores, maxMemory=config.maxMemory, maxDisk=config.maxDisk) from toil.batchSystems.registry import batchSystemFactoryFor try: factory = batchSystemFactoryFor(config.batchSystem) batchSystemClass = factory() except: raise RuntimeError('Unrecognised batch system: %s' % config.batchSystem) if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup(): raise RuntimeError('%s currently does not support shared caching. Set the ' '--disableCaching flag if you want to ' 'use this batch system.' % config.batchSystem) logger.debug('Using the %s' % re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()) return batchSystemClass(**kwargs)
python
def createBatchSystem(config): """ Creates an instance of the batch system specified in the given config. :param toil.common.Config config: the current configuration :rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem :return: an instance of a concrete subclass of AbstractBatchSystem """ kwargs = dict(config=config, maxCores=config.maxCores, maxMemory=config.maxMemory, maxDisk=config.maxDisk) from toil.batchSystems.registry import batchSystemFactoryFor try: factory = batchSystemFactoryFor(config.batchSystem) batchSystemClass = factory() except: raise RuntimeError('Unrecognised batch system: %s' % config.batchSystem) if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup(): raise RuntimeError('%s currently does not support shared caching. Set the ' '--disableCaching flag if you want to ' 'use this batch system.' % config.batchSystem) logger.debug('Using the %s' % re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()) return batchSystemClass(**kwargs)
['def', 'createBatchSystem', '(', 'config', ')', ':', 'kwargs', '=', 'dict', '(', 'config', '=', 'config', ',', 'maxCores', '=', 'config', '.', 'maxCores', ',', 'maxMemory', '=', 'config', '.', 'maxMemory', ',', 'maxDisk', '=', 'config', '.', 'maxDisk', ')', 'from', 'toil', '.', 'batchSystems', '.', 'registry', 'import', 'batchSystemFactoryFor', 'try', ':', 'factory', '=', 'batchSystemFactoryFor', '(', 'config', '.', 'batchSystem', ')', 'batchSystemClass', '=', 'factory', '(', ')', 'except', ':', 'raise', 'RuntimeError', '(', "'Unrecognised batch system: %s'", '%', 'config', '.', 'batchSystem', ')', 'if', 'not', 'config', '.', 'disableCaching', 'and', 'not', 'batchSystemClass', '.', 'supportsWorkerCleanup', '(', ')', ':', 'raise', 'RuntimeError', '(', "'%s currently does not support shared caching. Set the '", "'--disableCaching flag if you want to '", "'use this batch system.'", '%', 'config', '.', 'batchSystem', ')', 'logger', '.', 'debug', '(', "'Using the %s'", '%', 're', '.', 'sub', '(', '"([a-z])([A-Z])"', ',', '"\\g<1> \\g<2>"', ',', 'batchSystemClass', '.', '__name__', ')', '.', 'lower', '(', ')', ')', 'return', 'batchSystemClass', '(', '*', '*', 'kwargs', ')']
Creates an instance of the batch system specified in the given config. :param toil.common.Config config: the current configuration :rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem :return: an instance of a concrete subclass of AbstractBatchSystem
['Creates', 'an', 'instance', 'of', 'the', 'batch', 'system', 'specified', 'in', 'the', 'given', 'config', '.']
train
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L870-L900
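A sketch of building a batch system from a default configuration; it assumes the stock Config() defaults name a registered batch system (singleMachine in a standard Toil install):

from toil.common import Config, Toil

config = Config()                              # default configuration, including the batch system name
batch_system = Toil.createBatchSystem(config)  # static factory from the record above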
5,493
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginpool.py
PluginPool.get_plugin_by_model
def get_plugin_by_model(self, model_class): """ Return the corresponding plugin for a given model. You can also use the :attr:`ContentItem.plugin <fluent_contents.models.ContentItem.plugin>` property directly. This is the low-level function that supports that feature. """ self._import_plugins() # could happen during rendering that no plugin scan happened yet. assert issubclass(model_class, ContentItem) # avoid confusion between model instance and class here! try: name = self._name_for_model[model_class] except KeyError: raise PluginNotFound("No plugin found for model '{0}'.".format(model_class.__name__)) return self.plugins[name]
python
def get_plugin_by_model(self, model_class): """ Return the corresponding plugin for a given model. You can also use the :attr:`ContentItem.plugin <fluent_contents.models.ContentItem.plugin>` property directly. This is the low-level function that supports that feature. """ self._import_plugins() # could happen during rendering that no plugin scan happened yet. assert issubclass(model_class, ContentItem) # avoid confusion between model instance and class here! try: name = self._name_for_model[model_class] except KeyError: raise PluginNotFound("No plugin found for model '{0}'.".format(model_class.__name__)) return self.plugins[name]
['def', 'get_plugin_by_model', '(', 'self', ',', 'model_class', ')', ':', 'self', '.', '_import_plugins', '(', ')', '# could happen during rendering that no plugin scan happened yet.', 'assert', 'issubclass', '(', 'model_class', ',', 'ContentItem', ')', '# avoid confusion between model instance and class here!', 'try', ':', 'name', '=', 'self', '.', '_name_for_model', '[', 'model_class', ']', 'except', 'KeyError', ':', 'raise', 'PluginNotFound', '(', '"No plugin found for model \'{0}\'."', '.', 'format', '(', 'model_class', '.', '__name__', ')', ')', 'return', 'self', '.', 'plugins', '[', 'name', ']']
Return the corresponding plugin for a given model. You can also use the :attr:`ContentItem.plugin <fluent_contents.models.ContentItem.plugin>` property directly. This is the low-level function that supports that feature.
['Return', 'the', 'corresponding', 'plugin', 'for', 'a', 'given', 'model', '.']
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginpool.py#L154-L168
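A usage sketch for get_plugin_by_model. The plugin_pool singleton import and the TextItem model are assumptions about the package layout, not part of this record.

from fluent_contents.extensions import plugin_pool, PluginNotFound
from fluent_contents.plugins.text.models import TextItem   # assumed example ContentItem subclass

try:
    plugin = plugin_pool.get_plugin_by_model(TextItem)
    print(plugin.verbose_name)
except PluginNotFound:
    print("TextItem has no registered plugin")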
5,494
hangyan/shaw
shaw/web/web.py
error_response
def error_response(message, status=400, code=None):
    """Return error message (in dict)."""
    from django.http import JsonResponse
    data = {'message': message}
    if code:
        data['code'] = code
    LOG.error("Error response, status code is : {} | {}".format(status, data))
    return JsonResponse(data=data, status=status)
python
def error_response(message, status=400, code=None):
    """Return error message (in dict)."""
    from django.http import JsonResponse
    data = {'message': message}
    if code:
        data['code'] = code
    LOG.error("Error response, status code is : {} | {}".format(status, data))
    return JsonResponse(data=data, status=status)
['def', 'error_response', '(', 'message', ',', 'status', '=', '400', ',', 'code', '=', 'None', ')', ':', 'from', 'django', '.', 'http', 'import', 'JsonResponse', 'data', '=', '{', "'message'", ':', 'message', '}', 'if', 'code', ':', 'data', '[', "'code'", ']', '=', 'code', 'LOG', '.', 'error', '(', '"Error response, status code is : {} | {}"', '.', 'format', '(', 'status', ',', 'data', ')', ')', 'return', 'JsonResponse', '(', 'data', '=', 'data', ',', 'status', '=', 'status', ')']
Return error message (in dict).
['Return', 'error', 'message', '(', 'in', 'dict', ')', '.']
train
https://github.com/hangyan/shaw/blob/63d01d35e225ba4edb9c61edaf351e1bc0e8fd15/shaw/web/web.py#L23-L30
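A sketch of how the helper above might be used in a Django view. The import path mirrors the record's file path, and the view and Article model are hypothetical.

from django.http import JsonResponse
from shaw.web.web import error_response   # import path assumed from the record's file path

def get_article(request, pk):
    article = Article.objects.filter(pk=pk).first()   # Article is a hypothetical model
    if article is None:
        return error_response('article not found', status=404, code='not_found')
    return JsonResponse({'title': article.title})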
5,495
lucuma/Clay
clay/manage.py
run
def run(host=DEFAULT_HOST, port=DEFAULT_PORT, path='.'):
    """Run the development server
    """
    path = abspath(path)
    c = Clay(path)
    c.run(host=host, port=port)
python
def run(host=DEFAULT_HOST, port=DEFAULT_PORT, path='.'):
    """Run the development server
    """
    path = abspath(path)
    c = Clay(path)
    c.run(host=host, port=port)
['def', 'run', '(', 'host', '=', 'DEFAULT_HOST', ',', 'port', '=', 'DEFAULT_PORT', ',', 'path', '=', "'.'", ')', ':', 'path', '=', 'abspath', '(', 'path', ')', 'c', '=', 'Clay', '(', 'path', ')', 'c', '.', 'run', '(', 'host', '=', 'host', ',', 'port', '=', 'port', ')']
Run the development server
['Run', 'the', 'development', 'server']
train
https://github.com/lucuma/Clay/blob/620d37086b712bdc4d13930ced43a5b7c9a5f46d/clay/manage.py#L44-L49
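A sketch of calling the development-server helper directly; the site path is illustrative, and DEFAULT_HOST/DEFAULT_PORT are whatever clay.manage defines for the signature shown above.

from clay.manage import run

# serve a Clay project from a local directory on all interfaces
run(host='0.0.0.0', port=8080, path='./my-site')   # path is resolved with abspath() before use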
5,496
nugget/python-anthemav
anthemav/connection.py
Connection.close
def close(self):
    """Close the AVR device connection and don't try to reconnect."""
    self.log.warning('Closing connection to AVR')
    self._closing = True
    if self.protocol.transport:
        self.protocol.transport.close()
python
def close(self):
    """Close the AVR device connection and don't try to reconnect."""
    self.log.warning('Closing connection to AVR')
    self._closing = True
    if self.protocol.transport:
        self.protocol.transport.close()
['def', 'close', '(', 'self', ')', ':', 'self', '.', 'log', '.', 'warning', '(', "'Closing connection to AVR'", ')', 'self', '.', '_closing', '=', 'True', 'if', 'self', '.', 'protocol', '.', 'transport', ':', 'self', '.', 'protocol', '.', 'transport', '.', 'close', '(', ')']
Close the AVR device connection and don't try to reconnect.
['Close', 'the', 'AVR', 'device', 'connection', 'and', 'don', 't', 'try', 'to', 'reconnect', '.']
train
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/connection.py#L117-L122
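A hedged sketch of shutting down the AVR link. The Connection.create factory coroutine, host, and port are assumptions about the library's asyncio API and are not part of this record; only the close() call is.

import asyncio
import anthemav

async def main():
    conn = await anthemav.Connection.create(host='192.168.1.50', port=14999)   # assumed factory API
    # ... interact with conn.protocol here ...
    conn.close()   # logs a warning, sets the closing flag, and closes the transport if one exists

asyncio.get_event_loop().run_until_complete(main())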
5,497
ic-labs/django-icekit
icekit/publishing/admin.py
_PublishingHelpersMixin.has_preview_permission
def has_preview_permission(self, request, obj=None):
    """
    Return `True` if the user has permissions to preview a publishable item.

    NOTE: this method does not actually change who can or cannot preview any
    particular item, just whether to show the preview link. The real decision
    is made by a combination of:

    - `PublishingMiddleware` which chooses who can view draft content
    - the view code for a particular item, which may or may not render draft
      content for a specific user.

    :param request: Django request object.
    :param obj: The object the user would preview, if permitted.
    :return: Boolean.
    """
    # User who can publish always has preview permission.
    if self.has_publish_permission(request, obj=obj):
        return True
    user_obj = request.user
    if not user_obj.is_active:
        return False
    if user_obj.is_staff:
        return True
    return False
python
def has_preview_permission(self, request, obj=None):
    """
    Return `True` if the user has permissions to preview a publishable item.

    NOTE: this method does not actually change who can or cannot preview any
    particular item, just whether to show the preview link. The real decision
    is made by a combination of:

    - `PublishingMiddleware` which chooses who can view draft content
    - the view code for a particular item, which may or may not render draft
      content for a specific user.

    :param request: Django request object.
    :param obj: The object the user would preview, if permitted.
    :return: Boolean.
    """
    # User who can publish always has preview permission.
    if self.has_publish_permission(request, obj=obj):
        return True
    user_obj = request.user
    if not user_obj.is_active:
        return False
    if user_obj.is_staff:
        return True
    return False
['def', 'has_preview_permission', '(', 'self', ',', 'request', ',', 'obj', '=', 'None', ')', ':', '# User who can publish always has preview permission.', 'if', 'self', '.', 'has_publish_permission', '(', 'request', ',', 'obj', '=', 'obj', ')', ':', 'return', 'True', 'user_obj', '=', 'request', '.', 'user', 'if', 'not', 'user_obj', '.', 'is_active', ':', 'return', 'False', 'if', 'user_obj', '.', 'is_staff', ':', 'return', 'True', 'return', 'False']
Return `True` if the user has permissions to preview a publishable item.

NOTE: this method does not actually change who can or cannot preview any particular item, just whether to show the preview link. The real decision is made by a combination of:

- `PublishingMiddleware` which chooses who can view draft content
- the view code for a particular item, which may or may not render draft content for a specific user.

:param request: Django request object.
:param obj: The object the user would preview, if permitted.
:return: Boolean.
['Return', 'True', 'if', 'the', 'user', 'has', 'permissions', 'to', 'preview', 'a', 'publishable', 'item', '.']
train
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/publishing/admin.py#L312-L337
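An illustrative call against the permission logic above. The `page_admin`, `page`, and `request` names are hypothetical stand-ins; the record only shows the mixin method itself.

# assuming page_admin is a ModelAdmin that mixes in _PublishingHelpersMixin and
# request is a normal Django request, the method resolves as:
#   - user who passes has_publish_permission()  -> True
#   - inactive user                             -> False
#   - active staff user                         -> True
#   - anyone else                               -> False
can_preview = page_admin.has_preview_permission(request, obj=page)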
5,498
pyviz/holoviews
holoviews/plotting/util.py
process_cmap
def process_cmap(cmap, ncolors=None, provider=None, categorical=False):
    """
    Convert valid colormap specifications to a list of colors.
    """
    providers_checked = "matplotlib, bokeh, or colorcet" if provider is None else provider

    if isinstance(cmap, Cycle):
        palette = [rgb2hex(c) if isinstance(c, tuple) else c for c in cmap.values]
    elif isinstance(cmap, list):
        palette = cmap
    elif isinstance(cmap, basestring):
        mpl_cmaps = _list_cmaps('matplotlib')
        bk_cmaps = _list_cmaps('bokeh')
        cet_cmaps = _list_cmaps('colorcet')
        if provider=='matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)):
            palette = mplcmap_to_palette(cmap, ncolors, categorical)
        elif provider=='bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)):
            palette = bokeh_palette_to_palette(cmap, ncolors, categorical)
        elif provider=='colorcet' or (provider is None and cmap in cet_cmaps):
            from colorcet import palette
            if cmap.endswith('_r'):
                palette = list(reversed(palette[cmap[:-2]]))
            else:
                palette = palette[cmap]
        else:
            raise ValueError("Supplied cmap %s not found among %s colormaps." %
                             (cmap, providers_checked))
    else:
        try:
            # Try processing as matplotlib colormap
            palette = mplcmap_to_palette(cmap, ncolors)
        except:
            palette = None
    if not isinstance(palette, list):
        raise TypeError("cmap argument %s expects a list, Cycle or valid %s colormap or palette." %
                        (cmap, providers_checked))
    if ncolors and len(palette) != ncolors:
        return [palette[i % len(palette)] for i in range(ncolors)]
    return palette
python
def process_cmap(cmap, ncolors=None, provider=None, categorical=False):
    """
    Convert valid colormap specifications to a list of colors.
    """
    providers_checked = "matplotlib, bokeh, or colorcet" if provider is None else provider

    if isinstance(cmap, Cycle):
        palette = [rgb2hex(c) if isinstance(c, tuple) else c for c in cmap.values]
    elif isinstance(cmap, list):
        palette = cmap
    elif isinstance(cmap, basestring):
        mpl_cmaps = _list_cmaps('matplotlib')
        bk_cmaps = _list_cmaps('bokeh')
        cet_cmaps = _list_cmaps('colorcet')
        if provider=='matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)):
            palette = mplcmap_to_palette(cmap, ncolors, categorical)
        elif provider=='bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)):
            palette = bokeh_palette_to_palette(cmap, ncolors, categorical)
        elif provider=='colorcet' or (provider is None and cmap in cet_cmaps):
            from colorcet import palette
            if cmap.endswith('_r'):
                palette = list(reversed(palette[cmap[:-2]]))
            else:
                palette = palette[cmap]
        else:
            raise ValueError("Supplied cmap %s not found among %s colormaps." %
                             (cmap, providers_checked))
    else:
        try:
            # Try processing as matplotlib colormap
            palette = mplcmap_to_palette(cmap, ncolors)
        except:
            palette = None
    if not isinstance(palette, list):
        raise TypeError("cmap argument %s expects a list, Cycle or valid %s colormap or palette." %
                        (cmap, providers_checked))
    if ncolors and len(palette) != ncolors:
        return [palette[i % len(palette)] for i in range(ncolors)]
    return palette
['def', 'process_cmap', '(', 'cmap', ',', 'ncolors', '=', 'None', ',', 'provider', '=', 'None', ',', 'categorical', '=', 'False', ')', ':', 'providers_checked', '=', '"matplotlib, bokeh, or colorcet"', 'if', 'provider', 'is', 'None', 'else', 'provider', 'if', 'isinstance', '(', 'cmap', ',', 'Cycle', ')', ':', 'palette', '=', '[', 'rgb2hex', '(', 'c', ')', 'if', 'isinstance', '(', 'c', ',', 'tuple', ')', 'else', 'c', 'for', 'c', 'in', 'cmap', '.', 'values', ']', 'elif', 'isinstance', '(', 'cmap', ',', 'list', ')', ':', 'palette', '=', 'cmap', 'elif', 'isinstance', '(', 'cmap', ',', 'basestring', ')', ':', 'mpl_cmaps', '=', '_list_cmaps', '(', "'matplotlib'", ')', 'bk_cmaps', '=', '_list_cmaps', '(', "'bokeh'", ')', 'cet_cmaps', '=', '_list_cmaps', '(', "'colorcet'", ')', 'if', 'provider', '==', "'matplotlib'", 'or', '(', 'provider', 'is', 'None', 'and', '(', 'cmap', 'in', 'mpl_cmaps', 'or', 'cmap', '.', 'lower', '(', ')', 'in', 'mpl_cmaps', ')', ')', ':', 'palette', '=', 'mplcmap_to_palette', '(', 'cmap', ',', 'ncolors', ',', 'categorical', ')', 'elif', 'provider', '==', "'bokeh'", 'or', '(', 'provider', 'is', 'None', 'and', '(', 'cmap', 'in', 'bk_cmaps', 'or', 'cmap', '.', 'capitalize', '(', ')', 'in', 'bk_cmaps', ')', ')', ':', 'palette', '=', 'bokeh_palette_to_palette', '(', 'cmap', ',', 'ncolors', ',', 'categorical', ')', 'elif', 'provider', '==', "'colorcet'", 'or', '(', 'provider', 'is', 'None', 'and', 'cmap', 'in', 'cet_cmaps', ')', ':', 'from', 'colorcet', 'import', 'palette', 'if', 'cmap', '.', 'endswith', '(', "'_r'", ')', ':', 'palette', '=', 'list', '(', 'reversed', '(', 'palette', '[', 'cmap', '[', ':', '-', '2', ']', ']', ')', ')', 'else', ':', 'palette', '=', 'palette', '[', 'cmap', ']', 'else', ':', 'raise', 'ValueError', '(', '"Supplied cmap %s not found among %s colormaps."', '%', '(', 'cmap', ',', 'providers_checked', ')', ')', 'else', ':', 'try', ':', '# Try processing as matplotlib colormap', 'palette', '=', 'mplcmap_to_palette', '(', 'cmap', ',', 'ncolors', ')', 'except', ':', 'palette', '=', 'None', 'if', 'not', 'isinstance', '(', 'palette', ',', 'list', ')', ':', 'raise', 'TypeError', '(', '"cmap argument %s expects a list, Cycle or valid %s colormap or palette."', '%', '(', 'cmap', ',', 'providers_checked', ')', ')', 'if', 'ncolors', 'and', 'len', '(', 'palette', ')', '!=', 'ncolors', ':', 'return', '[', 'palette', '[', 'i', '%', 'len', '(', 'palette', ')', ']', 'for', 'i', 'in', 'range', '(', 'ncolors', ')', ']', 'return', 'palette']
Convert valid colormap specifications to a list of colors.
['Convert', 'valid', 'colormap', 'specifications', 'to', 'a', 'list', 'of', 'colors', '.']
train
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/util.py#L868-L906
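Usage sketch for process_cmap. 'viridis' (matplotlib) and 'fire' (colorcet) are standard colormap names used purely as examples.

from holoviews.plotting.util import process_cmap

process_cmap('viridis', ncolors=4)                       # four colors sampled from matplotlib's viridis
process_cmap('fire', ncolors=3, provider='colorcet')     # force the colorcet provider
process_cmap(['#ff0000', '#00ff00'], ncolors=4)          # lists pass through and are cycled to length 4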
5,499
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/input_readers.py
NamespaceInputReader.from_json
def from_json(cls, json):
    """Create new DatastoreInputReader from the json, encoded by to_json.

    Args:
      json: json map representation of DatastoreInputReader.

    Returns:
      an instance of DatastoreInputReader with all data deserialized from json.
    """
    return cls(
        namespace_range.NamespaceRange.from_json_object(
            json[cls.NAMESPACE_RANGE_PARAM]),
        json[cls.BATCH_SIZE_PARAM])
python
def from_json(cls, json):
    """Create new DatastoreInputReader from the json, encoded by to_json.

    Args:
      json: json map representation of DatastoreInputReader.

    Returns:
      an instance of DatastoreInputReader with all data deserialized from json.
    """
    return cls(
        namespace_range.NamespaceRange.from_json_object(
            json[cls.NAMESPACE_RANGE_PARAM]),
        json[cls.BATCH_SIZE_PARAM])
['def', 'from_json', '(', 'cls', ',', 'json', ')', ':', 'return', 'cls', '(', 'namespace_range', '.', 'NamespaceRange', '.', 'from_json_object', '(', 'json', '[', 'cls', '.', 'NAMESPACE_RANGE_PARAM', ']', ')', ',', 'json', '[', 'cls', '.', 'BATCH_SIZE_PARAM', ']', ')']
Create new DatastoreInputReader from the json, encoded by to_json. Args: json: json map representation of DatastoreInputReader. Returns: an instance of DatastoreInputReader with all data deserialized from json.
['Create', 'new', 'DatastoreInputReader', 'from', 'the', 'json', 'encoded', 'by', 'to_json', '.']
train
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L1954-L1966
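A round-trip sketch for the classmethod above. The import path, the reader's constructor arguments, and the assumption that to_json emits the NAMESPACE_RANGE_PARAM and BATCH_SIZE_PARAM keys consumed here are not shown in this record and are only indicative.

from mapreduce import input_readers   # assumed import path for this repo layout

reader = input_readers.NamespaceInputReader(ns_range, batch_size=10)   # ns_range: a NamespaceRange (constructor assumed)
state = reader.to_json()                                               # serializes the two fields from_json consumes
restored = input_readers.NamespaceInputReader.from_json(state)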