Dataset columns (type and value/length summary):

    Unnamed: 0                   int64     values 0 to 10k
    repository_name              string    lengths 7 to 54
    func_path_in_repository      string    lengths 5 to 223
    func_name                    string    lengths 1 to 134
    whole_func_string            string    lengths 100 to 30.3k
    language                     string    1 distinct value
    func_code_string             string    lengths 100 to 30.3k
    func_code_tokens             string    lengths 138 to 33.2k
    func_documentation_string    string    lengths 1 to 15k
    func_documentation_tokens    string    lengths 5 to 5.14k
    split_name                   string    1 distinct value
    func_code_url                string    lengths 91 to 315
6,900
twilio/twilio-python
twilio/rest/api/v2010/account/incoming_phone_number/assigned_add_on/assigned_add_on_extension.py
AssignedAddOnExtensionPage.get_instance
def get_instance(self, payload):
    """
    Build an instance of AssignedAddOnExtensionInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance
    :rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance
    """
    return AssignedAddOnExtensionInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        resource_sid=self._solution['resource_sid'],
        assigned_add_on_sid=self._solution['assigned_add_on_sid'],
    )
python
['def', 'get_instance', '(', 'self', ',', 'payload', ')', ':', 'return', 'AssignedAddOnExtensionInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'account_sid', '=', 'self', '.', '_solution', '[', "'account_sid'", ']', ',', 'resource_sid', '=', 'self', '.', '_solution', '[', "'resource_sid'", ']', ',', 'assigned_add_on_sid', '=', 'self', '.', '_solution', '[', "'assigned_add_on_sid'", ']', ',', ')']
Build an instance of AssignedAddOnExtensionInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance :rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance
['Build', 'an', 'instance', 'of', 'AssignedAddOnExtensionInstance']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/incoming_phone_number/assigned_add_on/assigned_add_on_extension.py#L189-L204
6,901
nerdvegas/rez
src/rez/resolved_context.py
ResolvedContext.requested_packages
def requested_packages(self, include_implicit=False):
    """Get packages in the request.

    Args:
        include_implicit (bool): If True, implicit packages are appended
            to the result.

    Returns:
        List of `PackageRequest` objects.
    """
    if include_implicit:
        return self._package_requests + self.implicit_packages
    else:
        return self._package_requests
python
['def', 'requested_packages', '(', 'self', ',', 'include_implicit', '=', 'False', ')', ':', 'if', 'include_implicit', ':', 'return', 'self', '.', '_package_requests', '+', 'self', '.', 'implicit_packages', 'else', ':', 'return', 'self', '.', '_package_requests']
Get packages in the request. Args: include_implicit (bool): If True, implicit packages are appended to the result. Returns: List of `PackageRequest` objects.
['Get', 'packages', 'in', 'the', 'request', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolved_context.py#L322-L335
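To make the include_implicit behaviour concrete, here is a minimal, self-contained sketch; FakeContext and the package names are hypothetical stand-ins for illustration, not part of the rez API.

# Minimal sketch of the pattern above; FakeContext and the package names are invented.
class FakeContext:
    def __init__(self, requests, implicit):
        self._package_requests = list(requests)
        self.implicit_packages = list(implicit)

    def requested_packages(self, include_implicit=False):
        # Same branching as ResolvedContext.requested_packages above.
        if include_implicit:
            return self._package_requests + self.implicit_packages
        return self._package_requests

ctx = FakeContext(["maya-2020", "python-3.7"], ["platform-linux", "arch-x86_64"])
print(ctx.requested_packages())                       # explicit requests only
print(ctx.requested_packages(include_implicit=True))  # implicit packages appended at the end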
6,902
postlund/pyatv
pyatv/__main__.py
DeviceCommands.auth
async def auth(self):
    """Perform AirPlay device authentication."""
    credentials = await self.atv.airplay.generate_credentials()
    await self.atv.airplay.load_credentials(credentials)

    try:
        await self.atv.airplay.start_authentication()
        pin = await _read_input(self.loop, 'Enter PIN on screen: ')
        await self.atv.airplay.finish_authentication(pin)

        print('You may now use these credentials:')
        print(credentials)
        return 0

    except exceptions.DeviceAuthenticationError:
        logging.exception('Failed to authenticate - invalid PIN?')

    return 1
python
['async', 'def', 'auth', '(', 'self', ')', ':', 'credentials', '=', 'await', 'self', '.', 'atv', '.', 'airplay', '.', 'generate_credentials', '(', ')', 'await', 'self', '.', 'atv', '.', 'airplay', '.', 'load_credentials', '(', 'credentials', ')', 'try', ':', 'await', 'self', '.', 'atv', '.', 'airplay', '.', 'start_authentication', '(', ')', 'pin', '=', 'await', '_read_input', '(', 'self', '.', 'loop', ',', "'Enter PIN on screen: '", ')', 'await', 'self', '.', 'atv', '.', 'airplay', '.', 'finish_authentication', '(', 'pin', ')', 'print', '(', "'You may now use these credentials:'", ')', 'print', '(', 'credentials', ')', 'return', '0', 'except', 'exceptions', '.', 'DeviceAuthenticationError', ':', 'logging', '.', 'exception', '(', "'Failed to authenticate - invalid PIN?'", ')', 'return', '1']
Perform AirPlay device authentication.
['Perform', 'AirPlay', 'device', 'authentication', '.']
train
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L138-L154
6,903
kumar303/mohawk
mohawk/sender.py
Sender.accept_response
def accept_response(self, response_header,
                    content=EmptyValue, content_type=EmptyValue,
                    accept_untrusted_content=False,
                    localtime_offset_in_seconds=0,
                    timestamp_skew_in_seconds=default_ts_skew_in_seconds,
                    **auth_kw):
    """
    Accept a response to this request.

    :param response_header:
        A `Hawk`_ ``Server-Authorization`` header
        such as one created by :class:`mohawk.Receiver`.
    :type response_header: str

    :param content=EmptyValue: Byte string of the response body received.
    :type content=EmptyValue: str

    :param content_type=EmptyValue:
        Content-Type header value of the response received.
    :type content_type=EmptyValue: str

    :param accept_untrusted_content=False:
        When True, allow responses that do not hash their content.
        Read :ref:`skipping-content-checks` to learn more.
    :type accept_untrusted_content=False: bool

    :param localtime_offset_in_seconds=0:
        Seconds to add to local time in case it's out of sync.
    :type localtime_offset_in_seconds=0: float

    :param timestamp_skew_in_seconds=60:
        Max seconds until a message expires. Upon expiry,
        :class:`mohawk.exc.TokenExpired` is raised.
    :type timestamp_skew_in_seconds=60: float

    .. _`Hawk`: https://github.com/hueniverse/hawk
    """
    log.debug('accepting response {header}'
              .format(header=response_header))

    parsed_header = parse_authorization_header(response_header)

    resource = Resource(ext=parsed_header.get('ext', None),
                        content=content,
                        content_type=content_type,
                        # The following response attributes are
                        # in reference to the original request,
                        # not to the reponse header:
                        timestamp=self.req_resource.timestamp,
                        nonce=self.req_resource.nonce,
                        url=self.req_resource.url,
                        method=self.req_resource.method,
                        app=self.req_resource.app,
                        dlg=self.req_resource.dlg,
                        credentials=self.credentials,
                        seen_nonce=self.seen_nonce)

    self._authorize(
        'response', parsed_header, resource,
        # Per Node lib, a responder macs the *sender's* timestamp.
        # It does not create its own timestamp.
        # I suppose a slow response could time out here. Maybe only check
        # mac failures, not timeouts?
        their_timestamp=resource.timestamp,
        timestamp_skew_in_seconds=timestamp_skew_in_seconds,
        localtime_offset_in_seconds=localtime_offset_in_seconds,
        accept_untrusted_content=accept_untrusted_content,
        **auth_kw)
python
['def', 'accept_response', '(', 'self', ',', 'response_header', ',', 'content', '=', 'EmptyValue', ',', 'content_type', '=', 'EmptyValue', ',', 'accept_untrusted_content', '=', 'False', ',', 'localtime_offset_in_seconds', '=', '0', ',', 'timestamp_skew_in_seconds', '=', 'default_ts_skew_in_seconds', ',', '*', '*', 'auth_kw', ')', ':', 'log', '.', 'debug', '(', "'accepting response {header}'", '.', 'format', '(', 'header', '=', 'response_header', ')', ')', 'parsed_header', '=', 'parse_authorization_header', '(', 'response_header', ')', 'resource', '=', 'Resource', '(', 'ext', '=', 'parsed_header', '.', 'get', '(', "'ext'", ',', 'None', ')', ',', 'content', '=', 'content', ',', 'content_type', '=', 'content_type', ',', '# The following response attributes are', '# in reference to the original request,', '# not to the reponse header:', 'timestamp', '=', 'self', '.', 'req_resource', '.', 'timestamp', ',', 'nonce', '=', 'self', '.', 'req_resource', '.', 'nonce', ',', 'url', '=', 'self', '.', 'req_resource', '.', 'url', ',', 'method', '=', 'self', '.', 'req_resource', '.', 'method', ',', 'app', '=', 'self', '.', 'req_resource', '.', 'app', ',', 'dlg', '=', 'self', '.', 'req_resource', '.', 'dlg', ',', 'credentials', '=', 'self', '.', 'credentials', ',', 'seen_nonce', '=', 'self', '.', 'seen_nonce', ')', 'self', '.', '_authorize', '(', "'response'", ',', 'parsed_header', ',', 'resource', ',', "# Per Node lib, a responder macs the *sender's* timestamp.", '# It does not create its own timestamp.', '# I suppose a slow response could time out here. Maybe only check', '# mac failures, not timeouts?', 'their_timestamp', '=', 'resource', '.', 'timestamp', ',', 'timestamp_skew_in_seconds', '=', 'timestamp_skew_in_seconds', ',', 'localtime_offset_in_seconds', '=', 'localtime_offset_in_seconds', ',', 'accept_untrusted_content', '=', 'accept_untrusted_content', ',', '*', '*', 'auth_kw', ')']
Accept a response to this request. :param response_header: A `Hawk`_ ``Server-Authorization`` header such as one created by :class:`mohawk.Receiver`. :type response_header: str :param content=EmptyValue: Byte string of the response body received. :type content=EmptyValue: str :param content_type=EmptyValue: Content-Type header value of the response received. :type content_type=EmptyValue: str :param accept_untrusted_content=False: When True, allow responses that do not hash their content. Read :ref:`skipping-content-checks` to learn more. :type accept_untrusted_content=False: bool :param localtime_offset_in_seconds=0: Seconds to add to local time in case it's out of sync. :type localtime_offset_in_seconds=0: float :param timestamp_skew_in_seconds=60: Max seconds until a message expires. Upon expiry, :class:`mohawk.exc.TokenExpired` is raised. :type timestamp_skew_in_seconds=60: float .. _`Hawk`: https://github.com/hueniverse/hawk
['Accept', 'a', 'response', 'to', 'this', 'request', '.']
train
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/sender.py#L106-L175
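For context, a hedged sketch of how a client typically drives this method, based on mohawk's documented Sender workflow; the credentials, URL and payload below are made-up values, and the final call is left commented out because it needs a genuine Server-Authorization header from a real Hawk server.

from mohawk import Sender

# Hypothetical credentials and URL, for illustration only.
credentials = {'id': 'my-hawk-id', 'key': 'some shared secret', 'algorithm': 'sha256'}

sender = Sender(credentials,
                'https://service.example.com/resource',
                'POST',
                content='{"numbers": [1, 2, 3]}',
                content_type='application/json')

# sender.request_header is what would be sent as the request's Authorization header.
print(sender.request_header)

# After receiving the reply, the Server-Authorization header and response body
# would be verified like this (commented out: it needs a real server header):
# sender.accept_response(response_header,
#                        content=response_body,
#                        content_type=response_content_type)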
6,904
redcanari/canari3
src/canari/entrypoints.py
generate_entities
def generate_entities(ctx, output_path, mtz_file, exclude_namespace, namespace,
                      maltego_entities, append, entity):
    """Converts Maltego entity definition files to Canari python classes.
    Excludes Maltego built-in entities by default."""
    from canari.commands.generate_entities import generate_entities
    generate_entities(
        ctx.project, output_path, mtz_file, exclude_namespace, namespace,
        maltego_entities, append, entity)
python
['def', 'generate_entities', '(', 'ctx', ',', 'output_path', ',', 'mtz_file', ',', 'exclude_namespace', ',', 'namespace', ',', 'maltego_entities', ',', 'append', ',', 'entity', ')', ':', 'from', 'canari', '.', 'commands', '.', 'generate_entities', 'import', 'generate_entities', 'generate_entities', '(', 'ctx', '.', 'project', ',', 'output_path', ',', 'mtz_file', ',', 'exclude_namespace', ',', 'namespace', ',', 'maltego_entities', ',', 'append', ',', 'entity', ')']
Converts Maltego entity definition files to Canari python classes. Excludes Maltego built-in entities by default.
['Converts', 'Maltego', 'entity', 'definition', 'files', 'to', 'Canari', 'python', 'classes', '.', 'Excludes', 'Maltego', 'built', '-', 'in', 'entities', 'by', 'default', '.']
train
https://github.com/redcanari/canari3/blob/322d2bae4b49ac728229f418b786b51fcc227352/src/canari/entrypoints.py#L146-L151
6,905
brentp/cruzdb
cruzdb/__init__.py
Genome.load_file
def load_file(self, fname, table=None, sep="\t", bins=False, indexes=None):
    """
    use some of the machinery in pandas to load a file into a table

    Parameters
    ----------

    fname : str
        filename or filehandle to load

    table : str
        table to load the file to

    sep : str
        CSV separator

    bins : bool
        add a "bin" column for efficient spatial queries.

    indexes : list[str]
        list of columns to index
    """
    convs = {"#chr": "chrom", "start": "txStart", "end": "txEnd",
             "chr": "chrom", "pos": "start", "POS": "start",
             "chromStart": "txStart", "chromEnd": "txEnd"}
    if table is None:
        import os.path as op
        table = op.basename(op.splitext(fname)[0]).replace(".", "_")
        print("writing to:", table, file=sys.stderr)

    from pandas.io import sql
    import pandas as pa
    from toolshed import nopen

    needs_name = False
    for i, chunk in enumerate(pa.read_csv(nopen(fname), iterator=True,
                                          chunksize=100000, sep=sep,
                                          encoding="latin-1")):
        chunk.columns = [convs.get(k, k) for k in chunk.columns]
        if not "name" in chunk.columns:
            needs_name = True
            chunk['name'] = chunk.get('chrom', chunk[chunk.columns[0]])
        if bins:
            chunk['bin'] = 1
        if i == 0 and not table in self.tables:
            flavor = self.url.split(":")[0]
            schema = sql.get_schema(chunk, table, flavor)
            print(schema)
            self.engine.execute(schema)
        elif i == 0:
            print >>sys.stderr,\
                """adding to existing table, you may want to drop first"""

        tbl = getattr(self, table)._table
        cols = chunk.columns
        data = list(dict(zip(cols, x)) for x in chunk.values)
        if needs_name:
            for d in data:
                d['name'] = "%s:%s" % (d.get("chrom"),
                                       d.get("txStart", d.get("chromStart")))
        if bins:
            for d in data:
                d['bin'] = max(Genome.bins(int(d["txStart"]), int(d["txEnd"])))
        self.engine.execute(tbl.insert(), data)
        self.session.commit()
        if i > 0:
            print >>sys.stderr, "writing row:", i * 100000

    if "txStart" in chunk.columns:
        if "chrom" in chunk.columns:
            ssql = """CREATE INDEX "%s.chrom_txStart" ON "%s" (chrom, txStart)""" % (table, table)
        else:
            ssql = """CREATE INDEX "%s.txStart" ON "%s" (txStart)""" % (table, table)
        self.engine.execute(ssql)

    for index in (indexes or []):
        ssql = """CREATE INDEX "%s.%s" ON "%s" (%s)""" % (table, index, table, index)
        self.engine.execute(ssql)

    if bins:
        ssql = """CREATE INDEX "%s.chrom_bin" ON "%s" (chrom, bin)""" % (table, table)
        self.engine.execute(ssql)

    self.session.commit()
python
['def', 'load_file', '(', 'self', ',', 'fname', ',', 'table', '=', 'None', ',', 'sep', '=', '"\\t"', ',', 'bins', '=', 'False', ',', 'indexes', '=', 'None', ')', ':', 'convs', '=', '{', '"#chr"', ':', '"chrom"', ',', '"start"', ':', '"txStart"', ',', '"end"', ':', '"txEnd"', ',', '"chr"', ':', '"chrom"', ',', '"pos"', ':', '"start"', ',', '"POS"', ':', '"start"', ',', '"chromStart"', ':', '"txStart"', ',', '"chromEnd"', ':', '"txEnd"', '}', 'if', 'table', 'is', 'None', ':', 'import', 'os', '.', 'path', 'as', 'op', 'table', '=', 'op', '.', 'basename', '(', 'op', '.', 'splitext', '(', 'fname', ')', '[', '0', ']', ')', '.', 'replace', '(', '"."', ',', '"_"', ')', 'print', '(', '"writing to:"', ',', 'table', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'from', 'pandas', '.', 'io', 'import', 'sql', 'import', 'pandas', 'as', 'pa', 'from', 'toolshed', 'import', 'nopen', 'needs_name', '=', 'False', 'for', 'i', ',', 'chunk', 'in', 'enumerate', '(', 'pa', '.', 'read_csv', '(', 'nopen', '(', 'fname', ')', ',', 'iterator', '=', 'True', ',', 'chunksize', '=', '100000', ',', 'sep', '=', 'sep', ',', 'encoding', '=', '"latin-1"', ')', ')', ':', 'chunk', '.', 'columns', '=', '[', 'convs', '.', 'get', '(', 'k', ',', 'k', ')', 'for', 'k', 'in', 'chunk', '.', 'columns', ']', 'if', 'not', '"name"', 'in', 'chunk', '.', 'columns', ':', 'needs_name', '=', 'True', 'chunk', '[', "'name'", ']', '=', 'chunk', '.', 'get', '(', "'chrom'", ',', 'chunk', '[', 'chunk', '.', 'columns', '[', '0', ']', ']', ')', 'if', 'bins', ':', 'chunk', '[', "'bin'", ']', '=', '1', 'if', 'i', '==', '0', 'and', 'not', 'table', 'in', 'self', '.', 'tables', ':', 'flavor', '=', 'self', '.', 'url', '.', 'split', '(', '":"', ')', '[', '0', ']', 'schema', '=', 'sql', '.', 'get_schema', '(', 'chunk', ',', 'table', ',', 'flavor', ')', 'print', '(', 'schema', ')', 'self', '.', 'engine', '.', 'execute', '(', 'schema', ')', 'elif', 'i', '==', '0', ':', 'print', '>>', 'sys', '.', 'stderr', ',', '"""adding to existing table, you may want to drop first"""', 'tbl', '=', 'getattr', '(', 'self', ',', 'table', ')', '.', '_table', 'cols', '=', 'chunk', '.', 'columns', 'data', '=', 'list', '(', 'dict', '(', 'zip', '(', 'cols', ',', 'x', ')', ')', 'for', 'x', 'in', 'chunk', '.', 'values', ')', 'if', 'needs_name', ':', 'for', 'd', 'in', 'data', ':', 'd', '[', "'name'", ']', '=', '"%s:%s"', '%', '(', 'd', '.', 'get', '(', '"chrom"', ')', ',', 'd', '.', 'get', '(', '"txStart"', ',', 'd', '.', 'get', '(', '"chromStart"', ')', ')', ')', 'if', 'bins', ':', 'for', 'd', 'in', 'data', ':', 'd', '[', "'bin'", ']', '=', 'max', '(', 'Genome', '.', 'bins', '(', 'int', '(', 'd', '[', '"txStart"', ']', ')', ',', 'int', '(', 'd', '[', '"txEnd"', ']', ')', ')', ')', 'self', '.', 'engine', '.', 'execute', '(', 'tbl', '.', 'insert', '(', ')', ',', 'data', ')', 'self', '.', 'session', '.', 'commit', '(', ')', 'if', 'i', '>', '0', ':', 'print', '>>', 'sys', '.', 'stderr', ',', '"writing row:"', ',', 'i', '*', '100000', 'if', '"txStart"', 'in', 'chunk', '.', 'columns', ':', 'if', '"chrom"', 'in', 'chunk', '.', 'columns', ':', 'ssql', '=', '"""CREATE INDEX "%s.chrom_txStart" ON "%s" (chrom, txStart)"""', '%', '(', 'table', ',', 'table', ')', 'else', ':', 'ssql', '=', '"""CREATE INDEX "%s.txStart" ON "%s" (txStart)"""', '%', '(', 'table', ',', 'table', ')', 'self', '.', 'engine', '.', 'execute', '(', 'ssql', ')', 'for', 'index', 'in', '(', 'indexes', 'or', '[', ']', ')', ':', 'ssql', '=', '"""CREATE INDEX "%s.%s" ON "%s" (%s)"""', '%', '(', 'table', ',', 'index', ',', 'table', ',', 
'index', ')', 'self', '.', 'engine', '.', 'execute', '(', 'ssql', ')', 'if', 'bins', ':', 'ssql', '=', '"""CREATE INDEX "%s.chrom_bin" ON "%s" (chrom, bin)"""', '%', '(', 'table', ',', 'table', ')', 'self', '.', 'engine', '.', 'execute', '(', 'ssql', ')', 'self', '.', 'session', '.', 'commit', '(', ')']
use some of the machinery in pandas to load a file into a table Parameters ---------- fname : str filename or filehandle to load table : str table to load the file to sep : str CSV separator bins : bool add a "bin" column for efficient spatial queries. indexes : list[str] list of columns to index
['use', 'some', 'of', 'the', 'machinery', 'in', 'pandas', 'to', 'load', 'a', 'file', 'into', 'a', 'table']
train
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/__init__.py#L146-L228
6,906
jeffh/rpi_courses
rpi_courses/sis_parser/course_catalog.py
CourseCatalog.find_course_by_crn
def find_course_by_crn(self, crn):
    """Searches all courses by CRNs. Not particularly efficient.

    Returns None if not found.
    """
    for name, course in self.courses.iteritems():
        if crn in course:
            return course
    return None
python
['def', 'find_course_by_crn', '(', 'self', ',', 'crn', ')', ':', 'for', 'name', ',', 'course', 'in', 'self', '.', 'courses', '.', 'iteritems', '(', ')', ':', 'if', 'crn', 'in', 'course', ':', 'return', 'course', 'return', 'None']
Searches all courses by CRNs. Not particularly efficient. Returns None if not found.
['Searches', 'all', 'courses', 'by', 'CRNs', '.', 'Not', 'particularly', 'efficient', '.', 'Returns', 'None', 'if', 'not', 'found', '.']
train
https://github.com/jeffh/rpi_courses/blob/c97176f73f866f112c785910ebf3ff8a790e8e9a/rpi_courses/sis_parser/course_catalog.py#L99-L106
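The lookup relies on each course supporting `crn in course`. A minimal, self-contained sketch of that pattern follows; the Course class and CRN values are invented for illustration and are not the rpi_courses API (note the original uses Python 2's iteritems()).

# Hypothetical Course class implementing __contains__ over its section CRNs.
class Course:
    def __init__(self, name, crns):
        self.name = name
        self._crns = set(crns)

    def __contains__(self, crn):
        return crn in self._crns

courses = {"CSCI-1100": Course("CSCI-1100", [95001, 95002]),
           "MATH-1010": Course("MATH-1010", [96010])}

def find_course_by_crn(courses, crn):
    # items() here; the original Python 2 code uses iteritems().
    for name, course in courses.items():
        if crn in course:
            return course
    return None

print(find_course_by_crn(courses, 96010).name)  # MATH-1010
print(find_course_by_crn(courses, 11111))       # None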
6,907
theSage21/lanchat
lanchat/chat.py
Node.__make_client
def __make_client(self):
    "Make this node a client"
    notice('Making client, getting server connection', self.color)
    self.mode = 'c'
    addr = utils.get_existing_server_addr()
    sock = utils.get_client_sock(addr)
    self.__s = sock
    with self.__client_list_lock:
        self.clients = deque()
        self.threads = deque()
python
['def', '__make_client', '(', 'self', ')', ':', 'notice', '(', "'Making client, getting server connection'", ',', 'self', '.', 'color', ')', 'self', '.', 'mode', '=', "'c'", 'addr', '=', 'utils', '.', 'get_existing_server_addr', '(', ')', 'sock', '=', 'utils', '.', 'get_client_sock', '(', 'addr', ')', 'self', '.', '__s', '=', 'sock', 'with', 'self', '.', '__client_list_lock', ':', 'self', '.', 'clients', '=', 'deque', '(', ')', 'self', '.', 'threads', '=', 'deque', '(', ')']
Make this node a client
['Make', 'this', 'node', 'a', 'client']
train
https://github.com/theSage21/lanchat/blob/66f5dcead67fef815347b956b1d3e149a7e13b29/lanchat/chat.py#L194-L203
6,908
kpdyer/regex2dfa
third_party/re2/lib/codereview/codereview.py
GetRpcServer
def GetRpcServer(options):
    """Returns an instance of an AbstractRpcServer.

    Returns:
        A new AbstractRpcServer, on which RPC calls can be made.
    """
    rpc_server_class = HttpRpcServer

    def GetUserCredentials():
        """Prompts the user for a username and password."""
        # Disable status prints so they don't obscure the password prompt.
        global global_status
        st = global_status
        global_status = None

        email = options.email
        if email is None:
            email = GetEmail("Email (login for uploading to %s)" % options.server)
        password = getpass.getpass("Password for %s: " % email)

        # Put status back.
        global_status = st
        return (email, password)

    # If this is the dev_appserver, use fake authentication.
    host = (options.host or options.server).lower()
    if host == "localhost" or host.startswith("localhost:"):
        email = options.email
        if email is None:
            email = "[email protected]"
            logging.info("Using debug user %s. Override with --email" % email)
        server = rpc_server_class(
            options.server,
            lambda: (email, "password"),
            host_override=options.host,
            extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
            save_cookies=options.save_cookies)
        # Don't try to talk to ClientLogin.
        server.authenticated = True
        return server

    return rpc_server_class(options.server, GetUserCredentials,
                            host_override=options.host,
                            save_cookies=options.save_cookies)
python
['def', 'GetRpcServer', '(', 'options', ')', ':', 'rpc_server_class', '=', 'HttpRpcServer', 'def', 'GetUserCredentials', '(', ')', ':', '"""Prompts the user for a username and password."""', "# Disable status prints so they don't obscure the password prompt.", 'global', 'global_status', 'st', '=', 'global_status', 'global_status', '=', 'None', 'email', '=', 'options', '.', 'email', 'if', 'email', 'is', 'None', ':', 'email', '=', 'GetEmail', '(', '"Email (login for uploading to %s)"', '%', 'options', '.', 'server', ')', 'password', '=', 'getpass', '.', 'getpass', '(', '"Password for %s: "', '%', 'email', ')', '# Put status back.', 'global_status', '=', 'st', 'return', '(', 'email', ',', 'password', ')', '# If this is the dev_appserver, use fake authentication.', 'host', '=', '(', 'options', '.', 'host', 'or', 'options', '.', 'server', ')', '.', 'lower', '(', ')', 'if', 'host', '==', '"localhost"', 'or', 'host', '.', 'startswith', '(', '"localhost:"', ')', ':', 'email', '=', 'options', '.', 'email', 'if', 'email', 'is', 'None', ':', 'email', '=', '"[email protected]"', 'logging', '.', 'info', '(', '"Using debug user %s. Override with --email"', '%', 'email', ')', 'server', '=', 'rpc_server_class', '(', 'options', '.', 'server', ',', 'lambda', ':', '(', 'email', ',', '"password"', ')', ',', 'host_override', '=', 'options', '.', 'host', ',', 'extra_headers', '=', '{', '"Cookie"', ':', '\'dev_appserver_login="%s:False"\'', '%', 'email', '}', ',', 'save_cookies', '=', 'options', '.', 'save_cookies', ')', "# Don't try to talk to ClientLogin.", 'server', '.', 'authenticated', '=', 'True', 'return', 'server', 'return', 'rpc_server_class', '(', 'options', '.', 'server', ',', 'GetUserCredentials', ',', 'host_override', '=', 'options', '.', 'host', ',', 'save_cookies', '=', 'options', '.', 'save_cookies', ')']
Returns an instance of an AbstractRpcServer. Returns: A new AbstractRpcServer, on which RPC calls can be made.
['Returns', 'an', 'instance', 'of', 'an', 'AbstractRpcServer', '.']
train
https://github.com/kpdyer/regex2dfa/blob/109f877e60ef0dfcb430f11516d215930b7b9936/third_party/re2/lib/codereview/codereview.py#L3048-L3091
6,909
tipsi/tipsi_tools
tipsi_tools/doc_utils/tipsi_sphinx/dyn_serializer.py
parse_doc
def parse_doc(doc):
    """
    Parse docstrings to dict, it should look like:
    key: value
    """
    if not doc:
        return {}
    out = {}
    for s in doc.split('\n'):
        s = s.strip().split(':', maxsplit=1)
        if len(s) == 2:
            out[s[0]] = s[1]
    return out
python
['def', 'parse_doc', '(', 'doc', ')', ':', 'if', 'not', 'doc', ':', 'return', '{', '}', 'out', '=', '{', '}', 'for', 's', 'in', 'doc', '.', 'split', '(', "'\\n'", ')', ':', 's', '=', 's', '.', 'strip', '(', ')', '.', 'split', '(', "':'", ',', 'maxsplit', '=', '1', ')', 'if', 'len', '(', 's', ')', '==', '2', ':', 'out', '[', 's', '[', '0', ']', ']', '=', 's', '[', '1', ']', 'return', 'out']
Parse docstrings to dict, it should look like: key: value
['Parse', 'docstrings', 'to', 'dict', 'it', 'should', 'look', 'like', ':', 'key', ':', 'value']
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/doc_utils/tipsi_sphinx/dyn_serializer.py#L20-L32
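Because parse_doc is self-contained, a short usage sketch is easy to add; the sample docstring below is invented for illustration.

def parse_doc(doc):
    # Same logic as the tipsi_tools function above: keep only "key: value" lines.
    if not doc:
        return {}
    out = {}
    for s in doc.split('\n'):
        s = s.strip().split(':', maxsplit=1)
        if len(s) == 2:
            out[s[0]] = s[1]
    return out

sample = """
Summary line without a colon
serializer: DynamicUserSerializer
many: true
"""
print(parse_doc(sample))
# {'serializer': ' DynamicUserSerializer', 'many': ' true'}
# Lines without a colon are dropped, and each value keeps the leading space
# because split(':', maxsplit=1) returns everything after the first colon.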
6,910
gr33ndata/dysl
dysl/social.py
SocialLM.karbasa
def karbasa(self, result):
    """ Finding if class probabilities are close to eachother
        Ratio of the distance between 1st and 2nd class,
        to the distance between 1st and last class.

        :param result: The dict returned by LM.calculate()
    """
    probs = result['all_probs']
    probs.sort()
    return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
python
['def', 'karbasa', '(', 'self', ',', 'result', ')', ':', 'probs', '=', 'result', '[', "'all_probs'", ']', 'probs', '.', 'sort', '(', ')', 'return', 'float', '(', 'probs', '[', '1', ']', '-', 'probs', '[', '0', ']', ')', '/', 'float', '(', 'probs', '[', '-', '1', ']', '-', 'probs', '[', '0', ']', ')']
Finding if class probabilities are close to eachother Ratio of the distance between 1st and 2nd class, to the distance between 1st and last class. :param result: The dict returned by LM.calculate()
['Finding', 'if', 'class', 'probabilities', 'are', 'close', 'to', 'eachother', 'Ratio', 'of', 'the', 'distance', 'between', '1st', 'and', '2nd', 'class', 'to', 'the', 'distance', 'between', '1st', 'and', 'last', 'class', '.']
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L26-L35
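A small worked example of the ratio may help; the helper below only mirrors the arithmetic of karbasa on invented numbers and is not part of dysl.

# Worked example of the karbasa ratio with invented scores (not part of dysl).
def karbasa_ratio(all_probs):
    probs = sorted(all_probs)
    return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])

# Two lowest scores nearly tied -> ratio close to 0:
print(karbasa_ratio([0.30, 0.31, 0.90]))   # (0.31 - 0.30) / (0.90 - 0.30) ~= 0.017
# Two lowest scores clearly separated -> larger ratio:
print(karbasa_ratio([0.10, 0.55, 0.90]))   # (0.55 - 0.10) / (0.90 - 0.10) = 0.5625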
6,911
titusjan/argos
argos/config/choicecti.py
ChoiceCti.createEditor
def createEditor(self, delegate, parent, option):
    """ Creates a ChoiceCtiEditor.
        For the parameters see the AbstractCti constructor documentation.
    """
    return ChoiceCtiEditor(self, delegate, parent=parent)
python
['def', 'createEditor', '(', 'self', ',', 'delegate', ',', 'parent', ',', 'option', ')', ':', 'return', 'ChoiceCtiEditor', '(', 'self', ',', 'delegate', ',', 'parent', '=', 'parent', ')']
Creates a ChoiceCtiEditor. For the parameters see the AbstractCti constructor documentation.
['Creates', 'a', 'ChoiceCtiEditor', '.', 'For', 'the', 'parameters', 'see', 'the', 'AbstractCti', 'constructor', 'documentation', '.']
train
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/choicecti.py#L161-L165
6,912
clalancette/pycdlib
pycdlib/udf.py
symlink_to_bytes
def symlink_to_bytes(symlink_target):
    # type: (str) -> bytes
    '''
    A function to generate UDF symlink data from a Unix-like path.

    Parameters:
     symlink_target - The Unix-like path that is the symlink.
    Returns:
     The UDF data corresponding to the symlink.
    '''
    symlink_data = bytearray()
    for comp in symlink_target.split('/'):
        if comp == '':
            # If comp is empty, then we know this is the leading slash
            # and we should make an absolute entry (double slashes and
            # such are weeded out by the earlier utils.normpath).
            symlink_data.extend(b'\x02\x00\x00\x00')
        elif comp == '.':
            symlink_data.extend(b'\x04\x00\x00\x00')
        elif comp == '..':
            symlink_data.extend(b'\x03\x00\x00\x00')
        else:
            symlink_data.extend(b'\x05')
            ostaname = _ostaunicode(comp)
            symlink_data.append(len(ostaname))
            symlink_data.extend(b'\x00\x00')
            symlink_data.extend(ostaname)

    return symlink_data
python
['def', 'symlink_to_bytes', '(', 'symlink_target', ')', ':', '# type: (str) -> bytes', 'symlink_data', '=', 'bytearray', '(', ')', 'for', 'comp', 'in', 'symlink_target', '.', 'split', '(', "'/'", ')', ':', 'if', 'comp', '==', "''", ':', '# If comp is empty, then we know this is the leading slash', '# and we should make an absolute entry (double slashes and', '# such are weeded out by the earlier utils.normpath).', 'symlink_data', '.', 'extend', '(', "b'\\x02\\x00\\x00\\x00'", ')', 'elif', 'comp', '==', "'.'", ':', 'symlink_data', '.', 'extend', '(', "b'\\x04\\x00\\x00\\x00'", ')', 'elif', 'comp', '==', "'..'", ':', 'symlink_data', '.', 'extend', '(', "b'\\x03\\x00\\x00\\x00'", ')', 'else', ':', 'symlink_data', '.', 'extend', '(', "b'\\x05'", ')', 'ostaname', '=', '_ostaunicode', '(', 'comp', ')', 'symlink_data', '.', 'append', '(', 'len', '(', 'ostaname', ')', ')', 'symlink_data', '.', 'extend', '(', "b'\\x00\\x00'", ')', 'symlink_data', '.', 'extend', '(', 'ostaname', ')', 'return', 'symlink_data']
A function to generate UDF symlink data from a Unix-like path. Parameters: symlink_target - The Unix-like path that is the symlink. Returns: The UDF data corresponding to the symlink.
['A', 'function', 'to', 'generate', 'UDF', 'symlink', 'data', 'from', 'a', 'Unix', '-', 'like', 'path', '.']
train
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L3554-L3582
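To see the byte layout this produces, here is a small self-contained sketch. fake_ostaunicode is a simplified stand-in for pycdlib's _ostaunicode (it assumes plain ASCII names with an 8-bit OSTA compressed-Unicode compression ID byte), so the exact name bytes are an assumption; the component-type bytes (2 for the leading '/', 3 for '..', 4 for '.', 5 for a named component) come straight from the function above.

def fake_ostaunicode(name):
    # Simplified stand-in for pycdlib's _ostaunicode: ASCII-only names,
    # prefixed with the 8-bit compression ID byte (assumption for this sketch).
    return b'\x08' + name.encode('ascii')

def symlink_to_bytes(symlink_target):
    # Same component logic as the pycdlib function above.
    symlink_data = bytearray()
    for comp in symlink_target.split('/'):
        if comp == '':
            symlink_data.extend(b'\x02\x00\x00\x00')   # absolute-path root entry
        elif comp == '.':
            symlink_data.extend(b'\x04\x00\x00\x00')
        elif comp == '..':
            symlink_data.extend(b'\x03\x00\x00\x00')
        else:
            symlink_data.extend(b'\x05')               # named component
            ostaname = fake_ostaunicode(comp)
            symlink_data.append(len(ostaname))
            symlink_data.extend(b'\x00\x00')
            symlink_data.extend(ostaname)
    return symlink_data

print(bytes(symlink_to_bytes('/usr/lib')).hex(' '))
# 02 00 00 00 05 04 00 00 08 75 73 72 05 04 00 00 08 6c 69 62
# i.e. one 4-byte root entry, then two named-component records ("usr", "lib").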
6,913
GNS3/gns3-server
gns3server/compute/dynamips/nios/nio.py
NIO.set_bandwidth
def set_bandwidth(self, bandwidth):
    """
    Sets bandwidth constraint.

    :param bandwidth: bandwidth integer value (in Kb/s)
    """
    yield from self._hypervisor.send("nio set_bandwidth {name} {bandwidth}".format(name=self._name,
                                                                                   bandwidth=bandwidth))
    self._bandwidth = bandwidth
python
['def', 'set_bandwidth', '(', 'self', ',', 'bandwidth', ')', ':', 'yield', 'from', 'self', '.', '_hypervisor', '.', 'send', '(', '"nio set_bandwidth {name} {bandwidth}"', '.', 'format', '(', 'name', '=', 'self', '.', '_name', ',', 'bandwidth', '=', 'bandwidth', ')', ')', 'self', '.', '_bandwidth', '=', 'bandwidth']
Sets bandwidth constraint. :param bandwidth: bandwidth integer value (in Kb/s)
['Sets', 'bandwidth', 'constraint', '.']
train
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nios/nio.py#L229-L237
6,914
SystemRDL/systemrdl-compiler
systemrdl/core/ComponentVisitor.py
ComponentVisitor.visitParam_def
def visitParam_def(self, ctx:SystemRDLParser.Param_defContext):
    """
    Parameter Definition block
    """
    self.compiler.namespace.enter_scope()

    param_defs = []
    for elem in ctx.getTypedRuleContexts(SystemRDLParser.Param_def_elemContext):
        param_def = self.visit(elem)
        param_defs.append(param_def)

    self.compiler.namespace.exit_scope()
    return param_defs
python
['def', 'visitParam_def', '(', 'self', ',', 'ctx', ':', 'SystemRDLParser', '.', 'Param_defContext', ')', ':', 'self', '.', 'compiler', '.', 'namespace', '.', 'enter_scope', '(', ')', 'param_defs', '=', '[', ']', 'for', 'elem', 'in', 'ctx', '.', 'getTypedRuleContexts', '(', 'SystemRDLParser', '.', 'Param_def_elemContext', ')', ':', 'param_def', '=', 'self', '.', 'visit', '(', 'elem', ')', 'param_defs', '.', 'append', '(', 'param_def', ')', 'self', '.', 'compiler', '.', 'namespace', '.', 'exit_scope', '(', ')', 'return', 'param_defs']
Parameter Definition block
['Parameter', 'Definition', 'block']
train
https://github.com/SystemRDL/systemrdl-compiler/blob/6ae64f2bb6ecbbe9db356e20e8ac94e85bdeed3a/systemrdl/core/ComponentVisitor.py#L471-L483
6,915
alejandroautalan/pygubu
pygubu/widgets/calendarframe.py
CalendarFrame._draw_calendar
def _draw_calendar(self, canvas, redraw=False):
    """Draws calendar."""
    options = self.__options

    # Update labels:
    name = self._cal.formatmonthname(self._date.year, self._date.month, 0,
                                     withyear=False)
    self._lmonth.configure(text=name.title())
    self._lyear.configure(text=str(self._date.year))

    # Update calendar
    ch = canvas.winfo_height()
    cw = canvas.winfo_width()
    rowh = ch / 7.0
    colw = cw / 7.0

    # Header background
    if self._rheader is None:
        self._rheader = canvas.create_rectangle(0, 0, cw, rowh, width=0,
                                                fill=options['headerbg'])
    else:
        canvas.itemconfigure(self._rheader, fill=options['headerbg'])
        canvas.coords(self._rheader, 0, 0, cw, rowh)

    # Header text
    ox = 0
    oy = rowh / 2.0
    coffset = colw / 2.0
    cols = self._cal.formatweekheader(3).split()
    for i in range(0, 7):
        x = ox + i * colw + coffset
        if redraw:
            item = self._theader[i]
            canvas.coords(item, x, oy)
            canvas.itemconfigure(item, text=cols[i], fill=options['headerfg'])
        else:
            self._theader[i] = canvas.create_text(x, oy, text=cols[i],
                                                  fill=options['headerbg'])

    # background matrix
    oy = rowh
    ox = 0
    for i, x, y, x1, y1 in matrix_coords(6, 7, rowh, colw, ox, oy):
        x1 -= 1
        y1 -= 1
        if redraw:
            rec = self._recmat[i]
            canvas.coords(rec, x, y, x1, y1)
            canvas.itemconfigure(rec, fill=options['calendarbg'])
        else:
            rec = canvas.create_rectangle(x, y, x1, y1, width=1,
                                          fill=options['calendarbg'],
                                          outline=options['calendarbg'],
                                          activeoutline=options['selectbg'],
                                          activewidth=1, tags='cell')
            self._recmat[i] = rec

    # text matrix
    weeks = self._weeks
    xoffset = colw / 2.0
    yoffset = rowh / 2.0
    oy = rowh
    ox = 0
    for i, x, y, x1, y1 in matrix_coords(6, 7, rowh, colw, ox, oy):
        x += coffset
        y += yoffset
        # day text
        txt = ""
        f, c = i2rc(i, 7)
        if f < len(weeks):
            day = weeks[f][c]
            txt = "{0}".format(day) if day != 0 else ""
        if redraw:
            item = self._txtmat[i]
            canvas.coords(item, x, y)
            canvas.itemconfigure(item, text=txt)
        else:
            self._txtmat[i] = canvas.create_text(x, y, text=txt,
                                                 state=tk.DISABLED)

    # Mark days
    self._mark_days()
python
['def', '_draw_calendar', '(', 'self', ',', 'canvas', ',', 'redraw', '=', 'False', ')', ':', 'options', '=', 'self', '.', '__options', '# Update labels:', 'name', '=', 'self', '.', '_cal', '.', 'formatmonthname', '(', 'self', '.', '_date', '.', 'year', ',', 'self', '.', '_date', '.', 'month', ',', '0', ',', 'withyear', '=', 'False', ')', 'self', '.', '_lmonth', '.', 'configure', '(', 'text', '=', 'name', '.', 'title', '(', ')', ')', 'self', '.', '_lyear', '.', 'configure', '(', 'text', '=', 'str', '(', 'self', '.', '_date', '.', 'year', ')', ')', '# Update calendar', 'ch', '=', 'canvas', '.', 'winfo_height', '(', ')', 'cw', '=', 'canvas', '.', 'winfo_width', '(', ')', 'rowh', '=', 'ch', '/', '7.0', 'colw', '=', 'cw', '/', '7.0', '# Header background', 'if', 'self', '.', '_rheader', 'is', 'None', ':', 'self', '.', '_rheader', '=', 'canvas', '.', 'create_rectangle', '(', '0', ',', '0', ',', 'cw', ',', 'rowh', ',', 'width', '=', '0', ',', 'fill', '=', 'options', '[', "'headerbg'", ']', ')', 'else', ':', 'canvas', '.', 'itemconfigure', '(', 'self', '.', '_rheader', ',', 'fill', '=', 'options', '[', "'headerbg'", ']', ')', 'canvas', '.', 'coords', '(', 'self', '.', '_rheader', ',', '0', ',', '0', ',', 'cw', ',', 'rowh', ')', '# Header text', 'ox', '=', '0', 'oy', '=', 'rowh', '/', '2.0', 'coffset', '=', 'colw', '/', '2.0', 'cols', '=', 'self', '.', '_cal', '.', 'formatweekheader', '(', '3', ')', '.', 'split', '(', ')', 'for', 'i', 'in', 'range', '(', '0', ',', '7', ')', ':', 'x', '=', 'ox', '+', 'i', '*', 'colw', '+', 'coffset', 'if', 'redraw', ':', 'item', '=', 'self', '.', '_theader', '[', 'i', ']', 'canvas', '.', 'coords', '(', 'item', ',', 'x', ',', 'oy', ')', 'canvas', '.', 'itemconfigure', '(', 'item', ',', 'text', '=', 'cols', '[', 'i', ']', ',', 'fill', '=', 'options', '[', "'headerfg'", ']', ')', 'else', ':', 'self', '.', '_theader', '[', 'i', ']', '=', 'canvas', '.', 'create_text', '(', 'x', ',', 'oy', ',', 'text', '=', 'cols', '[', 'i', ']', ',', 'fill', '=', 'options', '[', "'headerbg'", ']', ')', '# background matrix', 'oy', '=', 'rowh', 'ox', '=', '0', 'for', 'i', ',', 'x', ',', 'y', ',', 'x1', ',', 'y1', 'in', 'matrix_coords', '(', '6', ',', '7', ',', 'rowh', ',', 'colw', ',', 'ox', ',', 'oy', ')', ':', 'x1', '-=', '1', 'y1', '-=', '1', 'if', 'redraw', ':', 'rec', '=', 'self', '.', '_recmat', '[', 'i', ']', 'canvas', '.', 'coords', '(', 'rec', ',', 'x', ',', 'y', ',', 'x1', ',', 'y1', ')', 'canvas', '.', 'itemconfigure', '(', 'rec', ',', 'fill', '=', 'options', '[', "'calendarbg'", ']', ')', 'else', ':', 'rec', '=', 'canvas', '.', 'create_rectangle', '(', 'x', ',', 'y', ',', 'x1', ',', 'y1', ',', 'width', '=', '1', ',', 'fill', '=', 'options', '[', "'calendarbg'", ']', ',', 'outline', '=', 'options', '[', "'calendarbg'", ']', ',', 'activeoutline', '=', 'options', '[', "'selectbg'", ']', ',', 'activewidth', '=', '1', ',', 'tags', '=', "'cell'", ')', 'self', '.', '_recmat', '[', 'i', ']', '=', 'rec', '# text matrix', 'weeks', '=', 'self', '.', '_weeks', 'xoffset', '=', 'colw', '/', '2.0', 'yoffset', '=', 'rowh', '/', '2.0', 'oy', '=', 'rowh', 'ox', '=', '0', 'for', 'i', ',', 'x', ',', 'y', ',', 'x1', ',', 'y1', 'in', 'matrix_coords', '(', '6', ',', '7', ',', 'rowh', ',', 'colw', ',', 'ox', ',', 'oy', ')', ':', 'x', '+=', 'coffset', 'y', '+=', 'yoffset', '# day text', 'txt', '=', '""', 'f', ',', 'c', '=', 'i2rc', '(', 'i', ',', '7', ')', 'if', 'f', '<', 'len', '(', 'weeks', ')', ':', 'day', '=', 'weeks', '[', 'f', ']', '[', 'c', ']', 'txt', '=', '"{0}"', '.', 'format', '(', 'day', 
')', 'if', 'day', '!=', '0', 'else', '""', 'if', 'redraw', ':', 'item', '=', 'self', '.', '_txtmat', '[', 'i', ']', 'canvas', '.', 'coords', '(', 'item', ',', 'x', ',', 'y', ')', 'canvas', '.', 'itemconfigure', '(', 'item', ',', 'text', '=', 'txt', ')', 'else', ':', 'self', '.', '_txtmat', '[', 'i', ']', '=', 'canvas', '.', 'create_text', '(', 'x', ',', 'y', ',', 'text', '=', 'txt', ',', 'state', '=', 'tk', '.', 'DISABLED', ')', '# Mark days', 'self', '.', '_mark_days', '(', ')']
Draws calendar.
['Draws', 'calendar', '.']
train
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/calendarframe.py#L362-L440
6,916
log2timeline/plaso
plaso/storage/event_tag_index.py
EventTagIndex._Build
def _Build(self, storage_file):
    """Builds the event tag index.

    Args:
        storage_file (BaseStorageFile): storage file.
    """
    self._index = {}
    for event_tag in storage_file.GetEventTags():
        self.SetEventTag(event_tag)
python
['def', '_Build', '(', 'self', ',', 'storage_file', ')', ':', 'self', '.', '_index', '=', '{', '}', 'for', 'event_tag', 'in', 'storage_file', '.', 'GetEventTags', '(', ')', ':', 'self', '.', 'SetEventTag', '(', 'event_tag', ')']
Builds the event tag index. Args: storage_file (BaseStorageFile): storage file.
['Builds', 'the', 'event', 'tag', 'index', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/event_tag_index.py#L21-L29
6,917
marcharper/python-ternary
ternary/ternary_axes_subplot.py
TernaryAxesSubplot.left_axis_label
def left_axis_label(self, label, position=None, rotation=60, offset=0.08,
                    **kwargs):
    """
    Sets the label on the left axis.

    Parameters
    ----------
    label: String
        The axis label
    position: 3-Tuple of floats, None
        The position of the text label
    rotation: float, 60
        The angle of rotation of the label
    offset: float,
        Used to compute the distance of the label from the axis
    kwargs:
        Any kwargs to pass through to matplotlib.
    """
    if not position:
        position = (-offset, 3./5, 2./5)
    self._labels["left"] = (label, position, rotation, kwargs)
python
['def', 'left_axis_label', '(', 'self', ',', 'label', ',', 'position', '=', 'None', ',', 'rotation', '=', '60', ',', 'offset', '=', '0.08', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'position', ':', 'position', '=', '(', '-', 'offset', ',', '3.', '/', '5', ',', '2.', '/', '5', ')', 'self', '.', '_labels', '[', '"left"', ']', '=', '(', 'label', ',', 'position', ',', 'rotation', ',', 'kwargs', ')']
Sets the label on the left axis. Parameters ---------- label: String The axis label position: 3-Tuple of floats, None The position of the text label rotation: float, 60 The angle of rotation of the label offset: float, Used to compute the distance of the label from the axis kwargs: Any kwargs to pass through to matplotlib.
['Sets', 'the', 'label', 'on', 'the', 'left', 'axis', '.']
train
https://github.com/marcharper/python-ternary/blob/a4bef393ec9df130d4b55707293c750498a01843/ternary/ternary_axes_subplot.py#L122-L143
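A typical call, sketched from python-ternary's documented figure/TernaryAxesSubplot workflow; the label text, scale and offset values below are arbitrary examples.

import matplotlib.pyplot as plt
import ternary

# Sketch based on python-ternary's documented usage; values are arbitrary.
figure, tax = ternary.figure(scale=100)
tax.boundary()
tax.left_axis_label("Left component (%)", offset=0.14)
tax.right_axis_label("Right component (%)", offset=0.14)
tax.bottom_axis_label("Bottom component (%)", offset=0.06)
plt.show()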
6,918
materialsproject/pymatgen
pymatgen/symmetry/analyzer.py
PointGroupAnalyzer._find_spherical_axes
def _find_spherical_axes(self):
    """
    Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point
    group T molecules have only one unique 3-fold and one unique 2-fold
    axis. O molecules have one unique 4, 3 and 2-fold axes. I molecules
    have a unique 5-fold axis.
    """
    rot_present = defaultdict(bool)
    origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
    test_set = min(dist_el_sites.values(), key=lambda s: len(s))
    coords = [s.coords for s in test_set]
    for c1, c2, c3 in itertools.combinations(coords, 3):
        for cc1, cc2 in itertools.combinations([c1, c2, c3], 2):
            if not rot_present[2]:
                test_axis = cc1 + cc2
                if np.linalg.norm(test_axis) > self.tol:
                    op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
                    rot_present[2] = self.is_valid_op(op)
                    if rot_present[2]:
                        self.symmops.append(op)
                        self.rot_sym.append((test_axis, 2))

        test_axis = np.cross(c2 - c1, c3 - c1)
        if np.linalg.norm(test_axis) > self.tol:
            for r in (3, 4, 5):
                if not rot_present[r]:
                    op = SymmOp.from_axis_angle_and_translation(
                        test_axis, 360 / r)
                    rot_present[r] = self.is_valid_op(op)
                    if rot_present[r]:
                        self.symmops.append(op)
                        self.rot_sym.append((test_axis, r))
                        break
        if rot_present[2] and rot_present[3] and (
                rot_present[4] or rot_present[5]):
            break
python
['def', '_find_spherical_axes', '(', 'self', ')', ':', 'rot_present', '=', 'defaultdict', '(', 'bool', ')', 'origin_site', ',', 'dist_el_sites', '=', 'cluster_sites', '(', 'self', '.', 'centered_mol', ',', 'self', '.', 'tol', ')', 'test_set', '=', 'min', '(', 'dist_el_sites', '.', 'values', '(', ')', ',', 'key', '=', 'lambda', 's', ':', 'len', '(', 's', ')', ')', 'coords', '=', '[', 's', '.', 'coords', 'for', 's', 'in', 'test_set', ']', 'for', 'c1', ',', 'c2', ',', 'c3', 'in', 'itertools', '.', 'combinations', '(', 'coords', ',', '3', ')', ':', 'for', 'cc1', ',', 'cc2', 'in', 'itertools', '.', 'combinations', '(', '[', 'c1', ',', 'c2', ',', 'c3', ']', ',', '2', ')', ':', 'if', 'not', 'rot_present', '[', '2', ']', ':', 'test_axis', '=', 'cc1', '+', 'cc2', 'if', 'np', '.', 'linalg', '.', 'norm', '(', 'test_axis', ')', '>', 'self', '.', 'tol', ':', 'op', '=', 'SymmOp', '.', 'from_axis_angle_and_translation', '(', 'test_axis', ',', '180', ')', 'rot_present', '[', '2', ']', '=', 'self', '.', 'is_valid_op', '(', 'op', ')', 'if', 'rot_present', '[', '2', ']', ':', 'self', '.', 'symmops', '.', 'append', '(', 'op', ')', 'self', '.', 'rot_sym', '.', 'append', '(', '(', 'test_axis', ',', '2', ')', ')', 'test_axis', '=', 'np', '.', 'cross', '(', 'c2', '-', 'c1', ',', 'c3', '-', 'c1', ')', 'if', 'np', '.', 'linalg', '.', 'norm', '(', 'test_axis', ')', '>', 'self', '.', 'tol', ':', 'for', 'r', 'in', '(', '3', ',', '4', ',', '5', ')', ':', 'if', 'not', 'rot_present', '[', 'r', ']', ':', 'op', '=', 'SymmOp', '.', 'from_axis_angle_and_translation', '(', 'test_axis', ',', '360', '/', 'r', ')', 'rot_present', '[', 'r', ']', '=', 'self', '.', 'is_valid_op', '(', 'op', ')', 'if', 'rot_present', '[', 'r', ']', ':', 'self', '.', 'symmops', '.', 'append', '(', 'op', ')', 'self', '.', 'rot_sym', '.', 'append', '(', '(', 'test_axis', ',', 'r', ')', ')', 'break', 'if', 'rot_present', '[', '2', ']', 'and', 'rot_present', '[', '3', ']', 'and', '(', 'rot_present', '[', '4', ']', 'or', 'rot_present', '[', '5', ']', ')', ':', 'break']
Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point group T molecules have only one unique 3-fold and one unique 2-fold axis. O molecules have one unique 4, 3 and 2-fold axes. I molecules have a unique 5-fold axis.
['Looks', 'for', 'R5', 'R4', 'R3', 'and', 'R2', 'axes', 'in', 'spherical', 'top', 'molecules', '.', 'Point', 'group', 'T', 'molecules', 'have', 'only', 'one', 'unique', '3', '-', 'fold', 'and', 'one', 'unique', '2', '-', 'fold', 'axis', '.', 'O', 'molecules', 'have', 'one', 'unique', '4', '3', 'and', '2', '-', 'fold', 'axes', '.', 'I', 'molecules', 'have', 'a', 'unique', '5', '-', 'fold', 'axis', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/symmetry/analyzer.py#L1151-L1187
6,919
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/master.py
print_header
def print_header(text): """Prints header with given text and frame composed of '#' characters.""" print() print('#'*(len(text)+4)) print('# ' + text + ' #') print('#'*(len(text)+4)) print()
python
def print_header(text): """Prints header with given text and frame composed of '#' characters.""" print() print('#'*(len(text)+4)) print('# ' + text + ' #') print('#'*(len(text)+4)) print()
['def', 'print_header', '(', 'text', ')', ':', 'print', '(', ')', 'print', '(', "'#'", '*', '(', 'len', '(', 'text', ')', '+', '4', ')', ')', 'print', '(', "'# '", '+', 'text', '+', "' #'", ')', 'print', '(', "'#'", '*', '(', 'len', '(', 'text', ')', '+', '4', ')', ')', 'print', '(', ')']
Prints header with given text and frame composed of '#' characters.
['Prints', 'header', 'with', 'given', 'text', 'and', 'frame', 'composed', 'of', '#', 'characters', '.']
train
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L40-L46
6,920
tuomas2/automate
src/automate/traits_fixes.py
_dispatch_change_event
def _dispatch_change_event(self, object, trait_name, old, new, handler): """ Prepare and dispatch a trait change event to a listener. """ # Extract the arguments needed from the handler. args = self.argument_transform(object, trait_name, old, new) # Send a description of the event to the change event tracer. if tnotifier._pre_change_event_tracer is not None: tnotifier._pre_change_event_tracer(object, trait_name, old, new, handler) # Dispatch the event to the listener. from automate.common import SystemNotReady try: self.dispatch(handler, *args) except SystemNotReady: pass except Exception as e: if tnotifier._post_change_event_tracer is not None: tnotifier._post_change_event_tracer(object, trait_name, old, new, handler, exception=e) # This call needs to be made inside the `except` block in case # the handler wants to re-raise the exception. tnotifier.handle_exception(object, trait_name, old, new) else: if tnotifier._post_change_event_tracer is not None: tnotifier._post_change_event_tracer(object, trait_name, old, new, handler, exception=None)
python
def _dispatch_change_event(self, object, trait_name, old, new, handler): """ Prepare and dispatch a trait change event to a listener. """ # Extract the arguments needed from the handler. args = self.argument_transform(object, trait_name, old, new) # Send a description of the event to the change event tracer. if tnotifier._pre_change_event_tracer is not None: tnotifier._pre_change_event_tracer(object, trait_name, old, new, handler) # Dispatch the event to the listener. from automate.common import SystemNotReady try: self.dispatch(handler, *args) except SystemNotReady: pass except Exception as e: if tnotifier._post_change_event_tracer is not None: tnotifier._post_change_event_tracer(object, trait_name, old, new, handler, exception=e) # This call needs to be made inside the `except` block in case # the handler wants to re-raise the exception. tnotifier.handle_exception(object, trait_name, old, new) else: if tnotifier._post_change_event_tracer is not None: tnotifier._post_change_event_tracer(object, trait_name, old, new, handler, exception=None)
['def', '_dispatch_change_event', '(', 'self', ',', 'object', ',', 'trait_name', ',', 'old', ',', 'new', ',', 'handler', ')', ':', '# Extract the arguments needed from the handler.', 'args', '=', 'self', '.', 'argument_transform', '(', 'object', ',', 'trait_name', ',', 'old', ',', 'new', ')', '# Send a description of the event to the change event tracer.', 'if', 'tnotifier', '.', '_pre_change_event_tracer', 'is', 'not', 'None', ':', 'tnotifier', '.', '_pre_change_event_tracer', '(', 'object', ',', 'trait_name', ',', 'old', ',', 'new', ',', 'handler', ')', '# Dispatch the event to the listener.', 'from', 'automate', '.', 'common', 'import', 'SystemNotReady', 'try', ':', 'self', '.', 'dispatch', '(', 'handler', ',', '*', 'args', ')', 'except', 'SystemNotReady', ':', 'pass', 'except', 'Exception', 'as', 'e', ':', 'if', 'tnotifier', '.', '_post_change_event_tracer', 'is', 'not', 'None', ':', 'tnotifier', '.', '_post_change_event_tracer', '(', 'object', ',', 'trait_name', ',', 'old', ',', 'new', ',', 'handler', ',', 'exception', '=', 'e', ')', '# This call needs to be made inside the `except` block in case', '# the handler wants to re-raise the exception.', 'tnotifier', '.', 'handle_exception', '(', 'object', ',', 'trait_name', ',', 'old', ',', 'new', ')', 'else', ':', 'if', 'tnotifier', '.', '_post_change_event_tracer', 'is', 'not', 'None', ':', 'tnotifier', '.', '_post_change_event_tracer', '(', 'object', ',', 'trait_name', ',', 'old', ',', 'new', ',', 'handler', ',', 'exception', '=', 'None', ')']
Prepare and dispatch a trait change event to a listener.
['Prepare', 'and', 'dispatch', 'a', 'trait', 'change', 'event', 'to', 'a', 'listener', '.']
train
https://github.com/tuomas2/automate/blob/d8a8cd03cd0da047e033a2d305f3f260f8c4e017/src/automate/traits_fixes.py#L59-L85
6,921
scrapinghub/adblockparser
adblockparser/parser.py
AdblockRules._matches
def _matches(self, url, options, general_re, domain_required_rules, rules_with_options): """ Return if ``url``/``options`` are matched by rules defined by ``general_re``, ``domain_required_rules`` and ``rules_with_options``. ``general_re`` is a compiled regex for rules without options. ``domain_required_rules`` is a {domain: [rules_which_require_it]} mapping. ``rules_with_options`` is a list of AdblockRule instances that don't require any domain, but have other options. """ if general_re and general_re.search(url): return True rules = [] if 'domain' in options and domain_required_rules: src_domain = options['domain'] for domain in _domain_variants(src_domain): if domain in domain_required_rules: rules.extend(domain_required_rules[domain]) rules.extend(rules_with_options) if self.skip_unsupported_rules: rules = [rule for rule in rules if rule.matching_supported(options)] return any(rule.match_url(url, options) for rule in rules)
python
def _matches(self, url, options, general_re, domain_required_rules, rules_with_options): """ Return if ``url``/``options`` are matched by rules defined by ``general_re``, ``domain_required_rules`` and ``rules_with_options``. ``general_re`` is a compiled regex for rules without options. ``domain_required_rules`` is a {domain: [rules_which_require_it]} mapping. ``rules_with_options`` is a list of AdblockRule instances that don't require any domain, but have other options. """ if general_re and general_re.search(url): return True rules = [] if 'domain' in options and domain_required_rules: src_domain = options['domain'] for domain in _domain_variants(src_domain): if domain in domain_required_rules: rules.extend(domain_required_rules[domain]) rules.extend(rules_with_options) if self.skip_unsupported_rules: rules = [rule for rule in rules if rule.matching_supported(options)] return any(rule.match_url(url, options) for rule in rules)
['def', '_matches', '(', 'self', ',', 'url', ',', 'options', ',', 'general_re', ',', 'domain_required_rules', ',', 'rules_with_options', ')', ':', 'if', 'general_re', 'and', 'general_re', '.', 'search', '(', 'url', ')', ':', 'return', 'True', 'rules', '=', '[', ']', 'if', "'domain'", 'in', 'options', 'and', 'domain_required_rules', ':', 'src_domain', '=', 'options', '[', "'domain'", ']', 'for', 'domain', 'in', '_domain_variants', '(', 'src_domain', ')', ':', 'if', 'domain', 'in', 'domain_required_rules', ':', 'rules', '.', 'extend', '(', 'domain_required_rules', '[', 'domain', ']', ')', 'rules', '.', 'extend', '(', 'rules_with_options', ')', 'if', 'self', '.', 'skip_unsupported_rules', ':', 'rules', '=', '[', 'rule', 'for', 'rule', 'in', 'rules', 'if', 'rule', '.', 'matching_supported', '(', 'options', ')', ']', 'return', 'any', '(', 'rule', '.', 'match_url', '(', 'url', ',', 'options', ')', 'for', 'rule', 'in', 'rules', ')']
Return if ``url``/``options`` are matched by rules defined by ``general_re``, ``domain_required_rules`` and ``rules_with_options``. ``general_re`` is a compiled regex for rules without options. ``domain_required_rules`` is a {domain: [rules_which_require_it]} mapping. ``rules_with_options`` is a list of AdblockRule instances that don't require any domain, but have other options.
['Return', 'if', 'url', '/', 'options', 'are', 'matched', 'by', 'rules', 'defined', 'by', 'general_re', 'domain_required_rules', 'and', 'rules_with_options', '.']
train
https://github.com/scrapinghub/adblockparser/blob/4089612d65018d38dbb88dd7f697bcb07814014d/adblockparser/parser.py#L366-L395
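For orientation, the `_matches` method in the record above is an internal helper reached through the package's public `AdblockRules.should_block` API. The sketch below shows that path; the rule strings and URLs are invented for illustration and assume the `adblockparser` package is installed:

```python
from adblockparser import AdblockRules

# Rules without options are matched through the compiled "general" regex;
# rules carrying a $domain option go through the domain_required_rules path.
rules = AdblockRules([
    "||ads.example.com^",          # plain rule, no options
    "adv$domain=example.com",      # only applies when the page domain matches
])

print(rules.should_block("http://ads.example.com/banner.gif"))   # True
print(rules.should_block("http://example.com/adv",
                         {"domain": "example.com"}))              # True
print(rules.should_block("http://example.com/adv",
                         {"domain": "other.org"}))                # False
```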
6,922
ND-CSE-30151/tock
tock/graphs.py
Graph.only_path
def only_path(self): """Finds the only path from the start node. If there is more than one, raises ValueError.""" start = [v for v in self.nodes if self.nodes[v].get('start', False)] if len(start) != 1: raise ValueError("graph does not have exactly one start node") path = [] [v] = start while True: path.append(v) u = v vs = self.edges.get(u, ()) if len(vs) == 0: break elif len(vs) > 1: raise ValueError("graph does not have exactly one path") [v] = vs return path
python
def only_path(self): """Finds the only path from the start node. If there is more than one, raises ValueError.""" start = [v for v in self.nodes if self.nodes[v].get('start', False)] if len(start) != 1: raise ValueError("graph does not have exactly one start node") path = [] [v] = start while True: path.append(v) u = v vs = self.edges.get(u, ()) if len(vs) == 0: break elif len(vs) > 1: raise ValueError("graph does not have exactly one path") [v] = vs return path
['def', 'only_path', '(', 'self', ')', ':', 'start', '=', '[', 'v', 'for', 'v', 'in', 'self', '.', 'nodes', 'if', 'self', '.', 'nodes', '[', 'v', ']', '.', 'get', '(', "'start'", ',', 'False', ')', ']', 'if', 'len', '(', 'start', ')', '!=', '1', ':', 'raise', 'ValueError', '(', '"graph does not have exactly one start node"', ')', 'path', '=', '[', ']', '[', 'v', ']', '=', 'start', 'while', 'True', ':', 'path', '.', 'append', '(', 'v', ')', 'u', '=', 'v', 'vs', '=', 'self', '.', 'edges', '.', 'get', '(', 'u', ',', '(', ')', ')', 'if', 'len', '(', 'vs', ')', '==', '0', ':', 'break', 'elif', 'len', '(', 'vs', ')', '>', '1', ':', 'raise', 'ValueError', '(', '"graph does not have exactly one path"', ')', '[', 'v', ']', '=', 'vs', 'return', 'path']
Finds the only path from the start node. If there is more than one, raises ValueError.
['Finds', 'the', 'only', 'path', 'from', 'the', 'start', 'node', '.', 'If', 'there', 'is', 'more', 'than', 'one', 'raises', 'ValueError', '.']
train
https://github.com/ND-CSE-30151/tock/blob/b8d21901aaf0e6ac913c2afa855f5b5a882a16c6/tock/graphs.py#L44-L63
6,923
tjvr/kurt
kurt/__init__.py
BlockType.has_conversion
def has_conversion(self, plugin): """Return True if the plugin supports this block.""" plugin = kurt.plugin.Kurt.get_plugin(plugin) return plugin.name in self._plugins
python
def has_conversion(self, plugin): """Return True if the plugin supports this block.""" plugin = kurt.plugin.Kurt.get_plugin(plugin) return plugin.name in self._plugins
['def', 'has_conversion', '(', 'self', ',', 'plugin', ')', ':', 'plugin', '=', 'kurt', '.', 'plugin', '.', 'Kurt', '.', 'get_plugin', '(', 'plugin', ')', 'return', 'plugin', '.', 'name', 'in', 'self', '.', '_plugins']
Return True if the plugin supports this block.
['Return', 'True', 'if', 'the', 'plugin', 'supports', 'this', 'block', '.']
train
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L1596-L1599
6,924
inasafe/inasafe
safe/gui/widgets/message.py
show_keyword_version_message
def show_keyword_version_message(sender, keyword_version, inasafe_version): """Show a message indicating that the keywords version is mismatch .. versionadded: 3.2 :param sender: Sender of the message signal. Default to Any object. :type sender: object :param keyword_version: The version of the layer's keywords :type keyword_version: str :param inasafe_version: The version of the InaSAFE :type inasafe_version: str .. note:: The print button will be disabled if this method is called. """ LOGGER.debug('Showing Mismatch Version Message') message = generate_input_error_message( tr('Layer Keyword\'s Version Mismatch:'), m.Paragraph( tr( 'Your layer\'s keyword\'s version ({layer_version}) does not ' 'match with your InaSAFE version ({inasafe_version}). If you ' 'wish to use it as an exposure, hazard, or aggregation layer ' 'in an analysis, please use the keyword wizard to update the ' 'keywords. You can open the wizard by clicking on ' 'the ').format( layer_version=keyword_version, inasafe_version=inasafe_version), m.Image( 'file:///%s/img/icons/' 'show-keyword-wizard.svg' % resources_path(), **SMALL_ICON_STYLE), tr( ' icon in the toolbar.')) ) send_static_message(sender, message)
python
def show_keyword_version_message(sender, keyword_version, inasafe_version): """Show a message indicating that the keywords version is mismatch .. versionadded: 3.2 :param sender: Sender of the message signal. Default to Any object. :type sender: object :param keyword_version: The version of the layer's keywords :type keyword_version: str :param inasafe_version: The version of the InaSAFE :type inasafe_version: str .. note:: The print button will be disabled if this method is called. """ LOGGER.debug('Showing Mismatch Version Message') message = generate_input_error_message( tr('Layer Keyword\'s Version Mismatch:'), m.Paragraph( tr( 'Your layer\'s keyword\'s version ({layer_version}) does not ' 'match with your InaSAFE version ({inasafe_version}). If you ' 'wish to use it as an exposure, hazard, or aggregation layer ' 'in an analysis, please use the keyword wizard to update the ' 'keywords. You can open the wizard by clicking on ' 'the ').format( layer_version=keyword_version, inasafe_version=inasafe_version), m.Image( 'file:///%s/img/icons/' 'show-keyword-wizard.svg' % resources_path(), **SMALL_ICON_STYLE), tr( ' icon in the toolbar.')) ) send_static_message(sender, message)
['def', 'show_keyword_version_message', '(', 'sender', ',', 'keyword_version', ',', 'inasafe_version', ')', ':', 'LOGGER', '.', 'debug', '(', "'Showing Mismatch Version Message'", ')', 'message', '=', 'generate_input_error_message', '(', 'tr', '(', "'Layer Keyword\\'s Version Mismatch:'", ')', ',', 'm', '.', 'Paragraph', '(', 'tr', '(', "'Your layer\\'s keyword\\'s version ({layer_version}) does not '", "'match with your InaSAFE version ({inasafe_version}). If you '", "'wish to use it as an exposure, hazard, or aggregation layer '", "'in an analysis, please use the keyword wizard to update the '", "'keywords. You can open the wizard by clicking on '", "'the '", ')', '.', 'format', '(', 'layer_version', '=', 'keyword_version', ',', 'inasafe_version', '=', 'inasafe_version', ')', ',', 'm', '.', 'Image', '(', "'file:///%s/img/icons/'", "'show-keyword-wizard.svg'", '%', 'resources_path', '(', ')', ',', '*', '*', 'SMALL_ICON_STYLE', ')', ',', 'tr', '(', "' icon in the toolbar.'", ')', ')', ')', 'send_static_message', '(', 'sender', ',', 'message', ')']
Show a message indicating that the keywords version is mismatch .. versionadded: 3.2 :param sender: Sender of the message signal. Default to Any object. :type sender: object :param keyword_version: The version of the layer's keywords :type keyword_version: str :param inasafe_version: The version of the InaSAFE :type inasafe_version: str .. note:: The print button will be disabled if this method is called.
['Show', 'a', 'message', 'indicating', 'that', 'the', 'keywords', 'version', 'is', 'mismatch']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/message.py#L187-L223
6,925
acorg/dark-matter
dark/sam.py
SAMFilter.addFilteringOptions
def addFilteringOptions(parser, samfileIsPositionalArg=False): """ Add options to an argument parser for filtering SAM/BAM. @param samfileIsPositionalArg: If C{True} the SAM/BAM file must be given as the final argument on the command line (without being preceded by --samfile). @param parser: An C{argparse.ArgumentParser} instance. """ parser.add_argument( '%ssamfile' % ('' if samfileIsPositionalArg else '--'), required=True, help='The SAM/BAM file to filter.') parser.add_argument( '--referenceId', metavar='ID', nargs='+', action='append', help=('A reference sequence id whose alignments should be kept ' '(alignments against other references will be dropped). ' 'If omitted, alignments against all references will be ' 'kept. May be repeated.')) parser.add_argument( '--dropUnmapped', default=False, action='store_true', help='If given, unmapped matches will not be output.') parser.add_argument( '--dropSecondary', default=False, action='store_true', help='If given, secondary matches will not be output.') parser.add_argument( '--dropSupplementary', default=False, action='store_true', help='If given, supplementary matches will not be output.') parser.add_argument( '--dropDuplicates', default=False, action='store_true', help=('If given, matches flagged as optical or PCR duplicates ' 'will not be output.')) parser.add_argument( '--keepQCFailures', default=False, action='store_true', help=('If given, reads that are considered quality control ' 'failures will be included in the output.')) parser.add_argument( '--minScore', type=float, help=('If given, alignments with --scoreTag (default AS) values ' 'less than this value will not be output. If given, ' 'alignments that do not have a score will not be output.')) parser.add_argument( '--maxScore', type=float, help=('If given, alignments with --scoreTag (default AS) values ' 'greater than this value will not be output. If given, ' 'alignments that do not have a score will not be output.')) parser.add_argument( '--scoreTag', default='AS', help=('The alignment tag to extract for --minScore and --maxScore ' 'comparisons.'))
python
def addFilteringOptions(parser, samfileIsPositionalArg=False): """ Add options to an argument parser for filtering SAM/BAM. @param samfileIsPositionalArg: If C{True} the SAM/BAM file must be given as the final argument on the command line (without being preceded by --samfile). @param parser: An C{argparse.ArgumentParser} instance. """ parser.add_argument( '%ssamfile' % ('' if samfileIsPositionalArg else '--'), required=True, help='The SAM/BAM file to filter.') parser.add_argument( '--referenceId', metavar='ID', nargs='+', action='append', help=('A reference sequence id whose alignments should be kept ' '(alignments against other references will be dropped). ' 'If omitted, alignments against all references will be ' 'kept. May be repeated.')) parser.add_argument( '--dropUnmapped', default=False, action='store_true', help='If given, unmapped matches will not be output.') parser.add_argument( '--dropSecondary', default=False, action='store_true', help='If given, secondary matches will not be output.') parser.add_argument( '--dropSupplementary', default=False, action='store_true', help='If given, supplementary matches will not be output.') parser.add_argument( '--dropDuplicates', default=False, action='store_true', help=('If given, matches flagged as optical or PCR duplicates ' 'will not be output.')) parser.add_argument( '--keepQCFailures', default=False, action='store_true', help=('If given, reads that are considered quality control ' 'failures will be included in the output.')) parser.add_argument( '--minScore', type=float, help=('If given, alignments with --scoreTag (default AS) values ' 'less than this value will not be output. If given, ' 'alignments that do not have a score will not be output.')) parser.add_argument( '--maxScore', type=float, help=('If given, alignments with --scoreTag (default AS) values ' 'greater than this value will not be output. If given, ' 'alignments that do not have a score will not be output.')) parser.add_argument( '--scoreTag', default='AS', help=('The alignment tag to extract for --minScore and --maxScore ' 'comparisons.'))
['def', 'addFilteringOptions', '(', 'parser', ',', 'samfileIsPositionalArg', '=', 'False', ')', ':', 'parser', '.', 'add_argument', '(', "'%ssamfile'", '%', '(', "''", 'if', 'samfileIsPositionalArg', 'else', "'--'", ')', ',', 'required', '=', 'True', ',', 'help', '=', "'The SAM/BAM file to filter.'", ')', 'parser', '.', 'add_argument', '(', "'--referenceId'", ',', 'metavar', '=', "'ID'", ',', 'nargs', '=', "'+'", ',', 'action', '=', "'append'", ',', 'help', '=', '(', "'A reference sequence id whose alignments should be kept '", "'(alignments against other references will be dropped). '", "'If omitted, alignments against all references will be '", "'kept. May be repeated.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--dropUnmapped'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', "'If given, unmapped matches will not be output.'", ')', 'parser', '.', 'add_argument', '(', "'--dropSecondary'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', "'If given, secondary matches will not be output.'", ')', 'parser', '.', 'add_argument', '(', "'--dropSupplementary'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', "'If given, supplementary matches will not be output.'", ')', 'parser', '.', 'add_argument', '(', "'--dropDuplicates'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', '(', "'If given, matches flagged as optical or PCR duplicates '", "'will not be output.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--keepQCFailures'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', '(', "'If given, reads that are considered quality control '", "'failures will be included in the output.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--minScore'", ',', 'type', '=', 'float', ',', 'help', '=', '(', "'If given, alignments with --scoreTag (default AS) values '", "'less than this value will not be output. If given, '", "'alignments that do not have a score will not be output.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--maxScore'", ',', 'type', '=', 'float', ',', 'help', '=', '(', "'If given, alignments with --scoreTag (default AS) values '", "'greater than this value will not be output. If given, '", "'alignments that do not have a score will not be output.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--scoreTag'", ',', 'default', '=', "'AS'", ',', 'help', '=', '(', "'The alignment tag to extract for --minScore and --maxScore '", "'comparisons.'", ')', ')']
Add options to an argument parser for filtering SAM/BAM. @param samfileIsPositionalArg: If C{True} the SAM/BAM file must be given as the final argument on the command line (without being preceded by --samfile). @param parser: An C{argparse.ArgumentParser} instance.
['Add', 'options', 'to', 'an', 'argument', 'parser', 'for', 'filtering', 'SAM', '/', 'BAM', '.']
train
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L211-L269
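A minimal sketch of how a parser-configuring helper like `addFilteringOptions` is typically driven; the import path is inferred from the `dark/sam.py` path in the record, and the `parse_args` call is illustrative only:

```python
import argparse

from dark.sam import SAMFilter  # path inferred from dark/sam.py above

parser = argparse.ArgumentParser(description="Filter a SAM/BAM file.")
SAMFilter.addFilteringOptions(parser)  # registers --samfile, --referenceId, --minScore, ...

# Equivalent to a command line such as:
#   filter.py --samfile reads.bam --dropUnmapped --minScore 30
args = parser.parse_args(["--samfile", "reads.bam", "--dropUnmapped", "--minScore", "30"])
print(args.samfile, args.dropUnmapped, args.minScore)  # reads.bam True 30.0
```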
6,926
LonamiWebs/Telethon
telethon/events/common.py
_into_id_set
async def _into_id_set(client, chats): """Helper util to turn the input chat or chats into a set of IDs.""" if chats is None: return None if not utils.is_list_like(chats): chats = (chats,) result = set() for chat in chats: if isinstance(chat, int): if chat < 0: result.add(chat) # Explicitly marked IDs are negative else: result.update({ # Support all valid types of peers utils.get_peer_id(types.PeerUser(chat)), utils.get_peer_id(types.PeerChat(chat)), utils.get_peer_id(types.PeerChannel(chat)), }) elif isinstance(chat, TLObject) and chat.SUBCLASS_OF_ID == 0x2d45687: # 0x2d45687 == crc32(b'Peer') result.add(utils.get_peer_id(chat)) else: chat = await client.get_input_entity(chat) if isinstance(chat, types.InputPeerSelf): chat = await client.get_me(input_peer=True) result.add(utils.get_peer_id(chat)) return result
python
async def _into_id_set(client, chats): """Helper util to turn the input chat or chats into a set of IDs.""" if chats is None: return None if not utils.is_list_like(chats): chats = (chats,) result = set() for chat in chats: if isinstance(chat, int): if chat < 0: result.add(chat) # Explicitly marked IDs are negative else: result.update({ # Support all valid types of peers utils.get_peer_id(types.PeerUser(chat)), utils.get_peer_id(types.PeerChat(chat)), utils.get_peer_id(types.PeerChannel(chat)), }) elif isinstance(chat, TLObject) and chat.SUBCLASS_OF_ID == 0x2d45687: # 0x2d45687 == crc32(b'Peer') result.add(utils.get_peer_id(chat)) else: chat = await client.get_input_entity(chat) if isinstance(chat, types.InputPeerSelf): chat = await client.get_me(input_peer=True) result.add(utils.get_peer_id(chat)) return result
['async', 'def', '_into_id_set', '(', 'client', ',', 'chats', ')', ':', 'if', 'chats', 'is', 'None', ':', 'return', 'None', 'if', 'not', 'utils', '.', 'is_list_like', '(', 'chats', ')', ':', 'chats', '=', '(', 'chats', ',', ')', 'result', '=', 'set', '(', ')', 'for', 'chat', 'in', 'chats', ':', 'if', 'isinstance', '(', 'chat', ',', 'int', ')', ':', 'if', 'chat', '<', '0', ':', 'result', '.', 'add', '(', 'chat', ')', '# Explicitly marked IDs are negative', 'else', ':', 'result', '.', 'update', '(', '{', '# Support all valid types of peers', 'utils', '.', 'get_peer_id', '(', 'types', '.', 'PeerUser', '(', 'chat', ')', ')', ',', 'utils', '.', 'get_peer_id', '(', 'types', '.', 'PeerChat', '(', 'chat', ')', ')', ',', 'utils', '.', 'get_peer_id', '(', 'types', '.', 'PeerChannel', '(', 'chat', ')', ')', ',', '}', ')', 'elif', 'isinstance', '(', 'chat', ',', 'TLObject', ')', 'and', 'chat', '.', 'SUBCLASS_OF_ID', '==', '0x2d45687', ':', "# 0x2d45687 == crc32(b'Peer')", 'result', '.', 'add', '(', 'utils', '.', 'get_peer_id', '(', 'chat', ')', ')', 'else', ':', 'chat', '=', 'await', 'client', '.', 'get_input_entity', '(', 'chat', ')', 'if', 'isinstance', '(', 'chat', ',', 'types', '.', 'InputPeerSelf', ')', ':', 'chat', '=', 'await', 'client', '.', 'get_me', '(', 'input_peer', '=', 'True', ')', 'result', '.', 'add', '(', 'utils', '.', 'get_peer_id', '(', 'chat', ')', ')', 'return', 'result']
Helper util to turn the input chat or chats into a set of IDs.
['Helper', 'util', 'to', 'turn', 'the', 'input', 'chat', 'or', 'chats', 'into', 'a', 'set', 'of', 'IDs', '.']
train
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/events/common.py#L11-L39
6,927
jazzband/django-widget-tweaks
widget_tweaks/templatetags/widget_tweaks.py
render_field
def render_field(parser, token): """ Render a form field using given attribute-value pairs Takes form field as first argument and list of attribute-value pairs for all other arguments. Attribute-value pairs should be in the form of attribute=value or attribute="a value" for assignment and attribute+=value or attribute+="value" for appending. """ error_msg = '%r tag requires a form field followed by a list of attributes and values in the form attr="value"' % token.split_contents()[0] try: bits = token.split_contents() tag_name = bits[0] form_field = bits[1] attr_list = bits[2:] except ValueError: raise TemplateSyntaxError(error_msg) form_field = parser.compile_filter(form_field) set_attrs = [] append_attrs = [] for pair in attr_list: match = ATTRIBUTE_RE.match(pair) if not match: raise TemplateSyntaxError(error_msg + ": %s" % pair) dct = match.groupdict() attr, sign, value = \ dct['attr'], dct['sign'], parser.compile_filter(dct['value']) if sign == "=": set_attrs.append((attr, value)) else: append_attrs.append((attr, value)) return FieldAttributeNode(form_field, set_attrs, append_attrs)
python
def render_field(parser, token): """ Render a form field using given attribute-value pairs Takes form field as first argument and list of attribute-value pairs for all other arguments. Attribute-value pairs should be in the form of attribute=value or attribute="a value" for assignment and attribute+=value or attribute+="value" for appending. """ error_msg = '%r tag requires a form field followed by a list of attributes and values in the form attr="value"' % token.split_contents()[0] try: bits = token.split_contents() tag_name = bits[0] form_field = bits[1] attr_list = bits[2:] except ValueError: raise TemplateSyntaxError(error_msg) form_field = parser.compile_filter(form_field) set_attrs = [] append_attrs = [] for pair in attr_list: match = ATTRIBUTE_RE.match(pair) if not match: raise TemplateSyntaxError(error_msg + ": %s" % pair) dct = match.groupdict() attr, sign, value = \ dct['attr'], dct['sign'], parser.compile_filter(dct['value']) if sign == "=": set_attrs.append((attr, value)) else: append_attrs.append((attr, value)) return FieldAttributeNode(form_field, set_attrs, append_attrs)
['def', 'render_field', '(', 'parser', ',', 'token', ')', ':', 'error_msg', '=', '\'%r tag requires a form field followed by a list of attributes and values in the form attr="value"\'', '%', 'token', '.', 'split_contents', '(', ')', '[', '0', ']', 'try', ':', 'bits', '=', 'token', '.', 'split_contents', '(', ')', 'tag_name', '=', 'bits', '[', '0', ']', 'form_field', '=', 'bits', '[', '1', ']', 'attr_list', '=', 'bits', '[', '2', ':', ']', 'except', 'ValueError', ':', 'raise', 'TemplateSyntaxError', '(', 'error_msg', ')', 'form_field', '=', 'parser', '.', 'compile_filter', '(', 'form_field', ')', 'set_attrs', '=', '[', ']', 'append_attrs', '=', '[', ']', 'for', 'pair', 'in', 'attr_list', ':', 'match', '=', 'ATTRIBUTE_RE', '.', 'match', '(', 'pair', ')', 'if', 'not', 'match', ':', 'raise', 'TemplateSyntaxError', '(', 'error_msg', '+', '": %s"', '%', 'pair', ')', 'dct', '=', 'match', '.', 'groupdict', '(', ')', 'attr', ',', 'sign', ',', 'value', '=', 'dct', '[', "'attr'", ']', ',', 'dct', '[', "'sign'", ']', ',', 'parser', '.', 'compile_filter', '(', 'dct', '[', "'value'", ']', ')', 'if', 'sign', '==', '"="', ':', 'set_attrs', '.', 'append', '(', '(', 'attr', ',', 'value', ')', ')', 'else', ':', 'append_attrs', '.', 'append', '(', '(', 'attr', ',', 'value', ')', ')', 'return', 'FieldAttributeNode', '(', 'form_field', ',', 'set_attrs', ',', 'append_attrs', ')']
Render a form field using given attribute-value pairs Takes form field as first argument and list of attribute-value pairs for all other arguments. Attribute-value pairs should be in the form of attribute=value or attribute="a value" for assignment and attribute+=value or attribute+="value" for appending.
['Render', 'a', 'form', 'field', 'using', 'given', 'attribute', '-', 'value', 'pairs']
train
https://github.com/jazzband/django-widget-tweaks/blob/f50ee92410d68e81528a7643a10544e7331af8fb/widget_tweaks/templatetags/widget_tweaks.py#L138-L172
6,928
jmgilman/Neolib
neolib/pyamf/remoting/client/__init__.py
RemotingService.addRequest
def addRequest(self, service, *args): """ Adds a request to be sent to the remoting gateway. """ wrapper = RequestWrapper(self, '/%d' % self.request_number, service, *args) self.request_number += 1 self.requests.append(wrapper) if self.logger: self.logger.debug('Adding request %s%r', wrapper.service, args) return wrapper
python
def addRequest(self, service, *args): """ Adds a request to be sent to the remoting gateway. """ wrapper = RequestWrapper(self, '/%d' % self.request_number, service, *args) self.request_number += 1 self.requests.append(wrapper) if self.logger: self.logger.debug('Adding request %s%r', wrapper.service, args) return wrapper
['def', 'addRequest', '(', 'self', ',', 'service', ',', '*', 'args', ')', ':', 'wrapper', '=', 'RequestWrapper', '(', 'self', ',', "'/%d'", '%', 'self', '.', 'request_number', ',', 'service', ',', '*', 'args', ')', 'self', '.', 'request_number', '+=', '1', 'self', '.', 'requests', '.', 'append', '(', 'wrapper', ')', 'if', 'self', '.', 'logger', ':', 'self', '.', 'logger', '.', 'debug', '(', "'Adding request %s%r'", ',', 'wrapper', '.', 'service', ',', 'args', ')', 'return', 'wrapper']
Adds a request to be sent to the remoting gateway.
['Adds', 'a', 'request', 'to', 'be', 'sent', 'to', 'the', 'remoting', 'gateway', '.']
train
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/client/__init__.py#L298-L311
6,929
chrlie/frogsay
src/frogsay/__init__.py
cli
def cli(): """\ Frogsay generates an ASCII picture of a FROG spouting a FROG tip. FROG tips are fetched from frog.tips's API endpoint when needed, otherwise they are cached locally in an application-specific folder. """ with open_client(cache_dir=get_cache_dir()) as client: tip = client.frog_tip() terminal_width = click.termui.get_terminal_size()[0] wisdom = make_frog_fresco(tip, width=terminal_width) click.echo(wisdom)
python
def cli(): """\ Frogsay generates an ASCII picture of a FROG spouting a FROG tip. FROG tips are fetched from frog.tips's API endpoint when needed, otherwise they are cached locally in an application-specific folder. """ with open_client(cache_dir=get_cache_dir()) as client: tip = client.frog_tip() terminal_width = click.termui.get_terminal_size()[0] wisdom = make_frog_fresco(tip, width=terminal_width) click.echo(wisdom)
['def', 'cli', '(', ')', ':', 'with', 'open_client', '(', 'cache_dir', '=', 'get_cache_dir', '(', ')', ')', 'as', 'client', ':', 'tip', '=', 'client', '.', 'frog_tip', '(', ')', 'terminal_width', '=', 'click', '.', 'termui', '.', 'get_terminal_size', '(', ')', '[', '0', ']', 'wisdom', '=', 'make_frog_fresco', '(', 'tip', ',', 'width', '=', 'terminal_width', ')', 'click', '.', 'echo', '(', 'wisdom', ')']
\ Frogsay generates an ASCII picture of a FROG spouting a FROG tip. FROG tips are fetched from frog.tips's API endpoint when needed, otherwise they are cached locally in an application-specific folder.
['\\', 'Frogsay', 'generates', 'an', 'ASCII', 'picture', 'of', 'a', 'FROG', 'spouting', 'a', 'FROG', 'tip', '.']
train
https://github.com/chrlie/frogsay/blob/1c21e1401dc24719732218af830d34b842ab10b9/src/frogsay/__init__.py#L17-L30
6,930
wonambi-python/wonambi
wonambi/trans/select.py
_create_subepochs
def _create_subepochs(x, nperseg, step): """Transform the data into a matrix for easy manipulation Parameters ---------- x : 1d ndarray actual data values nperseg : int number of samples in each row to create step : int distance in samples between rows Returns ------- 2d ndarray a view (i.e. doesn't copy data) of the original x, with shape determined by nperseg and step. You should use the last dimension """ axis = x.ndim - 1 # last dim nsmp = x.shape[axis] stride = x.strides[axis] noverlap = nperseg - step v_shape = *x.shape[:axis], (nsmp - noverlap) // step, nperseg v_strides = *x.strides[:axis], stride * step, stride v = as_strided(x, shape=v_shape, strides=v_strides, writeable=False) # much safer return v
python
def _create_subepochs(x, nperseg, step): """Transform the data into a matrix for easy manipulation Parameters ---------- x : 1d ndarray actual data values nperseg : int number of samples in each row to create step : int distance in samples between rows Returns ------- 2d ndarray a view (i.e. doesn't copy data) of the original x, with shape determined by nperseg and step. You should use the last dimension """ axis = x.ndim - 1 # last dim nsmp = x.shape[axis] stride = x.strides[axis] noverlap = nperseg - step v_shape = *x.shape[:axis], (nsmp - noverlap) // step, nperseg v_strides = *x.strides[:axis], stride * step, stride v = as_strided(x, shape=v_shape, strides=v_strides, writeable=False) # much safer return v
['def', '_create_subepochs', '(', 'x', ',', 'nperseg', ',', 'step', ')', ':', 'axis', '=', 'x', '.', 'ndim', '-', '1', '# last dim', 'nsmp', '=', 'x', '.', 'shape', '[', 'axis', ']', 'stride', '=', 'x', '.', 'strides', '[', 'axis', ']', 'noverlap', '=', 'nperseg', '-', 'step', 'v_shape', '=', '*', 'x', '.', 'shape', '[', ':', 'axis', ']', ',', '(', 'nsmp', '-', 'noverlap', ')', '//', 'step', ',', 'nperseg', 'v_strides', '=', '*', 'x', '.', 'strides', '[', ':', 'axis', ']', ',', 'stride', '*', 'step', ',', 'stride', 'v', '=', 'as_strided', '(', 'x', ',', 'shape', '=', 'v_shape', ',', 'strides', '=', 'v_strides', ',', 'writeable', '=', 'False', ')', '# much safer', 'return', 'v']
Transform the data into a matrix for easy manipulation Parameters ---------- x : 1d ndarray actual data values nperseg : int number of samples in each row to create step : int distance in samples between rows Returns ------- 2d ndarray a view (i.e. doesn't copy data) of the original x, with shape determined by nperseg and step. You should use the last dimension
['Transform', 'the', 'data', 'into', 'a', 'matrix', 'for', 'easy', 'manipulation']
train
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/trans/select.py#L690-L715
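The strided-view trick described in the docstring above is easy to demonstrate standalone; the sketch below reproduces the record's logic with plain NumPy rather than importing wonambi:

```python
import numpy as np
from numpy.lib.stride_tricks import as_strided

def create_subepochs(x, nperseg, step):
    """Overlapping windows of length nperseg, spaced step samples apart, as a read-only view."""
    axis = x.ndim - 1
    nsmp = x.shape[axis]
    stride = x.strides[axis]
    noverlap = nperseg - step
    v_shape = (*x.shape[:axis], (nsmp - noverlap) // step, nperseg)
    v_strides = (*x.strides[:axis], stride * step, stride)
    return as_strided(x, shape=v_shape, strides=v_strides, writeable=False)

x = np.arange(10.0)
print(create_subepochs(x, nperseg=4, step=2))
# [[0. 1. 2. 3.]
#  [2. 3. 4. 5.]
#  [4. 5. 6. 7.]
#  [6. 7. 8. 9.]]
```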
6,931
numenta/htmresearch
projects/l2_pooling/topology_experiments.py
plotConvergenceByColumnTopology
def plotConvergenceByColumnTopology(results, columnRange, featureRange, networkType, numTrials): """ Plots the convergence graph: iterations vs number of columns. Each curve shows the convergence for a given number of unique features. """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # Convergence[f, c, t] = how long it took it to converge with f unique # features, c columns and topology t. convergence = numpy.zeros((max(featureRange), max(columnRange) + 1, len(networkType))) networkTypeNames = {} for i, topologyType in enumerate(networkType): if "Topology" in topologyType: networkTypeNames[i] = "Normal" else: networkTypeNames[i] = "Dense" for r in results: convergence[r["numFeatures"] - 1, r["numColumns"], networkType.index(r["networkType"])] += r["convergencePoint"] convergence /= numTrials # For each column, print convergence as fct of number of unique features for c in range(1, max(columnRange) + 1): for t in range(len(networkType)): print c, convergence[:, c, t] # Print everything anyway for debugging print "Average convergence array=", convergence ######################################################################## # # Create the plot. x-axis= plt.figure() plotPath = os.path.join("plots", "convergence_by_column_topology.pdf") # Plot each curve legendList = [] colormap = plt.get_cmap("jet") colorList = [colormap(x) for x in numpy.linspace(0., 1., len(featureRange)*len(networkType))] for i in range(len(featureRange)): for t in range(len(networkType)): f = featureRange[i] print columnRange print convergence[f-1,columnRange, t] legendList.append('Unique features={}, topology={}'.format(f, networkTypeNames[t])) plt.plot(columnRange, convergence[f-1,columnRange, t], color=colorList[i*len(networkType) + t]) # format plt.legend(legendList, loc="upper right") plt.xlabel("Number of columns") plt.xticks(columnRange) plt.yticks(range(0,int(convergence.max())+1)) plt.ylabel("Average number of touches") plt.title("Number of touches to recognize one object (multiple columns)") # save plt.savefig(plotPath) plt.close()
python
def plotConvergenceByColumnTopology(results, columnRange, featureRange, networkType, numTrials): """ Plots the convergence graph: iterations vs number of columns. Each curve shows the convergence for a given number of unique features. """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # Convergence[f, c, t] = how long it took it to converge with f unique # features, c columns and topology t. convergence = numpy.zeros((max(featureRange), max(columnRange) + 1, len(networkType))) networkTypeNames = {} for i, topologyType in enumerate(networkType): if "Topology" in topologyType: networkTypeNames[i] = "Normal" else: networkTypeNames[i] = "Dense" for r in results: convergence[r["numFeatures"] - 1, r["numColumns"], networkType.index(r["networkType"])] += r["convergencePoint"] convergence /= numTrials # For each column, print convergence as fct of number of unique features for c in range(1, max(columnRange) + 1): for t in range(len(networkType)): print c, convergence[:, c, t] # Print everything anyway for debugging print "Average convergence array=", convergence ######################################################################## # # Create the plot. x-axis= plt.figure() plotPath = os.path.join("plots", "convergence_by_column_topology.pdf") # Plot each curve legendList = [] colormap = plt.get_cmap("jet") colorList = [colormap(x) for x in numpy.linspace(0., 1., len(featureRange)*len(networkType))] for i in range(len(featureRange)): for t in range(len(networkType)): f = featureRange[i] print columnRange print convergence[f-1,columnRange, t] legendList.append('Unique features={}, topology={}'.format(f, networkTypeNames[t])) plt.plot(columnRange, convergence[f-1,columnRange, t], color=colorList[i*len(networkType) + t]) # format plt.legend(legendList, loc="upper right") plt.xlabel("Number of columns") plt.xticks(columnRange) plt.yticks(range(0,int(convergence.max())+1)) plt.ylabel("Average number of touches") plt.title("Number of touches to recognize one object (multiple columns)") # save plt.savefig(plotPath) plt.close()
['def', 'plotConvergenceByColumnTopology', '(', 'results', ',', 'columnRange', ',', 'featureRange', ',', 'networkType', ',', 'numTrials', ')', ':', '########################################################################', '#', '# Accumulate all the results per column in a convergence array.', '#', '# Convergence[f, c, t] = how long it took it to converge with f unique', '# features, c columns and topology t.', 'convergence', '=', 'numpy', '.', 'zeros', '(', '(', 'max', '(', 'featureRange', ')', ',', 'max', '(', 'columnRange', ')', '+', '1', ',', 'len', '(', 'networkType', ')', ')', ')', 'networkTypeNames', '=', '{', '}', 'for', 'i', ',', 'topologyType', 'in', 'enumerate', '(', 'networkType', ')', ':', 'if', '"Topology"', 'in', 'topologyType', ':', 'networkTypeNames', '[', 'i', ']', '=', '"Normal"', 'else', ':', 'networkTypeNames', '[', 'i', ']', '=', '"Dense"', 'for', 'r', 'in', 'results', ':', 'convergence', '[', 'r', '[', '"numFeatures"', ']', '-', '1', ',', 'r', '[', '"numColumns"', ']', ',', 'networkType', '.', 'index', '(', 'r', '[', '"networkType"', ']', ')', ']', '+=', 'r', '[', '"convergencePoint"', ']', 'convergence', '/=', 'numTrials', '# For each column, print convergence as fct of number of unique features', 'for', 'c', 'in', 'range', '(', '1', ',', 'max', '(', 'columnRange', ')', '+', '1', ')', ':', 'for', 't', 'in', 'range', '(', 'len', '(', 'networkType', ')', ')', ':', 'print', 'c', ',', 'convergence', '[', ':', ',', 'c', ',', 't', ']', '# Print everything anyway for debugging', 'print', '"Average convergence array="', ',', 'convergence', '########################################################################', '#', '# Create the plot. x-axis=', 'plt', '.', 'figure', '(', ')', 'plotPath', '=', 'os', '.', 'path', '.', 'join', '(', '"plots"', ',', '"convergence_by_column_topology.pdf"', ')', '# Plot each curve', 'legendList', '=', '[', ']', 'colormap', '=', 'plt', '.', 'get_cmap', '(', '"jet"', ')', 'colorList', '=', '[', 'colormap', '(', 'x', ')', 'for', 'x', 'in', 'numpy', '.', 'linspace', '(', '0.', ',', '1.', ',', 'len', '(', 'featureRange', ')', '*', 'len', '(', 'networkType', ')', ')', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'featureRange', ')', ')', ':', 'for', 't', 'in', 'range', '(', 'len', '(', 'networkType', ')', ')', ':', 'f', '=', 'featureRange', '[', 'i', ']', 'print', 'columnRange', 'print', 'convergence', '[', 'f', '-', '1', ',', 'columnRange', ',', 't', ']', 'legendList', '.', 'append', '(', "'Unique features={}, topology={}'", '.', 'format', '(', 'f', ',', 'networkTypeNames', '[', 't', ']', ')', ')', 'plt', '.', 'plot', '(', 'columnRange', ',', 'convergence', '[', 'f', '-', '1', ',', 'columnRange', ',', 't', ']', ',', 'color', '=', 'colorList', '[', 'i', '*', 'len', '(', 'networkType', ')', '+', 't', ']', ')', '# format', 'plt', '.', 'legend', '(', 'legendList', ',', 'loc', '=', '"upper right"', ')', 'plt', '.', 'xlabel', '(', '"Number of columns"', ')', 'plt', '.', 'xticks', '(', 'columnRange', ')', 'plt', '.', 'yticks', '(', 'range', '(', '0', ',', 'int', '(', 'convergence', '.', 'max', '(', ')', ')', '+', '1', ')', ')', 'plt', '.', 'ylabel', '(', '"Average number of touches"', ')', 'plt', '.', 'title', '(', '"Number of touches to recognize one object (multiple columns)"', ')', '# save', 'plt', '.', 'savefig', '(', 'plotPath', ')', 'plt', '.', 'close', '(', ')']
Plots the convergence graph: iterations vs number of columns. Each curve shows the convergence for a given number of unique features.
['Plots', 'the', 'convergence', 'graph', ':', 'iterations', 'vs', 'number', 'of', 'columns', '.', 'Each', 'curve', 'shows', 'the', 'convergence', 'for', 'a', 'given', 'number', 'of', 'unique', 'features', '.']
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/topology_experiments.py#L118-L182
6,932
StanfordVL/robosuite
robosuite/utils/mjcf_utils.py
xml_path_completion
def xml_path_completion(xml_path): """ Takes in a local xml path and returns a full path. if @xml_path is absolute, do nothing if @xml_path is not absolute, load xml that is shipped by the package """ if xml_path.startswith("/"): full_path = xml_path else: full_path = os.path.join(robosuite.models.assets_root, xml_path) return full_path
python
def xml_path_completion(xml_path): """ Takes in a local xml path and returns a full path. if @xml_path is absolute, do nothing if @xml_path is not absolute, load xml that is shipped by the package """ if xml_path.startswith("/"): full_path = xml_path else: full_path = os.path.join(robosuite.models.assets_root, xml_path) return full_path
['def', 'xml_path_completion', '(', 'xml_path', ')', ':', 'if', 'xml_path', '.', 'startswith', '(', '"/"', ')', ':', 'full_path', '=', 'xml_path', 'else', ':', 'full_path', '=', 'os', '.', 'path', '.', 'join', '(', 'robosuite', '.', 'models', '.', 'assets_root', ',', 'xml_path', ')', 'return', 'full_path']
Takes in a local xml path and returns a full path. if @xml_path is absolute, do nothing if @xml_path is not absolute, load xml that is shipped by the package
['Takes', 'in', 'a', 'local', 'xml', 'path', 'and', 'returns', 'a', 'full', 'path', '.', 'if']
train
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/utils/mjcf_utils.py#L14-L24
6,933
SuperCowPowers/workbench
workbench_apps/workbench_cli/workbench_shell.py
WorkbenchShell.run
def run(self): ''' Running the workbench CLI ''' # Announce versions self.versions() # Sample/Tag info and Help self.tags() print '\n%s' % self.workbench.help('cli') # Now that we have the Workbench connection spun up, we register some stuff # with the embedded IPython interpreter and than spin it up # cfg = IPython.config.loader.Config() cfg = Config() cfg.InteractiveShellEmbed.autocall = 2 cfg.InteractiveShellEmbed.colors = 'Linux' cfg.InteractiveShellEmbed.color_info = True cfg.InteractiveShellEmbed.autoindent = True cfg.InteractiveShellEmbed.deep_reload = True cfg.PromptManager.in_template = ( r'{color.LightPurple}{short_md5}{color.Yellow}{prompt_deco}{color.LightBlue} Workbench{color.Green}[\#]> ') # cfg.PromptManager.out_template = '' # Create the IPython shell self.ipshell = IPython.terminal.embed.InteractiveShellEmbed( config=cfg, banner1='', exit_msg='\nWorkbench has SuperCowPowers...') # Register our transformer, the shell will use this for 'shortcut' commands auto_quoter = auto_quote_xform.AutoQuoteTransformer(self.ipshell, self.ipshell.prefilter_manager) auto_quoter.register_command_set(self.command_set) # Setting up some Pandas options pd.set_option('display.width', 140) pd.set_option('max_colwidth', 15) # Start up the shell with our set of workbench commands self.ipshell(local_ns=self.command_dict)
python
def run(self): ''' Running the workbench CLI ''' # Announce versions self.versions() # Sample/Tag info and Help self.tags() print '\n%s' % self.workbench.help('cli') # Now that we have the Workbench connection spun up, we register some stuff # with the embedded IPython interpreter and than spin it up # cfg = IPython.config.loader.Config() cfg = Config() cfg.InteractiveShellEmbed.autocall = 2 cfg.InteractiveShellEmbed.colors = 'Linux' cfg.InteractiveShellEmbed.color_info = True cfg.InteractiveShellEmbed.autoindent = True cfg.InteractiveShellEmbed.deep_reload = True cfg.PromptManager.in_template = ( r'{color.LightPurple}{short_md5}{color.Yellow}{prompt_deco}{color.LightBlue} Workbench{color.Green}[\#]> ') # cfg.PromptManager.out_template = '' # Create the IPython shell self.ipshell = IPython.terminal.embed.InteractiveShellEmbed( config=cfg, banner1='', exit_msg='\nWorkbench has SuperCowPowers...') # Register our transformer, the shell will use this for 'shortcut' commands auto_quoter = auto_quote_xform.AutoQuoteTransformer(self.ipshell, self.ipshell.prefilter_manager) auto_quoter.register_command_set(self.command_set) # Setting up some Pandas options pd.set_option('display.width', 140) pd.set_option('max_colwidth', 15) # Start up the shell with our set of workbench commands self.ipshell(local_ns=self.command_dict)
['def', 'run', '(', 'self', ')', ':', '# Announce versions', 'self', '.', 'versions', '(', ')', '# Sample/Tag info and Help', 'self', '.', 'tags', '(', ')', 'print', "'\\n%s'", '%', 'self', '.', 'workbench', '.', 'help', '(', "'cli'", ')', '# Now that we have the Workbench connection spun up, we register some stuff', '# with the embedded IPython interpreter and than spin it up', '# cfg = IPython.config.loader.Config()', 'cfg', '=', 'Config', '(', ')', 'cfg', '.', 'InteractiveShellEmbed', '.', 'autocall', '=', '2', 'cfg', '.', 'InteractiveShellEmbed', '.', 'colors', '=', "'Linux'", 'cfg', '.', 'InteractiveShellEmbed', '.', 'color_info', '=', 'True', 'cfg', '.', 'InteractiveShellEmbed', '.', 'autoindent', '=', 'True', 'cfg', '.', 'InteractiveShellEmbed', '.', 'deep_reload', '=', 'True', 'cfg', '.', 'PromptManager', '.', 'in_template', '=', '(', "r'{color.LightPurple}{short_md5}{color.Yellow}{prompt_deco}{color.LightBlue} Workbench{color.Green}[\\#]> '", ')', "# cfg.PromptManager.out_template = ''", '# Create the IPython shell', 'self', '.', 'ipshell', '=', 'IPython', '.', 'terminal', '.', 'embed', '.', 'InteractiveShellEmbed', '(', 'config', '=', 'cfg', ',', 'banner1', '=', "''", ',', 'exit_msg', '=', "'\\nWorkbench has SuperCowPowers...'", ')', "# Register our transformer, the shell will use this for 'shortcut' commands", 'auto_quoter', '=', 'auto_quote_xform', '.', 'AutoQuoteTransformer', '(', 'self', '.', 'ipshell', ',', 'self', '.', 'ipshell', '.', 'prefilter_manager', ')', 'auto_quoter', '.', 'register_command_set', '(', 'self', '.', 'command_set', ')', '# Setting up some Pandas options', 'pd', '.', 'set_option', '(', "'display.width'", ',', '140', ')', 'pd', '.', 'set_option', '(', "'max_colwidth'", ',', '15', ')', '# Start up the shell with our set of workbench commands', 'self', '.', 'ipshell', '(', 'local_ns', '=', 'self', '.', 'command_dict', ')']
Running the workbench CLI
['Running', 'the', 'workbench', 'CLI']
train
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench_apps/workbench_cli/workbench_shell.py#L238-L274
6,934
dpgaspar/Flask-AppBuilder
flask_appbuilder/api/convert.py
Model2SchemaConverter._meta_schema_factory
def _meta_schema_factory(self, columns, model, class_mixin): """ Creates ModelSchema marshmallow-sqlalchemy :param columns: a list of columns to mix :param model: Model :param class_mixin: a marshamallow Schema to mix :return: ModelSchema """ _model = model if columns: class MetaSchema(ModelSchema, class_mixin): class Meta: model = _model fields = columns strict = True sqla_session = self.datamodel.session else: class MetaSchema(ModelSchema, class_mixin): class Meta: model = _model strict = True sqla_session = self.datamodel.session return MetaSchema
python
def _meta_schema_factory(self, columns, model, class_mixin): """ Creates ModelSchema marshmallow-sqlalchemy :param columns: a list of columns to mix :param model: Model :param class_mixin: a marshamallow Schema to mix :return: ModelSchema """ _model = model if columns: class MetaSchema(ModelSchema, class_mixin): class Meta: model = _model fields = columns strict = True sqla_session = self.datamodel.session else: class MetaSchema(ModelSchema, class_mixin): class Meta: model = _model strict = True sqla_session = self.datamodel.session return MetaSchema
['def', '_meta_schema_factory', '(', 'self', ',', 'columns', ',', 'model', ',', 'class_mixin', ')', ':', '_model', '=', 'model', 'if', 'columns', ':', 'class', 'MetaSchema', '(', 'ModelSchema', ',', 'class_mixin', ')', ':', 'class', 'Meta', ':', 'model', '=', '_model', 'fields', '=', 'columns', 'strict', '=', 'True', 'sqla_session', '=', 'self', '.', 'datamodel', '.', 'session', 'else', ':', 'class', 'MetaSchema', '(', 'ModelSchema', ',', 'class_mixin', ')', ':', 'class', 'Meta', ':', 'model', '=', '_model', 'strict', '=', 'True', 'sqla_session', '=', 'self', '.', 'datamodel', '.', 'session', 'return', 'MetaSchema']
Creates ModelSchema marshmallow-sqlalchemy :param columns: a list of columns to mix :param model: Model :param class_mixin: a marshamallow Schema to mix :return: ModelSchema
['Creates', 'ModelSchema', 'marshmallow', '-', 'sqlalchemy']
train
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/api/convert.py#L87-L110
6,935
bcbio/bcbio-nextgen
bcbio/distributed/resources.py
_get_prog_memory
def _get_prog_memory(resources, cores_per_job): """Get expected memory usage, in Gb per core, for a program from resource specification. """ out = None for jvm_opt in resources.get("jvm_opts", []): if jvm_opt.startswith("-Xmx"): out = _str_memory_to_gb(jvm_opt[4:]) memory = resources.get("memory") if memory: out = _str_memory_to_gb(memory) prog_cores = resources.get("cores") # if a single core with memory is requested for the job # and we run multiple cores, scale down to avoid overscheduling if out and prog_cores and int(prog_cores) == 1 and cores_per_job > int(prog_cores): out = out / float(cores_per_job) return out
python
def _get_prog_memory(resources, cores_per_job): """Get expected memory usage, in Gb per core, for a program from resource specification. """ out = None for jvm_opt in resources.get("jvm_opts", []): if jvm_opt.startswith("-Xmx"): out = _str_memory_to_gb(jvm_opt[4:]) memory = resources.get("memory") if memory: out = _str_memory_to_gb(memory) prog_cores = resources.get("cores") # if a single core with memory is requested for the job # and we run multiple cores, scale down to avoid overscheduling if out and prog_cores and int(prog_cores) == 1 and cores_per_job > int(prog_cores): out = out / float(cores_per_job) return out
['def', '_get_prog_memory', '(', 'resources', ',', 'cores_per_job', ')', ':', 'out', '=', 'None', 'for', 'jvm_opt', 'in', 'resources', '.', 'get', '(', '"jvm_opts"', ',', '[', ']', ')', ':', 'if', 'jvm_opt', '.', 'startswith', '(', '"-Xmx"', ')', ':', 'out', '=', '_str_memory_to_gb', '(', 'jvm_opt', '[', '4', ':', ']', ')', 'memory', '=', 'resources', '.', 'get', '(', '"memory"', ')', 'if', 'memory', ':', 'out', '=', '_str_memory_to_gb', '(', 'memory', ')', 'prog_cores', '=', 'resources', '.', 'get', '(', '"cores"', ')', '# if a single core with memory is requested for the job', '# and we run multiple cores, scale down to avoid overscheduling', 'if', 'out', 'and', 'prog_cores', 'and', 'int', '(', 'prog_cores', ')', '==', '1', 'and', 'cores_per_job', '>', 'int', '(', 'prog_cores', ')', ':', 'out', '=', 'out', '/', 'float', '(', 'cores_per_job', ')', 'return', 'out']
Get expected memory usage, in Gb per core, for a program from resource specification.
['Get', 'expected', 'memory', 'usage', 'in', 'Gb', 'per', 'core', 'for', 'a', 'program', 'from', 'resource', 'specification', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/resources.py#L83-L98
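To make the per-core scaling above concrete, a small self-contained sketch; `_str_memory_to_gb` is not part of the record, so the version below is a hypothetical stand-in written only for this example:

```python
def _str_memory_to_gb(memory):
    # Hypothetical stand-in: parse strings like "3500m" or "4g" into gigabytes.
    val = float(memory[:-1])
    return val / 1024.0 if memory.lower().endswith("m") else val

def _get_prog_memory(resources, cores_per_job):
    # Mirrors the record above: prefer an explicit "memory" key over -Xmx JVM options,
    # and scale down when a single-core program is spread across multiple cores.
    out = None
    for jvm_opt in resources.get("jvm_opts", []):
        if jvm_opt.startswith("-Xmx"):
            out = _str_memory_to_gb(jvm_opt[4:])
    memory = resources.get("memory")
    if memory:
        out = _str_memory_to_gb(memory)
    prog_cores = resources.get("cores")
    if out and prog_cores and int(prog_cores) == 1 and cores_per_job > int(prog_cores):
        out = out / float(cores_per_job)
    return out

print(_get_prog_memory({"jvm_opts": ["-Xmx3500m"], "cores": 1}, cores_per_job=4))
# roughly 0.85 Gb per core: 3500m ~= 3.42 Gb, split across 4 cores
```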
6,936
pschmitt/python-opsview
opsview/opsview.py
main
def main(): ''' Main function ''' # We should only steal the root logger if we're the application, not the module logging.basicConfig(level=logging.DEBUG) args = get_args() if args.password: password = args.password else: password = getpass( prompt='Enter password for {}@{}: '.format(args.user, args.host) ) opsview = Opsview( host=args.host, port=args.port, use_ssl=args.ssl, verify_ssl=not args.skip_ssl_verification, username=args.user, password=password, verbose=args.verbose, ) d = {} with open('vcenter.json') as f: d = json.load(f) logger.debug( pformat(opsview.create_host(params=d, verbose=args.verbose)) )
python
def main(): ''' Main function ''' # We should only steal the root logger if we're the application, not the module logging.basicConfig(level=logging.DEBUG) args = get_args() if args.password: password = args.password else: password = getpass( prompt='Enter password for {}@{}: '.format(args.user, args.host) ) opsview = Opsview( host=args.host, port=args.port, use_ssl=args.ssl, verify_ssl=not args.skip_ssl_verification, username=args.user, password=password, verbose=args.verbose, ) d = {} with open('vcenter.json') as f: d = json.load(f) logger.debug( pformat(opsview.create_host(params=d, verbose=args.verbose)) )
['def', 'main', '(', ')', ':', "# We should only steal the root logger if we're the application, not the module", 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'DEBUG', ')', 'args', '=', 'get_args', '(', ')', 'if', 'args', '.', 'password', ':', 'password', '=', 'args', '.', 'password', 'else', ':', 'password', '=', 'getpass', '(', 'prompt', '=', "'Enter password for {}@{}: '", '.', 'format', '(', 'args', '.', 'user', ',', 'args', '.', 'host', ')', ')', 'opsview', '=', 'Opsview', '(', 'host', '=', 'args', '.', 'host', ',', 'port', '=', 'args', '.', 'port', ',', 'use_ssl', '=', 'args', '.', 'ssl', ',', 'verify_ssl', '=', 'not', 'args', '.', 'skip_ssl_verification', ',', 'username', '=', 'args', '.', 'user', ',', 'password', '=', 'password', ',', 'verbose', '=', 'args', '.', 'verbose', ',', ')', 'd', '=', '{', '}', 'with', 'open', '(', "'vcenter.json'", ')', 'as', 'f', ':', 'd', '=', 'json', '.', 'load', '(', 'f', ')', 'logger', '.', 'debug', '(', 'pformat', '(', 'opsview', '.', 'create_host', '(', 'params', '=', 'd', ',', 'verbose', '=', 'args', '.', 'verbose', ')', ')', ')']
Main function
['Main', 'function']
train
https://github.com/pschmitt/python-opsview/blob/720acc06c491db32d18c79d20f04cae16e57a7fb/opsview/opsview.py#L500-L529
6,937
nchopin/particles
particles/kalman.py
predict_step
def predict_step(F, covX, filt): """Predictive step of Kalman filter. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} covX: (dx, dx) numpy array covariance of X_t | X_{t-1} filt: MeanAndCov object filtering distribution at time t-1 Returns ------- pred: MeanAndCov object predictive distribution at time t Note ---- filt.mean may either be of shape (dx,) or (N, dx); in the latter case N predictive steps are performed in parallel. """ pred_mean = np.matmul(filt.mean, F.T) pred_cov = dotdot(F, filt.cov, F.T) + covX return MeanAndCov(mean=pred_mean, cov=pred_cov)
python
def predict_step(F, covX, filt): """Predictive step of Kalman filter. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} covX: (dx, dx) numpy array covariance of X_t | X_{t-1} filt: MeanAndCov object filtering distribution at time t-1 Returns ------- pred: MeanAndCov object predictive distribution at time t Note ---- filt.mean may either be of shape (dx,) or (N, dx); in the latter case N predictive steps are performed in parallel. """ pred_mean = np.matmul(filt.mean, F.T) pred_cov = dotdot(F, filt.cov, F.T) + covX return MeanAndCov(mean=pred_mean, cov=pred_cov)
['def', 'predict_step', '(', 'F', ',', 'covX', ',', 'filt', ')', ':', 'pred_mean', '=', 'np', '.', 'matmul', '(', 'filt', '.', 'mean', ',', 'F', '.', 'T', ')', 'pred_cov', '=', 'dotdot', '(', 'F', ',', 'filt', '.', 'cov', ',', 'F', '.', 'T', ')', '+', 'covX', 'return', 'MeanAndCov', '(', 'mean', '=', 'pred_mean', ',', 'cov', '=', 'pred_cov', ')']
Predictive step of Kalman filter. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} covX: (dx, dx) numpy array covariance of X_t | X_{t-1} filt: MeanAndCov object filtering distribution at time t-1 Returns ------- pred: MeanAndCov object predictive distribution at time t Note ---- filt.mean may either be of shape (dx,) or (N, dx); in the latter case N predictive steps are performed in parallel.
['Predictive', 'step', 'of', 'Kalman', 'filter', '.']
train
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/kalman.py#L163-L187
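Worked example for the predict_step record above (assumes that function is pasted into the same namespace; MeanAndCov and dotdot are simplified stand-ins for the helpers particles.kalman defines itself):

import numpy as np
from collections import namedtuple

MeanAndCov = namedtuple("MeanAndCov", ["mean", "cov"])  # stand-in container

def dotdot(a, b, c):
    return a @ b @ c  # stand-in for the module's chained dot product

F = np.array([[1.0, 1.0], [0.0, 1.0]])  # constant-velocity transition matrix
covX = 0.1 * np.eye(2)                  # process noise covariance
filt = MeanAndCov(mean=np.array([0.0, 1.0]), cov=np.eye(2))

pred = predict_step(F, covX, filt)
print(pred.mean)  # [1. 1.]
print(pred.cov)   # F @ cov @ F.T + covX = [[2.1, 1.0], [1.0, 1.1]]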
6,938
CivicSpleen/ambry
ambry/bundle/bundle.py
Bundle.phase_search_names
def phase_search_names(self, source, phase): """Search the bundle.yaml metadata file for pipeline configurations. Looks for: - <phase>-<source_table> - <phase>-<dest_table> - <phase>-<source_name> """ search = [] assert phase is not None # Create a search list of names for getting a pipline from the metadata if source and source.source_table_name: search.append(phase + '-' + source.source_table_name) if source and source.dest_table_name: search.append(phase + '-' + source.dest_table_name) if source: search.append(phase + '-' + source.name) search.append(phase) return search
python
def phase_search_names(self, source, phase): """Search the bundle.yaml metadata file for pipeline configurations. Looks for: - <phase>-<source_table> - <phase>-<dest_table> - <phase>-<source_name> """ search = [] assert phase is not None # Create a search list of names for getting a pipline from the metadata if source and source.source_table_name: search.append(phase + '-' + source.source_table_name) if source and source.dest_table_name: search.append(phase + '-' + source.dest_table_name) if source: search.append(phase + '-' + source.name) search.append(phase) return search
['def', 'phase_search_names', '(', 'self', ',', 'source', ',', 'phase', ')', ':', 'search', '=', '[', ']', 'assert', 'phase', 'is', 'not', 'None', '# Create a search list of names for getting a pipline from the metadata', 'if', 'source', 'and', 'source', '.', 'source_table_name', ':', 'search', '.', 'append', '(', 'phase', '+', "'-'", '+', 'source', '.', 'source_table_name', ')', 'if', 'source', 'and', 'source', '.', 'dest_table_name', ':', 'search', '.', 'append', '(', 'phase', '+', "'-'", '+', 'source', '.', 'dest_table_name', ')', 'if', 'source', ':', 'search', '.', 'append', '(', 'phase', '+', "'-'", '+', 'source', '.', 'name', ')', 'search', '.', 'append', '(', 'phase', ')', 'return', 'search']
Search the bundle.yaml metadata file for pipeline configurations. Looks for: - <phase>-<source_table> - <phase>-<dest_table> - <phase>-<source_name>
['Search', 'the', 'bundle', '.', 'yaml', 'metadata', 'file', 'for', 'pipeline', 'configurations', '.', 'Looks', 'for', ':', '-', '<phase', '>', '-', '<source_table', '>', '-', '<phase', '>', '-', '<dest_table', '>', '-', '<phase', '>', '-', '<source_name', '>']
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L644-L667
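Illustration of the search list built by the phase_search_names record above, for a hypothetical source object and the 'build' phase:

from types import SimpleNamespace

source = SimpleNamespace(source_table_name="raw_table",
                         dest_table_name="dest_table",
                         name="src")
# bundle.phase_search_names(source, "build") would return:
# ['build-raw_table', 'build-dest_table', 'build-src', 'build']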
6,939
fabioz/PyDev.Debugger
_pydev_imps/_pydev_SocketServer.py
ForkingMixIn.process_request
def process_request(self, request, client_address): """Fork a new subprocess to process the request.""" self.collect_children() pid = os.fork() # @UndefinedVariable if pid: # Parent process if self.active_children is None: self.active_children = [] self.active_children.append(pid) self.close_request(request) #close handle in parent process return else: # Child process. # This must never return, hence os._exit()! try: self.finish_request(request, client_address) self.shutdown_request(request) os._exit(0) except: try: self.handle_error(request, client_address) self.shutdown_request(request) finally: os._exit(1)
python
def process_request(self, request, client_address): """Fork a new subprocess to process the request.""" self.collect_children() pid = os.fork() # @UndefinedVariable if pid: # Parent process if self.active_children is None: self.active_children = [] self.active_children.append(pid) self.close_request(request) #close handle in parent process return else: # Child process. # This must never return, hence os._exit()! try: self.finish_request(request, client_address) self.shutdown_request(request) os._exit(0) except: try: self.handle_error(request, client_address) self.shutdown_request(request) finally: os._exit(1)
['def', 'process_request', '(', 'self', ',', 'request', ',', 'client_address', ')', ':', 'self', '.', 'collect_children', '(', ')', 'pid', '=', 'os', '.', 'fork', '(', ')', '# @UndefinedVariable', 'if', 'pid', ':', '# Parent process', 'if', 'self', '.', 'active_children', 'is', 'None', ':', 'self', '.', 'active_children', '=', '[', ']', 'self', '.', 'active_children', '.', 'append', '(', 'pid', ')', 'self', '.', 'close_request', '(', 'request', ')', '#close handle in parent process', 'return', 'else', ':', '# Child process.', '# This must never return, hence os._exit()!', 'try', ':', 'self', '.', 'finish_request', '(', 'request', ',', 'client_address', ')', 'self', '.', 'shutdown_request', '(', 'request', ')', 'os', '.', '_exit', '(', '0', ')', 'except', ':', 'try', ':', 'self', '.', 'handle_error', '(', 'request', ',', 'client_address', ')', 'self', '.', 'shutdown_request', '(', 'request', ')', 'finally', ':', 'os', '.', '_exit', '(', '1', ')']
Fork a new subprocess to process the request.
['Fork', 'a', 'new', 'subprocess', 'to', 'process', 'the', 'request', '.']
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_imps/_pydev_SocketServer.py#L542-L565
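The record above mirrors the standard library's forking server; a minimal sketch of the same fork-per-request pattern using Python 3's socketserver (POSIX only, not the bundled _pydev_SocketServer module) could look like this:

import socketserver

class EchoHandler(socketserver.StreamRequestHandler):
    def handle(self):
        # Echo one line back to the client.
        self.wfile.write(self.rfile.readline())

class ForkingEchoServer(socketserver.ForkingMixIn, socketserver.TCPServer):
    allow_reuse_address = True

if __name__ == "__main__":
    with ForkingEchoServer(("127.0.0.1", 9999), EchoHandler) as srv:
        srv.serve_forever()  # each request is handled in a forked child process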
6,940
andreikop/qutepart
qutepart/vim.py
isChar
def isChar(ev): """ Check if an event may be a typed character """ text = ev.text() if len(text) != 1: return False if ev.modifiers() not in (Qt.ShiftModifier, Qt.KeypadModifier, Qt.NoModifier): return False asciiCode = ord(text) if asciiCode <= 31 or asciiCode == 0x7f: # control characters return False if text == ' ' and ev.modifiers() == Qt.ShiftModifier: return False # Shift+Space is a shortcut, not a text return True
python
def isChar(ev): """ Check if an event may be a typed character """ text = ev.text() if len(text) != 1: return False if ev.modifiers() not in (Qt.ShiftModifier, Qt.KeypadModifier, Qt.NoModifier): return False asciiCode = ord(text) if asciiCode <= 31 or asciiCode == 0x7f: # control characters return False if text == ' ' and ev.modifiers() == Qt.ShiftModifier: return False # Shift+Space is a shortcut, not a text return True
['def', 'isChar', '(', 'ev', ')', ':', 'text', '=', 'ev', '.', 'text', '(', ')', 'if', 'len', '(', 'text', ')', '!=', '1', ':', 'return', 'False', 'if', 'ev', '.', 'modifiers', '(', ')', 'not', 'in', '(', 'Qt', '.', 'ShiftModifier', ',', 'Qt', '.', 'KeypadModifier', ',', 'Qt', '.', 'NoModifier', ')', ':', 'return', 'False', 'asciiCode', '=', 'ord', '(', 'text', ')', 'if', 'asciiCode', '<=', '31', 'or', 'asciiCode', '==', '0x7f', ':', '# control characters', 'return', 'False', 'if', 'text', '==', "' '", 'and', 'ev', '.', 'modifiers', '(', ')', '==', 'Qt', '.', 'ShiftModifier', ':', 'return', 'False', '# Shift+Space is a shortcut, not a text', 'return', 'True']
Check if an event may be a typed character
['Check', 'if', 'an', 'event', 'may', 'be', 'a', 'typed', 'character']
train
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/vim.py#L47-L64
6,941
openego/ding0
ding0/tools/geo.py
calc_geo_centre_point
def calc_geo_centre_point(node_source, node_target): """ Calculates the geodesic distance between `node_source` and `node_target` incorporating the detour factor specified in config_calc.cfg. Parameters ---------- node_source: LVStationDing0, GeneratorDing0, or CableDistributorDing0 source node, member of GridDing0._graph node_target: LVStationDing0, GeneratorDing0, or CableDistributorDing0 target node, member of GridDing0._graph Returns ------- :any:`float` Distance in m. """ proj_source = partial( pyproj.transform, pyproj.Proj(init='epsg:4326'), # source coordinate system pyproj.Proj(init='epsg:3035')) # destination coordinate system # ETRS (equidistant) to WGS84 (conformal) projection proj_target = partial( pyproj.transform, pyproj.Proj(init='epsg:3035'), # source coordinate system pyproj.Proj(init='epsg:4326')) # destination coordinate system branch_shp = transform(proj_source, LineString([node_source.geo_data, node_target.geo_data])) distance = vincenty((node_source.geo_data.y, node_source.geo_data.x), (node_target.geo_data.y, node_target.geo_data.x)).m centre_point_shp = transform(proj_target, branch_shp.interpolate(distance/2)) return centre_point_shp
python
def calc_geo_centre_point(node_source, node_target): """ Calculates the geodesic distance between `node_source` and `node_target` incorporating the detour factor specified in config_calc.cfg. Parameters ---------- node_source: LVStationDing0, GeneratorDing0, or CableDistributorDing0 source node, member of GridDing0._graph node_target: LVStationDing0, GeneratorDing0, or CableDistributorDing0 target node, member of GridDing0._graph Returns ------- :any:`float` Distance in m. """ proj_source = partial( pyproj.transform, pyproj.Proj(init='epsg:4326'), # source coordinate system pyproj.Proj(init='epsg:3035')) # destination coordinate system # ETRS (equidistant) to WGS84 (conformal) projection proj_target = partial( pyproj.transform, pyproj.Proj(init='epsg:3035'), # source coordinate system pyproj.Proj(init='epsg:4326')) # destination coordinate system branch_shp = transform(proj_source, LineString([node_source.geo_data, node_target.geo_data])) distance = vincenty((node_source.geo_data.y, node_source.geo_data.x), (node_target.geo_data.y, node_target.geo_data.x)).m centre_point_shp = transform(proj_target, branch_shp.interpolate(distance/2)) return centre_point_shp
['def', 'calc_geo_centre_point', '(', 'node_source', ',', 'node_target', ')', ':', 'proj_source', '=', 'partial', '(', 'pyproj', '.', 'transform', ',', 'pyproj', '.', 'Proj', '(', 'init', '=', "'epsg:4326'", ')', ',', '# source coordinate system', 'pyproj', '.', 'Proj', '(', 'init', '=', "'epsg:3035'", ')', ')', '# destination coordinate system', '# ETRS (equidistant) to WGS84 (conformal) projection', 'proj_target', '=', 'partial', '(', 'pyproj', '.', 'transform', ',', 'pyproj', '.', 'Proj', '(', 'init', '=', "'epsg:3035'", ')', ',', '# source coordinate system', 'pyproj', '.', 'Proj', '(', 'init', '=', "'epsg:4326'", ')', ')', '# destination coordinate system', 'branch_shp', '=', 'transform', '(', 'proj_source', ',', 'LineString', '(', '[', 'node_source', '.', 'geo_data', ',', 'node_target', '.', 'geo_data', ']', ')', ')', 'distance', '=', 'vincenty', '(', '(', 'node_source', '.', 'geo_data', '.', 'y', ',', 'node_source', '.', 'geo_data', '.', 'x', ')', ',', '(', 'node_target', '.', 'geo_data', '.', 'y', ',', 'node_target', '.', 'geo_data', '.', 'x', ')', ')', '.', 'm', 'centre_point_shp', '=', 'transform', '(', 'proj_target', ',', 'branch_shp', '.', 'interpolate', '(', 'distance', '/', '2', ')', ')', 'return', 'centre_point_shp']
Calculates the geodesic distance between `node_source` and `node_target` incorporating the detour factor specified in config_calc.cfg. Parameters ---------- node_source: LVStationDing0, GeneratorDing0, or CableDistributorDing0 source node, member of GridDing0._graph node_target: LVStationDing0, GeneratorDing0, or CableDistributorDing0 target node, member of GridDing0._graph Returns ------- :any:`float` Distance in m.
['Calculates', 'the', 'geodesic', 'distance', 'between', 'node_source', 'and', 'node_target', 'incorporating', 'the', 'detour', 'factor', 'specified', 'in', 'config_calc', '.', 'cfg', '.', 'Parameters', '----------', 'node_source', ':', 'LVStationDing0', 'GeneratorDing0', 'or', 'CableDistributorDing0', 'source', 'node', 'member', 'of', 'GridDing0', '.', '_graph', 'node_target', ':', 'LVStationDing0', 'GeneratorDing0', 'or', 'CableDistributorDing0', 'target', 'node', 'member', 'of', 'GridDing0', '.', '_graph']
train
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/tools/geo.py#L206-L241
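A rough standalone sketch of the same centre-point idea as the calc_geo_centre_point record above, using the newer pyproj Transformer API and shapely's normalized interpolation instead of the geopy vincenty distance (CRS choices and coordinate order are assumptions):

from pyproj import Transformer
from shapely.geometry import LineString, Point
from shapely.ops import transform

to_projected = Transformer.from_crs("EPSG:4326", "EPSG:3035", always_xy=True)
to_wgs84 = Transformer.from_crs("EPSG:3035", "EPSG:4326", always_xy=True)

def centre_point(p_source, p_target):
    # Project to a planar CRS, take the midpoint along the segment, then project back.
    line = transform(to_projected.transform, LineString([p_source, p_target]))
    return transform(to_wgs84.transform, line.interpolate(0.5, normalized=True))

print(centre_point(Point(10.0, 52.0), Point(10.1, 52.1)))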
6,942
elmotec/massedit
massedit.py
MassEdit.append_code_expr
def append_code_expr(self, code): """Compile argument and adds it to the list of code objects.""" # expects a string. if isinstance(code, str) and not isinstance(code, unicode): code = unicode(code) if not isinstance(code, unicode): raise TypeError("string expected") log.debug("compiling code %s...", code) try: code_obj = compile(code, '<string>', 'eval') self.code_objs[code] = code_obj except SyntaxError as syntax_err: log.error("cannot compile %s: %s", code, syntax_err) raise log.debug("compiled code %s", code)
python
def append_code_expr(self, code): """Compile argument and adds it to the list of code objects.""" # expects a string. if isinstance(code, str) and not isinstance(code, unicode): code = unicode(code) if not isinstance(code, unicode): raise TypeError("string expected") log.debug("compiling code %s...", code) try: code_obj = compile(code, '<string>', 'eval') self.code_objs[code] = code_obj except SyntaxError as syntax_err: log.error("cannot compile %s: %s", code, syntax_err) raise log.debug("compiled code %s", code)
['def', 'append_code_expr', '(', 'self', ',', 'code', ')', ':', '# expects a string.', 'if', 'isinstance', '(', 'code', ',', 'str', ')', 'and', 'not', 'isinstance', '(', 'code', ',', 'unicode', ')', ':', 'code', '=', 'unicode', '(', 'code', ')', 'if', 'not', 'isinstance', '(', 'code', ',', 'unicode', ')', ':', 'raise', 'TypeError', '(', '"string expected"', ')', 'log', '.', 'debug', '(', '"compiling code %s..."', ',', 'code', ')', 'try', ':', 'code_obj', '=', 'compile', '(', 'code', ',', "'<string>'", ',', "'eval'", ')', 'self', '.', 'code_objs', '[', 'code', ']', '=', 'code_obj', 'except', 'SyntaxError', 'as', 'syntax_err', ':', 'log', '.', 'error', '(', '"cannot compile %s: %s"', ',', 'code', ',', 'syntax_err', ')', 'raise', 'log', '.', 'debug', '(', '"compiled code %s"', ',', 'code', ')']
Compile argument and adds it to the list of code objects.
['Compile', 'argument', 'and', 'adds', 'it', 'to', 'the', 'list', 'of', 'code', 'objects', '.']
train
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L262-L276
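A minimal sketch of the compile-and-cache pattern from the append_code_expr record above, without the Python 2 unicode handling:

import logging

log = logging.getLogger(__name__)
code_objs = {}

def append_code_expr(code):
    # Compile an eval-able expression once and keep the code object for reuse.
    if not isinstance(code, str):
        raise TypeError("string expected")
    try:
        code_objs[code] = compile(code, "<string>", "eval")
    except SyntaxError as err:
        log.error("cannot compile %s: %s", code, err)
        raise

append_code_expr("line.upper()")
print(eval(code_objs["line.upper()"], {}, {"line": "hello"}))  # HELLO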
6,943
ska-sa/montblanc
montblanc/util/__init__.py
random_like
def random_like(ary=None, shape=None, dtype=None): """ Returns a random array of the same shape and type as the supplied array argument, or the supplied shape and dtype """ if ary is not None: shape, dtype = ary.shape, ary.dtype elif shape is None or dtype is None: raise ValueError(( 'random_like(ary, shape, dtype) must be supplied ' 'with either an array argument, or the shape and dtype ' 'of the desired random array.')) if np.issubdtype(dtype, np.complexfloating): return (np.random.random(size=shape) + \ np.random.random(size=shape)*1j).astype(dtype) else: return np.random.random(size=shape).astype(dtype)
python
def random_like(ary=None, shape=None, dtype=None): """ Returns a random array of the same shape and type as the supplied array argument, or the supplied shape and dtype """ if ary is not None: shape, dtype = ary.shape, ary.dtype elif shape is None or dtype is None: raise ValueError(( 'random_like(ary, shape, dtype) must be supplied ' 'with either an array argument, or the shape and dtype ' 'of the desired random array.')) if np.issubdtype(dtype, np.complexfloating): return (np.random.random(size=shape) + \ np.random.random(size=shape)*1j).astype(dtype) else: return np.random.random(size=shape).astype(dtype)
['def', 'random_like', '(', 'ary', '=', 'None', ',', 'shape', '=', 'None', ',', 'dtype', '=', 'None', ')', ':', 'if', 'ary', 'is', 'not', 'None', ':', 'shape', ',', 'dtype', '=', 'ary', '.', 'shape', ',', 'ary', '.', 'dtype', 'elif', 'shape', 'is', 'None', 'or', 'dtype', 'is', 'None', ':', 'raise', 'ValueError', '(', '(', "'random_like(ary, shape, dtype) must be supplied '", "'with either an array argument, or the shape and dtype '", "'of the desired random array.'", ')', ')', 'if', 'np', '.', 'issubdtype', '(', 'dtype', ',', 'np', '.', 'complexfloating', ')', ':', 'return', '(', 'np', '.', 'random', '.', 'random', '(', 'size', '=', 'shape', ')', '+', 'np', '.', 'random', '.', 'random', '(', 'size', '=', 'shape', ')', '*', '1j', ')', '.', 'astype', '(', 'dtype', ')', 'else', ':', 'return', 'np', '.', 'random', '.', 'random', '(', 'size', '=', 'shape', ')', '.', 'astype', '(', 'dtype', ')']
Returns a random array of the same shape and type as the supplied array argument, or the supplied shape and dtype
['Returns', 'a', 'random', 'array', 'of', 'the', 'same', 'shape', 'and', 'type', 'as', 'the', 'supplied', 'array', 'argument', 'or', 'the', 'supplied', 'shape', 'and', 'dtype']
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L96-L113
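Usage example for the random_like record above (assumes that function is in scope; plain NumPy, nothing Montblanc-specific):

import numpy as np

ary = np.zeros((3, 4), dtype=np.complex64)
rnd = random_like(ary)                      # same shape and dtype as ary
print(rnd.shape, rnd.dtype)                 # (3, 4) complex64

rnd2 = random_like(shape=(2, 2), dtype=np.float32)
print(rnd2.dtype)                           # float32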
6,944
soldag/python-pwmled
pwmled/led/rgb.py
RgbLed._prepare_transition
def _prepare_transition(self, is_on=None, brightness=None, color=None): """ Perform pre-transition tasks and construct the destination state. :param is_on: The on-off state to transition to. :param brightness: The brightness to transition to (0.0-1.0). :param color: The color to transition to. :return: The destination state of the transition. """ dest_state = super()._prepare_transition(is_on, brightness=brightness, color=color) # Handle transitions from off to on and changing color if is_on and not self.is_on and color is not None: self.set(color=color, cancel_transition=False) return dest_state
python
def _prepare_transition(self, is_on=None, brightness=None, color=None): """ Perform pre-transition tasks and construct the destination state. :param is_on: The on-off state to transition to. :param brightness: The brightness to transition to (0.0-1.0). :param color: The color to transition to. :return: The destination state of the transition. """ dest_state = super()._prepare_transition(is_on, brightness=brightness, color=color) # Handle transitions from off to on and changing color if is_on and not self.is_on and color is not None: self.set(color=color, cancel_transition=False) return dest_state
['def', '_prepare_transition', '(', 'self', ',', 'is_on', '=', 'None', ',', 'brightness', '=', 'None', ',', 'color', '=', 'None', ')', ':', 'dest_state', '=', 'super', '(', ')', '.', '_prepare_transition', '(', 'is_on', ',', 'brightness', '=', 'brightness', ',', 'color', '=', 'color', ')', '# Handle transitions from off to on and changing color', 'if', 'is_on', 'and', 'not', 'self', '.', 'is_on', 'and', 'color', 'is', 'not', 'None', ':', 'self', '.', 'set', '(', 'color', '=', 'color', ',', 'cancel_transition', '=', 'False', ')', 'return', 'dest_state']
Perform pre-transition tasks and construct the destination state. :param is_on: The on-off state to transition to. :param brightness: The brightness to transition to (0.0-1.0). :param color: The color to transition to. :return: The destination state of the transition.
['Perform', 'pre', '-', 'transition', 'tasks', 'and', 'construct', 'the', 'destination', 'state', '.']
train
https://github.com/soldag/python-pwmled/blob/09cde36ecc0153fa81dc2a1b9bb07d1c0e418c8c/pwmled/led/rgb.py#L74-L91
6,945
geopy/geopy
geopy/geocoders/arcgis.py
ArcGIS._authenticated_call_geocoder
def _authenticated_call_geocoder(self, url, timeout=DEFAULT_SENTINEL): """ Wrap self._call_geocoder, handling tokens. """ if self.token is None or int(time()) > self.token_expiry: self._refresh_authentication_token() request = Request( "&".join((url, urlencode({"token": self.token}))), headers={"Referer": self.referer} ) return self._base_call_geocoder(request, timeout=timeout)
python
def _authenticated_call_geocoder(self, url, timeout=DEFAULT_SENTINEL): """ Wrap self._call_geocoder, handling tokens. """ if self.token is None or int(time()) > self.token_expiry: self._refresh_authentication_token() request = Request( "&".join((url, urlencode({"token": self.token}))), headers={"Referer": self.referer} ) return self._base_call_geocoder(request, timeout=timeout)
['def', '_authenticated_call_geocoder', '(', 'self', ',', 'url', ',', 'timeout', '=', 'DEFAULT_SENTINEL', ')', ':', 'if', 'self', '.', 'token', 'is', 'None', 'or', 'int', '(', 'time', '(', ')', ')', '>', 'self', '.', 'token_expiry', ':', 'self', '.', '_refresh_authentication_token', '(', ')', 'request', '=', 'Request', '(', '"&"', '.', 'join', '(', '(', 'url', ',', 'urlencode', '(', '{', '"token"', ':', 'self', '.', 'token', '}', ')', ')', ')', ',', 'headers', '=', '{', '"Referer"', ':', 'self', '.', 'referer', '}', ')', 'return', 'self', '.', '_base_call_geocoder', '(', 'request', ',', 'timeout', '=', 'timeout', ')']
Wrap self._call_geocoder, handling tokens.
['Wrap', 'self', '.', '_call_geocoder', 'handling', 'tokens', '.']
train
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/arcgis.py#L145-L155
6,946
ismms-himc/clustergrammer2
setupbase.py
_get_data_files
def _get_data_files(data_specs, existing, top=HERE): """Expand data file specs into valid data files metadata. Parameters ---------- data_specs: list of tuples See [create_cmdclass] for description. existing: list of tuples The existing distrubution data_files metadata. Returns ------- A valid list of data_files items. """ # Extract the existing data files into a staging object. file_data = defaultdict(list) for (path, files) in existing or []: file_data[path] = files # Extract the files and assign them to the proper data # files path. for (path, dname, pattern) in data_specs or []: if os.path.isabs(dname): dname = os.path.relpath(dname, top) dname = dname.replace(os.sep, '/') offset = 0 if dname in ('.', '') else len(dname) + 1 files = _get_files(_glob_pjoin(dname, pattern), top=top) for fname in files: # Normalize the path. root = os.path.dirname(fname) full_path = _glob_pjoin(path, root[offset:]) print(dname, root, full_path, offset) if full_path.endswith('/'): full_path = full_path[:-1] file_data[full_path].append(fname) # Construct the data files spec. data_files = [] for (path, files) in file_data.items(): data_files.append((path, files)) return data_files
python
def _get_data_files(data_specs, existing, top=HERE): """Expand data file specs into valid data files metadata. Parameters ---------- data_specs: list of tuples See [create_cmdclass] for description. existing: list of tuples The existing distrubution data_files metadata. Returns ------- A valid list of data_files items. """ # Extract the existing data files into a staging object. file_data = defaultdict(list) for (path, files) in existing or []: file_data[path] = files # Extract the files and assign them to the proper data # files path. for (path, dname, pattern) in data_specs or []: if os.path.isabs(dname): dname = os.path.relpath(dname, top) dname = dname.replace(os.sep, '/') offset = 0 if dname in ('.', '') else len(dname) + 1 files = _get_files(_glob_pjoin(dname, pattern), top=top) for fname in files: # Normalize the path. root = os.path.dirname(fname) full_path = _glob_pjoin(path, root[offset:]) print(dname, root, full_path, offset) if full_path.endswith('/'): full_path = full_path[:-1] file_data[full_path].append(fname) # Construct the data files spec. data_files = [] for (path, files) in file_data.items(): data_files.append((path, files)) return data_files
['def', '_get_data_files', '(', 'data_specs', ',', 'existing', ',', 'top', '=', 'HERE', ')', ':', '# Extract the existing data files into a staging object.', 'file_data', '=', 'defaultdict', '(', 'list', ')', 'for', '(', 'path', ',', 'files', ')', 'in', 'existing', 'or', '[', ']', ':', 'file_data', '[', 'path', ']', '=', 'files', '# Extract the files and assign them to the proper data', '# files path.', 'for', '(', 'path', ',', 'dname', ',', 'pattern', ')', 'in', 'data_specs', 'or', '[', ']', ':', 'if', 'os', '.', 'path', '.', 'isabs', '(', 'dname', ')', ':', 'dname', '=', 'os', '.', 'path', '.', 'relpath', '(', 'dname', ',', 'top', ')', 'dname', '=', 'dname', '.', 'replace', '(', 'os', '.', 'sep', ',', "'/'", ')', 'offset', '=', '0', 'if', 'dname', 'in', '(', "'.'", ',', "''", ')', 'else', 'len', '(', 'dname', ')', '+', '1', 'files', '=', '_get_files', '(', '_glob_pjoin', '(', 'dname', ',', 'pattern', ')', ',', 'top', '=', 'top', ')', 'for', 'fname', 'in', 'files', ':', '# Normalize the path.', 'root', '=', 'os', '.', 'path', '.', 'dirname', '(', 'fname', ')', 'full_path', '=', '_glob_pjoin', '(', 'path', ',', 'root', '[', 'offset', ':', ']', ')', 'print', '(', 'dname', ',', 'root', ',', 'full_path', ',', 'offset', ')', 'if', 'full_path', '.', 'endswith', '(', "'/'", ')', ':', 'full_path', '=', 'full_path', '[', ':', '-', '1', ']', 'file_data', '[', 'full_path', ']', '.', 'append', '(', 'fname', ')', '# Construct the data files spec.', 'data_files', '=', '[', ']', 'for', '(', 'path', ',', 'files', ')', 'in', 'file_data', '.', 'items', '(', ')', ':', 'data_files', '.', 'append', '(', '(', 'path', ',', 'files', ')', ')', 'return', 'data_files']
Expand data file specs into valid data files metadata. Parameters ---------- data_specs: list of tuples See [create_cmdclass] for description. existing: list of tuples The existing distribution data_files metadata. Returns ------- A valid list of data_files items.
['Expand', 'data', 'file', 'specs', 'into', 'valid', 'data', 'files', 'metadata', '.']
train
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L514-L554
6,947
fhcrc/taxtastic
taxtastic/taxonomy.py
Taxonomy.primary_from_name
def primary_from_name(self, tax_name): """ Return tax_id and primary tax_name corresponding to tax_name. """ names = self.names s1 = select([names.c.tax_id, names.c.is_primary], names.c.tax_name == tax_name) log.debug(str(s1)) res = s1.execute().fetchone() if res: tax_id, is_primary = res else: msg = '"{}" not found in names.tax_names'.format(tax_name) raise ValueError(msg) if not is_primary: s2 = select([names.c.tax_name], and_(names.c.tax_id == tax_id, names.c.is_primary)) tax_name = s2.execute().fetchone()[0] return tax_id, tax_name, bool(is_primary)
python
def primary_from_name(self, tax_name): """ Return tax_id and primary tax_name corresponding to tax_name. """ names = self.names s1 = select([names.c.tax_id, names.c.is_primary], names.c.tax_name == tax_name) log.debug(str(s1)) res = s1.execute().fetchone() if res: tax_id, is_primary = res else: msg = '"{}" not found in names.tax_names'.format(tax_name) raise ValueError(msg) if not is_primary: s2 = select([names.c.tax_name], and_(names.c.tax_id == tax_id, names.c.is_primary)) tax_name = s2.execute().fetchone()[0] return tax_id, tax_name, bool(is_primary)
['def', 'primary_from_name', '(', 'self', ',', 'tax_name', ')', ':', 'names', '=', 'self', '.', 'names', 's1', '=', 'select', '(', '[', 'names', '.', 'c', '.', 'tax_id', ',', 'names', '.', 'c', '.', 'is_primary', ']', ',', 'names', '.', 'c', '.', 'tax_name', '==', 'tax_name', ')', 'log', '.', 'debug', '(', 'str', '(', 's1', ')', ')', 'res', '=', 's1', '.', 'execute', '(', ')', '.', 'fetchone', '(', ')', 'if', 'res', ':', 'tax_id', ',', 'is_primary', '=', 'res', 'else', ':', 'msg', '=', '\'"{}" not found in names.tax_names\'', '.', 'format', '(', 'tax_name', ')', 'raise', 'ValueError', '(', 'msg', ')', 'if', 'not', 'is_primary', ':', 's2', '=', 'select', '(', '[', 'names', '.', 'c', '.', 'tax_name', ']', ',', 'and_', '(', 'names', '.', 'c', '.', 'tax_id', '==', 'tax_id', ',', 'names', '.', 'c', '.', 'is_primary', ')', ')', 'tax_name', '=', 's2', '.', 'execute', '(', ')', '.', 'fetchone', '(', ')', '[', '0', ']', 'return', 'tax_id', ',', 'tax_name', ',', 'bool', '(', 'is_primary', ')']
Return tax_id and primary tax_name corresponding to tax_name.
['Return', 'tax_id', 'and', 'primary', 'tax_name', 'corresponding', 'to', 'tax_name', '.']
train
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L169-L193
6,948
tornadoweb/tornado
tornado/auth.py
GoogleOAuth2Mixin.get_authenticated_user
async def get_authenticated_user( self, redirect_uri: str, code: str ) -> Dict[str, Any]: """Handles the login for the Google user, returning an access token. The result is a dictionary containing an ``access_token`` field ([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)). Unlike other ``get_authenticated_user`` methods in this package, this method does not return any additional information about the user. The returned access token can be used with `OAuth2Mixin.oauth2_request` to request additional information (perhaps from ``https://www.googleapis.com/oauth2/v2/userinfo``) Example usage: .. testcode:: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): async def get(self): if self.get_argument('code', False): access = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) user = await self.oauth2_request( "https://www.googleapis.com/oauth2/v1/userinfo", access_token=access["access_token"]) # Save the user and access token with # e.g. set_secure_cookie. else: await self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) .. testoutput:: :hide: .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ # noqa: E501 handler = cast(RequestHandler, self) http = self.get_auth_http_client() body = urllib.parse.urlencode( { "redirect_uri": redirect_uri, "code": code, "client_id": handler.settings[self._OAUTH_SETTINGS_KEY]["key"], "client_secret": handler.settings[self._OAUTH_SETTINGS_KEY]["secret"], "grant_type": "authorization_code", } ) response = await http.fetch( self._OAUTH_ACCESS_TOKEN_URL, method="POST", headers={"Content-Type": "application/x-www-form-urlencoded"}, body=body, ) return escape.json_decode(response.body)
python
async def get_authenticated_user( self, redirect_uri: str, code: str ) -> Dict[str, Any]: """Handles the login for the Google user, returning an access token. The result is a dictionary containing an ``access_token`` field ([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)). Unlike other ``get_authenticated_user`` methods in this package, this method does not return any additional information about the user. The returned access token can be used with `OAuth2Mixin.oauth2_request` to request additional information (perhaps from ``https://www.googleapis.com/oauth2/v2/userinfo``) Example usage: .. testcode:: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): async def get(self): if self.get_argument('code', False): access = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) user = await self.oauth2_request( "https://www.googleapis.com/oauth2/v1/userinfo", access_token=access["access_token"]) # Save the user and access token with # e.g. set_secure_cookie. else: await self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) .. testoutput:: :hide: .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ # noqa: E501 handler = cast(RequestHandler, self) http = self.get_auth_http_client() body = urllib.parse.urlencode( { "redirect_uri": redirect_uri, "code": code, "client_id": handler.settings[self._OAUTH_SETTINGS_KEY]["key"], "client_secret": handler.settings[self._OAUTH_SETTINGS_KEY]["secret"], "grant_type": "authorization_code", } ) response = await http.fetch( self._OAUTH_ACCESS_TOKEN_URL, method="POST", headers={"Content-Type": "application/x-www-form-urlencoded"}, body=body, ) return escape.json_decode(response.body)
['async', 'def', 'get_authenticated_user', '(', 'self', ',', 'redirect_uri', ':', 'str', ',', 'code', ':', 'str', ')', '->', 'Dict', '[', 'str', ',', 'Any', ']', ':', '# noqa: E501', 'handler', '=', 'cast', '(', 'RequestHandler', ',', 'self', ')', 'http', '=', 'self', '.', 'get_auth_http_client', '(', ')', 'body', '=', 'urllib', '.', 'parse', '.', 'urlencode', '(', '{', '"redirect_uri"', ':', 'redirect_uri', ',', '"code"', ':', 'code', ',', '"client_id"', ':', 'handler', '.', 'settings', '[', 'self', '.', '_OAUTH_SETTINGS_KEY', ']', '[', '"key"', ']', ',', '"client_secret"', ':', 'handler', '.', 'settings', '[', 'self', '.', '_OAUTH_SETTINGS_KEY', ']', '[', '"secret"', ']', ',', '"grant_type"', ':', '"authorization_code"', ',', '}', ')', 'response', '=', 'await', 'http', '.', 'fetch', '(', 'self', '.', '_OAUTH_ACCESS_TOKEN_URL', ',', 'method', '=', '"POST"', ',', 'headers', '=', '{', '"Content-Type"', ':', '"application/x-www-form-urlencoded"', '}', ',', 'body', '=', 'body', ',', ')', 'return', 'escape', '.', 'json_decode', '(', 'response', '.', 'body', ')']
Handles the login for the Google user, returning an access token. The result is a dictionary containing an ``access_token`` field ([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)). Unlike other ``get_authenticated_user`` methods in this package, this method does not return any additional information about the user. The returned access token can be used with `OAuth2Mixin.oauth2_request` to request additional information (perhaps from ``https://www.googleapis.com/oauth2/v2/userinfo``) Example usage: .. testcode:: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): async def get(self): if self.get_argument('code', False): access = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) user = await self.oauth2_request( "https://www.googleapis.com/oauth2/v1/userinfo", access_token=access["access_token"]) # Save the user and access token with # e.g. set_secure_cookie. else: await self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) .. testoutput:: :hide: .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead.
['Handles', 'the', 'login', 'for', 'the', 'Google', 'user', 'returning', 'an', 'access', 'token', '.']
train
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/auth.py#L854-L916
6,949
ensime/ensime-vim
ensime_shared/editor.py
Editor.point2pos
def point2pos(self, point): """Converts a point or offset in a file to a (row, col) position.""" row = self._vim.eval('byte2line({})'.format(point)) col = self._vim.eval('{} - line2byte({})'.format(point, row)) return (int(row), int(col))
python
def point2pos(self, point): """Converts a point or offset in a file to a (row, col) position.""" row = self._vim.eval('byte2line({})'.format(point)) col = self._vim.eval('{} - line2byte({})'.format(point, row)) return (int(row), int(col))
['def', 'point2pos', '(', 'self', ',', 'point', ')', ':', 'row', '=', 'self', '.', '_vim', '.', 'eval', '(', "'byte2line({})'", '.', 'format', '(', 'point', ')', ')', 'col', '=', 'self', '.', '_vim', '.', 'eval', '(', "'{} - line2byte({})'", '.', 'format', '(', 'point', ',', 'row', ')', ')', 'return', '(', 'int', '(', 'row', ')', ',', 'int', '(', 'col', ')', ')']
Converts a point or offset in a file to a (row, col) position.
['Converts', 'a', 'point', 'or', 'offset', 'in', 'a', 'file', 'to', 'a', '(', 'row', 'col', ')', 'position', '.']
train
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/editor.py#L89-L93
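A pure-Python approximation of the point2pos record above, using 0-based byte offsets on a plain string instead of Vim's 1-based byte2line()/line2byte():

def point2pos(text, point):
    # Row = 1 + number of newlines before the offset; column = distance from
    # the start of that line.
    prefix = text[:point]
    row = prefix.count("\n") + 1
    col = point - (prefix.rfind("\n") + 1)
    return row, col

print(point2pos("first line\nsecond line\n", 13))  # (2, 2)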
6,950
parkouss/pyewmh
ewmh/ewmh.py
EWMH._setProperty
def _setProperty(self, _type, data, win=None, mask=None): """ Send a ClientMessage event to the root window """ if not win: win = self.root if type(data) is str: dataSize = 8 else: data = (data+[0]*(5-len(data)))[:5] dataSize = 32 ev = protocol.event.ClientMessage( window=win, client_type=self.display.get_atom(_type), data=(dataSize, data)) if not mask: mask = (X.SubstructureRedirectMask | X.SubstructureNotifyMask) self.root.send_event(ev, event_mask=mask)
python
def _setProperty(self, _type, data, win=None, mask=None): """ Send a ClientMessage event to the root window """ if not win: win = self.root if type(data) is str: dataSize = 8 else: data = (data+[0]*(5-len(data)))[:5] dataSize = 32 ev = protocol.event.ClientMessage( window=win, client_type=self.display.get_atom(_type), data=(dataSize, data)) if not mask: mask = (X.SubstructureRedirectMask | X.SubstructureNotifyMask) self.root.send_event(ev, event_mask=mask)
['def', '_setProperty', '(', 'self', ',', '_type', ',', 'data', ',', 'win', '=', 'None', ',', 'mask', '=', 'None', ')', ':', 'if', 'not', 'win', ':', 'win', '=', 'self', '.', 'root', 'if', 'type', '(', 'data', ')', 'is', 'str', ':', 'dataSize', '=', '8', 'else', ':', 'data', '=', '(', 'data', '+', '[', '0', ']', '*', '(', '5', '-', 'len', '(', 'data', ')', ')', ')', '[', ':', '5', ']', 'dataSize', '=', '32', 'ev', '=', 'protocol', '.', 'event', '.', 'ClientMessage', '(', 'window', '=', 'win', ',', 'client_type', '=', 'self', '.', 'display', '.', 'get_atom', '(', '_type', ')', ',', 'data', '=', '(', 'dataSize', ',', 'data', ')', ')', 'if', 'not', 'mask', ':', 'mask', '=', '(', 'X', '.', 'SubstructureRedirectMask', '|', 'X', '.', 'SubstructureNotifyMask', ')', 'self', '.', 'root', '.', 'send_event', '(', 'ev', ',', 'event_mask', '=', 'mask', ')']
Send a ClientMessage event to the root window
['Send', 'a', 'ClientMessage', 'event', 'to', 'the', 'root', 'window']
train
https://github.com/parkouss/pyewmh/blob/8209e9d942b4f39e32f14e2684d94bb5e6269aac/ewmh/ewmh.py#L412-L430
6,951
stephen-bunn/file-config
tasks/docs.py
view
def view(ctx): """ Build and view docs. """ report.info(ctx, "docs.view", f"viewing documentation") build_path = ctx.docs.directory / "build" / "html" / "index.html" build_path = pathname2url(build_path.as_posix()) webbrowser.open(f"file:{build_path!s}")
python
def view(ctx): """ Build and view docs. """ report.info(ctx, "docs.view", f"viewing documentation") build_path = ctx.docs.directory / "build" / "html" / "index.html" build_path = pathname2url(build_path.as_posix()) webbrowser.open(f"file:{build_path!s}")
['def', 'view', '(', 'ctx', ')', ':', 'report', '.', 'info', '(', 'ctx', ',', '"docs.view"', ',', 'f"viewing documentation"', ')', 'build_path', '=', 'ctx', '.', 'docs', '.', 'directory', '/', '"build"', '/', '"html"', '/', '"index.html"', 'build_path', '=', 'pathname2url', '(', 'build_path', '.', 'as_posix', '(', ')', ')', 'webbrowser', '.', 'open', '(', 'f"file:{build_path!s}"', ')']
Build and view docs.
['Build', 'and', 'view', 'docs', '.']
train
https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/tasks/docs.py#L58-L65
6,952
HazyResearch/fonduer
src/fonduer/candidates/matchers.py
_FigureMatcher._get_span
def _get_span(self, m): """ Gets a tuple that identifies a figure for the specific mention class that m belongs to. """ return (m.figure.document.id, m.figure.position)
python
def _get_span(self, m): """ Gets a tuple that identifies a figure for the specific mention class that m belongs to. """ return (m.figure.document.id, m.figure.position)
['def', '_get_span', '(', 'self', ',', 'm', ')', ':', 'return', '(', 'm', '.', 'figure', '.', 'document', '.', 'id', ',', 'm', '.', 'figure', '.', 'position', ')']
Gets a tuple that identifies a figure for the specific mention class that m belongs to.
['Gets', 'a', 'tuple', 'that', 'identifies', 'a', 'figure', 'for', 'the', 'specific', 'mention', 'class', 'that', 'm', 'belongs', 'to', '.']
train
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/matchers.py#L476-L481
6,953
gwastro/pycbc
pycbc/inference/sampler/base_mcmc.py
BaseMCMC.set_target_from_config
def set_target_from_config(self, cp, section): """Sets the target using the given config file. This looks for ``niterations`` to set the ``target_niterations``, and ``effective-nsamples`` to set the ``target_eff_nsamples``. Parameters ---------- cp : ConfigParser Open config parser to retrieve the argument from. section : str Name of the section to retrieve from. """ if cp.has_option(section, "niterations"): niterations = int(cp.get(section, "niterations")) else: niterations = None if cp.has_option(section, "effective-nsamples"): nsamples = int(cp.get(section, "effective-nsamples")) else: nsamples = None self.set_target(niterations=niterations, eff_nsamples=nsamples)
python
def set_target_from_config(self, cp, section): """Sets the target using the given config file. This looks for ``niterations`` to set the ``target_niterations``, and ``effective-nsamples`` to set the ``target_eff_nsamples``. Parameters ---------- cp : ConfigParser Open config parser to retrieve the argument from. section : str Name of the section to retrieve from. """ if cp.has_option(section, "niterations"): niterations = int(cp.get(section, "niterations")) else: niterations = None if cp.has_option(section, "effective-nsamples"): nsamples = int(cp.get(section, "effective-nsamples")) else: nsamples = None self.set_target(niterations=niterations, eff_nsamples=nsamples)
['def', 'set_target_from_config', '(', 'self', ',', 'cp', ',', 'section', ')', ':', 'if', 'cp', '.', 'has_option', '(', 'section', ',', '"niterations"', ')', ':', 'niterations', '=', 'int', '(', 'cp', '.', 'get', '(', 'section', ',', '"niterations"', ')', ')', 'else', ':', 'niterations', '=', 'None', 'if', 'cp', '.', 'has_option', '(', 'section', ',', '"effective-nsamples"', ')', ':', 'nsamples', '=', 'int', '(', 'cp', '.', 'get', '(', 'section', ',', '"effective-nsamples"', ')', ')', 'else', ':', 'nsamples', '=', 'None', 'self', '.', 'set_target', '(', 'niterations', '=', 'niterations', ',', 'eff_nsamples', '=', 'nsamples', ')']
Sets the target using the given config file. This looks for ``niterations`` to set the ``target_niterations``, and ``effective-nsamples`` to set the ``target_eff_nsamples``. Parameters ---------- cp : ConfigParser Open config parser to retrieve the argument from. section : str Name of the section to retrieve from.
['Sets', 'the', 'target', 'using', 'the', 'given', 'config', 'file', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base_mcmc.py#L661-L682
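A small sketch of reading the same two options with the standard library ConfigParser, outside of PyCBC (section name and values are made up):

from configparser import ConfigParser

cp = ConfigParser()
cp.read_string("""
[sampler]
niterations = 5000
""")

section = "sampler"
niterations = int(cp.get(section, "niterations")) if cp.has_option(section, "niterations") else None
nsamples = int(cp.get(section, "effective-nsamples")) if cp.has_option(section, "effective-nsamples") else None
print(niterations, nsamples)  # 5000 None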
6,954
choderalab/pymbar
pymbar/utils.py
check_w_normalized
def check_w_normalized(W, N_k, tolerance = 1.0e-4): """Check the weight matrix W is properly normalized. The sum over N should be 1, and the sum over k by N_k should aslo be 1 Parameters ---------- W : np.ndarray, shape=(N, K), dtype='float' The normalized weight matrix for snapshots and states. W[n, k] is the weight of snapshot n in state k. N_k : np.ndarray, shape=(K), dtype='int' N_k[k] is the number of samples from state k. tolerance : float, optional, default=1.0e-4 Tolerance for checking equality of sums Returns ------- None : NoneType Returns a None object if test passes, otherwise raises a ParameterError with appropriate message if W is not normalized within tolerance. """ [N, K] = W.shape column_sums = np.sum(W, axis=0) badcolumns = (np.abs(column_sums - 1) > tolerance) if np.any(badcolumns): which_badcolumns = np.arange(K)[badcolumns] firstbad = which_badcolumns[0] raise ParameterError( 'Warning: Should have \sum_n W_nk = 1. Actual column sum for state %d was %f. %d other columns have similar problems' % (firstbad, column_sums[firstbad], np.sum(badcolumns))) row_sums = np.sum(W * N_k, axis=1) badrows = (np.abs(row_sums - 1) > tolerance) if np.any(badrows): which_badrows = np.arange(N)[badrows] firstbad = which_badrows[0] raise ParameterError( 'Warning: Should have \sum_k N_k W_nk = 1. Actual row sum for sample %d was %f. %d other rows have similar problems' % (firstbad, row_sums[firstbad], np.sum(badrows))) return
python
def check_w_normalized(W, N_k, tolerance = 1.0e-4): """Check the weight matrix W is properly normalized. The sum over N should be 1, and the sum over k by N_k should aslo be 1 Parameters ---------- W : np.ndarray, shape=(N, K), dtype='float' The normalized weight matrix for snapshots and states. W[n, k] is the weight of snapshot n in state k. N_k : np.ndarray, shape=(K), dtype='int' N_k[k] is the number of samples from state k. tolerance : float, optional, default=1.0e-4 Tolerance for checking equality of sums Returns ------- None : NoneType Returns a None object if test passes, otherwise raises a ParameterError with appropriate message if W is not normalized within tolerance. """ [N, K] = W.shape column_sums = np.sum(W, axis=0) badcolumns = (np.abs(column_sums - 1) > tolerance) if np.any(badcolumns): which_badcolumns = np.arange(K)[badcolumns] firstbad = which_badcolumns[0] raise ParameterError( 'Warning: Should have \sum_n W_nk = 1. Actual column sum for state %d was %f. %d other columns have similar problems' % (firstbad, column_sums[firstbad], np.sum(badcolumns))) row_sums = np.sum(W * N_k, axis=1) badrows = (np.abs(row_sums - 1) > tolerance) if np.any(badrows): which_badrows = np.arange(N)[badrows] firstbad = which_badrows[0] raise ParameterError( 'Warning: Should have \sum_k N_k W_nk = 1. Actual row sum for sample %d was %f. %d other rows have similar problems' % (firstbad, row_sums[firstbad], np.sum(badrows))) return
['def', 'check_w_normalized', '(', 'W', ',', 'N_k', ',', 'tolerance', '=', '1.0e-4', ')', ':', '[', 'N', ',', 'K', ']', '=', 'W', '.', 'shape', 'column_sums', '=', 'np', '.', 'sum', '(', 'W', ',', 'axis', '=', '0', ')', 'badcolumns', '=', '(', 'np', '.', 'abs', '(', 'column_sums', '-', '1', ')', '>', 'tolerance', ')', 'if', 'np', '.', 'any', '(', 'badcolumns', ')', ':', 'which_badcolumns', '=', 'np', '.', 'arange', '(', 'K', ')', '[', 'badcolumns', ']', 'firstbad', '=', 'which_badcolumns', '[', '0', ']', 'raise', 'ParameterError', '(', "'Warning: Should have \\sum_n W_nk = 1. Actual column sum for state %d was %f. %d other columns have similar problems'", '%', '(', 'firstbad', ',', 'column_sums', '[', 'firstbad', ']', ',', 'np', '.', 'sum', '(', 'badcolumns', ')', ')', ')', 'row_sums', '=', 'np', '.', 'sum', '(', 'W', '*', 'N_k', ',', 'axis', '=', '1', ')', 'badrows', '=', '(', 'np', '.', 'abs', '(', 'row_sums', '-', '1', ')', '>', 'tolerance', ')', 'if', 'np', '.', 'any', '(', 'badrows', ')', ':', 'which_badrows', '=', 'np', '.', 'arange', '(', 'N', ')', '[', 'badrows', ']', 'firstbad', '=', 'which_badrows', '[', '0', ']', 'raise', 'ParameterError', '(', "'Warning: Should have \\sum_k N_k W_nk = 1. Actual row sum for sample %d was %f. %d other rows have similar problems'", '%', '(', 'firstbad', ',', 'row_sums', '[', 'firstbad', ']', ',', 'np', '.', 'sum', '(', 'badrows', ')', ')', ')', 'return']
Check the weight matrix W is properly normalized. The sum over N should be 1, and the sum over k by N_k should also be 1 Parameters ---------- W : np.ndarray, shape=(N, K), dtype='float' The normalized weight matrix for snapshots and states. W[n, k] is the weight of snapshot n in state k. N_k : np.ndarray, shape=(K), dtype='int' N_k[k] is the number of samples from state k. tolerance : float, optional, default=1.0e-4 Tolerance for checking equality of sums Returns ------- None : NoneType Returns a None object if test passes, otherwise raises a ParameterError with appropriate message if W is not normalized within tolerance.
['Check', 'the', 'weight', 'matrix', 'W', 'is', 'properly', 'normalized', '.', 'The', 'sum', 'over', 'N', 'should', 'be', '1', 'and', 'the', 'sum', 'over', 'k', 'by', 'N_k', 'should', 'also', 'be', '1']
train
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/utils.py#L332-L371
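Usage example for the check_w_normalized record above (assumes that function and pymbar's ParameterError are in scope): a weight matrix whose columns sum to 1, and whose rows sum to 1 after weighting by N_k, passes silently; anything else raises ParameterError.

import numpy as np

N, K = 6, 3
N_k = np.array([2, 2, 2])
W = np.full((N, K), 1.0 / N)   # column sums = 1, row sums of W * N_k = 1

check_w_normalized(W, N_k)     # passes, returns None

W_bad = W.copy()
W_bad[0, 0] = 0.5
# check_w_normalized(W_bad, N_k)   # would raise ParameterError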
6,955
Microsoft/nni
src/sdk/pynni/nni/metis_tuner/metis_tuner.py
_rand_init
def _rand_init(x_bounds, x_types, selection_num_starting_points): ''' Random sample some init seed within bounds. ''' return [lib_data.rand(x_bounds, x_types) for i \ in range(0, selection_num_starting_points)]
python
def _rand_init(x_bounds, x_types, selection_num_starting_points): ''' Random sample some init seed within bounds. ''' return [lib_data.rand(x_bounds, x_types) for i \ in range(0, selection_num_starting_points)]
['def', '_rand_init', '(', 'x_bounds', ',', 'x_types', ',', 'selection_num_starting_points', ')', ':', 'return', '[', 'lib_data', '.', 'rand', '(', 'x_bounds', ',', 'x_types', ')', 'for', 'i', 'in', 'range', '(', '0', ',', 'selection_num_starting_points', ')', ']']
Random sample some init seed within bounds.
['Random', 'sample', 'some', 'init', 'seed', 'within', 'bounds', '.']
train
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/metis_tuner/metis_tuner.py#L493-L498
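A minimal stand-in for the _rand_init record above, assuming purely continuous bounds (the real lib_data.rand also understands discrete and categorical parameter types):

import random

def rand_point(x_bounds):
    # One random point, uniform within each [low, high] bound.
    return [random.uniform(low, high) for low, high in x_bounds]

x_bounds = [[0.0, 1.0], [10.0, 20.0]]
starting_points = [rand_point(x_bounds) for _ in range(5)]
print(starting_points)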
6,956
juju/python-libjuju
juju/model.py
Model._wait
async def _wait(self, entity_type, entity_id, action, predicate=None): """ Block the calling routine until a given action has happened to the given entity :param entity_type: The entity's type. :param entity_id: The entity's id. :param action: the type of action (e.g., 'add', 'change', or 'remove') :param predicate: optional callable that must take as an argument a delta, and must return a boolean, indicating whether the delta contains the specific action we're looking for. For example, you might check to see whether a 'change' has a 'completed' status. See the _Observer class for details. """ q = asyncio.Queue(loop=self._connector.loop) async def callback(delta, old, new, model): await q.put(delta.get_id()) self.add_observer(callback, entity_type, action, entity_id, predicate) entity_id = await q.get() # object might not be in the entity_map if we were waiting for a # 'remove' action return self.state._live_entity_map(entity_type).get(entity_id)
python
async def _wait(self, entity_type, entity_id, action, predicate=None): """ Block the calling routine until a given action has happened to the given entity :param entity_type: The entity's type. :param entity_id: The entity's id. :param action: the type of action (e.g., 'add', 'change', or 'remove') :param predicate: optional callable that must take as an argument a delta, and must return a boolean, indicating whether the delta contains the specific action we're looking for. For example, you might check to see whether a 'change' has a 'completed' status. See the _Observer class for details. """ q = asyncio.Queue(loop=self._connector.loop) async def callback(delta, old, new, model): await q.put(delta.get_id()) self.add_observer(callback, entity_type, action, entity_id, predicate) entity_id = await q.get() # object might not be in the entity_map if we were waiting for a # 'remove' action return self.state._live_entity_map(entity_type).get(entity_id)
['async', 'def', '_wait', '(', 'self', ',', 'entity_type', ',', 'entity_id', ',', 'action', ',', 'predicate', '=', 'None', ')', ':', 'q', '=', 'asyncio', '.', 'Queue', '(', 'loop', '=', 'self', '.', '_connector', '.', 'loop', ')', 'async', 'def', 'callback', '(', 'delta', ',', 'old', ',', 'new', ',', 'model', ')', ':', 'await', 'q', '.', 'put', '(', 'delta', '.', 'get_id', '(', ')', ')', 'self', '.', 'add_observer', '(', 'callback', ',', 'entity_type', ',', 'action', ',', 'entity_id', ',', 'predicate', ')', 'entity_id', '=', 'await', 'q', '.', 'get', '(', ')', '# object might not be in the entity_map if we were waiting for a', "# 'remove' action", 'return', 'self', '.', 'state', '.', '_live_entity_map', '(', 'entity_type', ')', '.', 'get', '(', 'entity_id', ')']
Block the calling routine until a given action has happened to the given entity :param entity_type: The entity's type. :param entity_id: The entity's id. :param action: the type of action (e.g., 'add', 'change', or 'remove') :param predicate: optional callable that must take as an argument a delta, and must return a boolean, indicating whether the delta contains the specific action we're looking for. For example, you might check to see whether a 'change' has a 'completed' status. See the _Observer class for details.
['Block', 'the', 'calling', 'routine', 'until', 'a', 'given', 'action', 'has', 'happened', 'to', 'the', 'given', 'entity']
train
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L898-L922
6,957
theislab/anndata
anndata/h5py/h5sparse.py
_set_many
def _set_many(self, i, j, x): """Sets value at each (i, j) to x Here (i,j) index major and minor respectively, and must not contain duplicate entries. """ i, j, M, N = self._prepare_indices(i, j) n_samples = len(x) offsets = np.empty(n_samples, dtype=self.indices.dtype) ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets) if ret == 1: # rinse and repeat self.sum_duplicates() _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets) if -1 not in offsets: # make a list for interaction with h5py offsets = list(offsets) # only affects existing non-zero cells self.data[offsets] = x return else: # raise ValueError( # 'Currently, you cannot change the sparsity structure of a SparseDataset.') # replace where possible mask = offsets > -1 self.data[offsets[mask]] = x[mask] # only insertions remain mask = ~mask i = i[mask] i[i < 0] += M j = j[mask] j[j < 0] += N self._insert_many(i, j, x[mask])
python
def _set_many(self, i, j, x): """Sets value at each (i, j) to x Here (i,j) index major and minor respectively, and must not contain duplicate entries. """ i, j, M, N = self._prepare_indices(i, j) n_samples = len(x) offsets = np.empty(n_samples, dtype=self.indices.dtype) ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets) if ret == 1: # rinse and repeat self.sum_duplicates() _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets) if -1 not in offsets: # make a list for interaction with h5py offsets = list(offsets) # only affects existing non-zero cells self.data[offsets] = x return else: # raise ValueError( # 'Currently, you cannot change the sparsity structure of a SparseDataset.') # replace where possible mask = offsets > -1 self.data[offsets[mask]] = x[mask] # only insertions remain mask = ~mask i = i[mask] i[i < 0] += M j = j[mask] j[j < 0] += N self._insert_many(i, j, x[mask])
['def', '_set_many', '(', 'self', ',', 'i', ',', 'j', ',', 'x', ')', ':', 'i', ',', 'j', ',', 'M', ',', 'N', '=', 'self', '.', '_prepare_indices', '(', 'i', ',', 'j', ')', 'n_samples', '=', 'len', '(', 'x', ')', 'offsets', '=', 'np', '.', 'empty', '(', 'n_samples', ',', 'dtype', '=', 'self', '.', 'indices', '.', 'dtype', ')', 'ret', '=', '_sparsetools', '.', 'csr_sample_offsets', '(', 'M', ',', 'N', ',', 'self', '.', 'indptr', ',', 'self', '.', 'indices', ',', 'n_samples', ',', 'i', ',', 'j', ',', 'offsets', ')', 'if', 'ret', '==', '1', ':', '# rinse and repeat', 'self', '.', 'sum_duplicates', '(', ')', '_sparsetools', '.', 'csr_sample_offsets', '(', 'M', ',', 'N', ',', 'self', '.', 'indptr', ',', 'self', '.', 'indices', ',', 'n_samples', ',', 'i', ',', 'j', ',', 'offsets', ')', 'if', '-', '1', 'not', 'in', 'offsets', ':', '# make a list for interaction with h5py', 'offsets', '=', 'list', '(', 'offsets', ')', '# only affects existing non-zero cells', 'self', '.', 'data', '[', 'offsets', ']', '=', 'x', 'return', 'else', ':', '# raise ValueError(', "# 'Currently, you cannot change the sparsity structure of a SparseDataset.')", '# replace where possible', 'mask', '=', 'offsets', '>', '-', '1', 'self', '.', 'data', '[', 'offsets', '[', 'mask', ']', ']', '=', 'x', '[', 'mask', ']', '# only insertions remain', 'mask', '=', '~', 'mask', 'i', '=', 'i', '[', 'mask', ']', 'i', '[', 'i', '<', '0', ']', '+=', 'M', 'j', '=', 'j', '[', 'mask', ']', 'j', '[', 'j', '<', '0', ']', '+=', 'N', 'self', '.', '_insert_many', '(', 'i', ',', 'j', ',', 'x', '[', 'mask', ']', ')']
Sets value at each (i, j) to x Here (i,j) index major and minor respectively, and must not contain duplicate entries.
['Sets', 'value', 'at', 'each', '(', 'i', 'j', ')', 'to', 'x']
train
https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/h5py/h5sparse.py#L170-L208
6,958
radical-cybertools/radical.entk
src/radical/entk/stage/stage.py
Stage._validate_entities
def _validate_entities(self, tasks): """ Purpose: Validate whether the 'tasks' is of type set. Validate the description of each Task. """ if not tasks: raise TypeError(expected_type=Task, actual_type=type(tasks)) if not isinstance(tasks, set): if not isinstance(tasks, list): tasks = set([tasks]) else: tasks = set(tasks) for t in tasks: if not isinstance(t, Task): raise TypeError(expected_type=Task, actual_type=type(t)) return tasks
python
def _validate_entities(self, tasks): """ Purpose: Validate whether the 'tasks' is of type set. Validate the description of each Task. """ if not tasks: raise TypeError(expected_type=Task, actual_type=type(tasks)) if not isinstance(tasks, set): if not isinstance(tasks, list): tasks = set([tasks]) else: tasks = set(tasks) for t in tasks: if not isinstance(t, Task): raise TypeError(expected_type=Task, actual_type=type(t)) return tasks
['def', '_validate_entities', '(', 'self', ',', 'tasks', ')', ':', 'if', 'not', 'tasks', ':', 'raise', 'TypeError', '(', 'expected_type', '=', 'Task', ',', 'actual_type', '=', 'type', '(', 'tasks', ')', ')', 'if', 'not', 'isinstance', '(', 'tasks', ',', 'set', ')', ':', 'if', 'not', 'isinstance', '(', 'tasks', ',', 'list', ')', ':', 'tasks', '=', 'set', '(', '[', 'tasks', ']', ')', 'else', ':', 'tasks', '=', 'set', '(', 'tasks', ')', 'for', 't', 'in', 'tasks', ':', 'if', 'not', 'isinstance', '(', 't', ',', 'Task', ')', ':', 'raise', 'TypeError', '(', 'expected_type', '=', 'Task', ',', 'actual_type', '=', 'type', '(', 't', ')', ')', 'return', 'tasks']
Purpose: Validate whether the 'tasks' is of type set. Validate the description of each Task.
['Purpose', ':', 'Validate', 'whether', 'the', 'tasks', 'is', 'of', 'type', 'set', '.', 'Validate', 'the', 'description', 'of', 'each', 'Task', '.']
train
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/stage/stage.py#L308-L328
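The validator above normalises a single Task, a list, or a set into a set and type-checks each element (the TypeError it raises is radical.entk's own keyword-argument variant). A hedged, standalone sketch of the same normalisation, using a stand-in Task class and the built-in TypeError:

class Task:
    """Stand-in for radical.entk.Task, used only for this illustration."""
    pass

def validate_entities(tasks):
    if not tasks:
        raise TypeError('expected Task or iterable of Tasks, got %r' % type(tasks))
    if not isinstance(tasks, set):
        # Accept a bare instance or a list and normalise to a set.
        tasks = set(tasks) if isinstance(tasks, list) else {tasks}
    for t in tasks:
        if not isinstance(t, Task):
            raise TypeError('expected Task, got %r' % type(t))
    return tasks

print(validate_entities(Task()))            # a single instance becomes a one-element set
print(validate_entities([Task(), Task()]))  # a list becomes a set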
6,959
boriel/zxbasic
zxbpplex.py
Lexer.t_singlecomment_NEWLINE
def t_singlecomment_NEWLINE(self, t): r'\r?\n' t.lexer.pop_state() # Back to initial t.lexer.lineno += 1 return t
python
def t_singlecomment_NEWLINE(self, t): r'\r?\n' t.lexer.pop_state() # Back to initial t.lexer.lineno += 1 return t
['def', 't_singlecomment_NEWLINE', '(', 'self', ',', 't', ')', ':', 't', '.', 'lexer', '.', 'pop_state', '(', ')', '# Back to initial', 't', '.', 'lexer', '.', 'lineno', '+=', '1', 'return', 't']
r'\r?\n
['r', '\\', 'r?', '\\', 'n']
train
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbpplex.py#L152-L156
6,960
infothrill/python-dyndnsc
dyndnsc/detector/rand.py
RandomIPGenerator.random_public_ip
def random_public_ip(self): """Return a randomly generated, public IPv4 address. :return: ip address """ randomip = random_ip() while self.is_reserved_ip(randomip): randomip = random_ip() return randomip
python
def random_public_ip(self): """Return a randomly generated, public IPv4 address. :return: ip address """ randomip = random_ip() while self.is_reserved_ip(randomip): randomip = random_ip() return randomip
['def', 'random_public_ip', '(', 'self', ')', ':', 'randomip', '=', 'random_ip', '(', ')', 'while', 'self', '.', 'is_reserved_ip', '(', 'randomip', ')', ':', 'randomip', '=', 'random_ip', '(', ')', 'return', 'randomip']
Return a randomly generated, public IPv4 address. :return: ip address
['Return', 'a', 'randomly', 'generated', 'public', 'IPv4', 'address', '.']
train
https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/detector/rand.py#L64-L72
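The detector above rejection-samples random addresses until one falls outside the reserved ranges. A self-contained version of the same idea using only the standard library (dyndnsc's random_ip and is_reserved_ip helpers are replaced by ipaddress checks):

import ipaddress
import random

def random_ip():
    # Draw a uniformly random 32-bit IPv4 address.
    return ipaddress.IPv4Address(random.getrandbits(32))

def random_public_ip():
    ip = random_ip()
    while not ip.is_global:   # retry while the address is private/reserved/etc.
        ip = random_ip()
    return ip

print(random_public_ip())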
6,961
fermiPy/fermipy
fermipy/jobs/file_archive.py
FileArchive.get_file_paths
def get_file_paths(self, id_list): """Get a list of file paths based of a set of ids Parameters ---------- id_list : list List of integer file keys Returns list of file paths """ if id_list is None: return [] try: path_array = self._table[id_list - 1]['path'] except IndexError: print("IndexError ", len(self._table), id_list) path_array = [] return [path for path in path_array]
python
def get_file_paths(self, id_list): """Get a list of file paths based of a set of ids Parameters ---------- id_list : list List of integer file keys Returns list of file paths """ if id_list is None: return [] try: path_array = self._table[id_list - 1]['path'] except IndexError: print("IndexError ", len(self._table), id_list) path_array = [] return [path for path in path_array]
['def', 'get_file_paths', '(', 'self', ',', 'id_list', ')', ':', 'if', 'id_list', 'is', 'None', ':', 'return', '[', ']', 'try', ':', 'path_array', '=', 'self', '.', '_table', '[', 'id_list', '-', '1', ']', '[', "'path'", ']', 'except', 'IndexError', ':', 'print', '(', '"IndexError "', ',', 'len', '(', 'self', '.', '_table', ')', ',', 'id_list', ')', 'path_array', '=', '[', ']', 'return', '[', 'path', 'for', 'path', 'in', 'path_array', ']']
Get a list of file paths based on a set of ids Parameters ---------- id_list : list List of integer file keys Returns list of file paths

['Get', 'a', 'list', 'of', 'file', 'paths', 'based', 'of', 'a', 'set', 'of', 'ids']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/file_archive.py#L746-L764
6,962
mitsei/dlkit
dlkit/json_/assessment_authoring/managers.py
AssessmentAuthoringManager.get_sequence_rule_admin_session_for_bank
def get_sequence_rule_admin_session_for_bank(self, bank_id): """Gets the ``OsidSession`` associated with the sequence rule administration service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` return: (osid.assessment.authoring.SequenceRuleAdminSession) - a ``SequenceRuleAdminSession`` raise: NotFound - no ``Bank`` found by the given ``Id`` raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_sequence_rule_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_sequence_rule_admin()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_sequence_rule_admin(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.SequenceRuleAdminSession(bank_id, runtime=self._runtime)
python
def get_sequence_rule_admin_session_for_bank(self, bank_id): """Gets the ``OsidSession`` associated with the sequence rule administration service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` return: (osid.assessment.authoring.SequenceRuleAdminSession) - a ``SequenceRuleAdminSession`` raise: NotFound - no ``Bank`` found by the given ``Id`` raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_sequence_rule_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_sequence_rule_admin()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_sequence_rule_admin(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.SequenceRuleAdminSession(bank_id, runtime=self._runtime)
['def', 'get_sequence_rule_admin_session_for_bank', '(', 'self', ',', 'bank_id', ')', ':', 'if', 'not', 'self', '.', 'supports_sequence_rule_admin', '(', ')', ':', 'raise', 'errors', '.', 'Unimplemented', '(', ')', '##', '# Also include check to see if the catalog Id is found otherwise raise errors.NotFound', '##', '# pylint: disable=no-member', 'return', 'sessions', '.', 'SequenceRuleAdminSession', '(', 'bank_id', ',', 'runtime', '=', 'self', '.', '_runtime', ')']
Gets the ``OsidSession`` associated with the sequence rule administration service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` return: (osid.assessment.authoring.SequenceRuleAdminSession) - a ``SequenceRuleAdminSession`` raise: NotFound - no ``Bank`` found by the given ``Id`` raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_sequence_rule_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_sequence_rule_admin()`` and ``supports_visible_federation()`` are ``true``.*
['Gets', 'the', 'OsidSession', 'associated', 'with', 'the', 'sequence', 'rule', 'administration', 'service', 'for', 'the', 'given', 'bank', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/managers.py#L560-L582
6,963
camptocamp/Studio
studio/config/environment.py
load_environment
def load_environment(global_conf, app_conf): """Configure the Pylons environment via the ``pylons.config`` object """ # Pylons paths root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) paths = dict(root=root, controllers=os.path.join(root, 'controllers'), static_files=os.path.join(root, 'public'), templates=[os.path.join(root, 'templates')]) # Initialize config with the basic options config.init_app(global_conf, app_conf, package='studio', paths=paths) # Defines custom config parameters config['resources_dir'] = os.path.join(root, 'resources') # path to mapserver dir containing default fonts and symbols config['mapserver_dir'] = os.path.join(config['resources_dir'], 'mapserver') # path to default directory datastore config['default_datastore_dir'] = os.path.join(config['resources_dir'], 'default_datastore') # path to the template including the <script> tags config['js_tmpl'] = os.path.join(paths['templates'][0], 'index.html') # Convert the debug variable from the config to a boolean value config['debug'] = asbool(config['debug']) config['routes.map'] = make_map() config['pylons.app_globals'] = app_globals.Globals() config['pylons.h'] = studio.lib.helpers # Create the Mako TemplateLookup, with the default auto-escaping config['pylons.app_globals'].mako_lookup = TemplateLookup( directories=paths['templates'], error_handler=handle_mako_error, module_directory=os.path.join(app_conf['cache_dir'], 'templates'), input_encoding='utf-8', output_encoding='utf-8', imports=['from webhelpers.html import escape'], default_filters=['escape']) # Setup SQLAlchemy database engine engine = engine_from_config(config, 'sqlalchemy.') init_model(engine)
python
def load_environment(global_conf, app_conf): """Configure the Pylons environment via the ``pylons.config`` object """ # Pylons paths root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) paths = dict(root=root, controllers=os.path.join(root, 'controllers'), static_files=os.path.join(root, 'public'), templates=[os.path.join(root, 'templates')]) # Initialize config with the basic options config.init_app(global_conf, app_conf, package='studio', paths=paths) # Defines custom config parameters config['resources_dir'] = os.path.join(root, 'resources') # path to mapserver dir containing default fonts and symbols config['mapserver_dir'] = os.path.join(config['resources_dir'], 'mapserver') # path to default directory datastore config['default_datastore_dir'] = os.path.join(config['resources_dir'], 'default_datastore') # path to the template including the <script> tags config['js_tmpl'] = os.path.join(paths['templates'][0], 'index.html') # Convert the debug variable from the config to a boolean value config['debug'] = asbool(config['debug']) config['routes.map'] = make_map() config['pylons.app_globals'] = app_globals.Globals() config['pylons.h'] = studio.lib.helpers # Create the Mako TemplateLookup, with the default auto-escaping config['pylons.app_globals'].mako_lookup = TemplateLookup( directories=paths['templates'], error_handler=handle_mako_error, module_directory=os.path.join(app_conf['cache_dir'], 'templates'), input_encoding='utf-8', output_encoding='utf-8', imports=['from webhelpers.html import escape'], default_filters=['escape']) # Setup SQLAlchemy database engine engine = engine_from_config(config, 'sqlalchemy.') init_model(engine)
['def', 'load_environment', '(', 'global_conf', ',', 'app_conf', ')', ':', '# Pylons paths', 'root', '=', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'abspath', '(', '__file__', ')', ')', ')', 'paths', '=', 'dict', '(', 'root', '=', 'root', ',', 'controllers', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', "'controllers'", ')', ',', 'static_files', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', "'public'", ')', ',', 'templates', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'root', ',', "'templates'", ')', ']', ')', '# Initialize config with the basic options', 'config', '.', 'init_app', '(', 'global_conf', ',', 'app_conf', ',', 'package', '=', "'studio'", ',', 'paths', '=', 'paths', ')', '# Defines custom config parameters', 'config', '[', "'resources_dir'", ']', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', "'resources'", ')', '# path to mapserver dir containing default fonts and symbols', 'config', '[', "'mapserver_dir'", ']', '=', 'os', '.', 'path', '.', 'join', '(', 'config', '[', "'resources_dir'", ']', ',', "'mapserver'", ')', '# path to default directory datastore', 'config', '[', "'default_datastore_dir'", ']', '=', 'os', '.', 'path', '.', 'join', '(', 'config', '[', "'resources_dir'", ']', ',', "'default_datastore'", ')', '# path to the template including the <script> tags', 'config', '[', "'js_tmpl'", ']', '=', 'os', '.', 'path', '.', 'join', '(', 'paths', '[', "'templates'", ']', '[', '0', ']', ',', "'index.html'", ')', '# Convert the debug variable from the config to a boolean value', 'config', '[', "'debug'", ']', '=', 'asbool', '(', 'config', '[', "'debug'", ']', ')', 'config', '[', "'routes.map'", ']', '=', 'make_map', '(', ')', 'config', '[', "'pylons.app_globals'", ']', '=', 'app_globals', '.', 'Globals', '(', ')', 'config', '[', "'pylons.h'", ']', '=', 'studio', '.', 'lib', '.', 'helpers', '# Create the Mako TemplateLookup, with the default auto-escaping', 'config', '[', "'pylons.app_globals'", ']', '.', 'mako_lookup', '=', 'TemplateLookup', '(', 'directories', '=', 'paths', '[', "'templates'", ']', ',', 'error_handler', '=', 'handle_mako_error', ',', 'module_directory', '=', 'os', '.', 'path', '.', 'join', '(', 'app_conf', '[', "'cache_dir'", ']', ',', "'templates'", ')', ',', 'input_encoding', '=', "'utf-8'", ',', 'output_encoding', '=', "'utf-8'", ',', 'imports', '=', '[', "'from webhelpers.html import escape'", ']', ',', 'default_filters', '=', '[', "'escape'", ']', ')', '# Setup SQLAlchemy database engine', 'engine', '=', 'engine_from_config', '(', 'config', ',', "'sqlalchemy.'", ')', 'init_model', '(', 'engine', ')']
Configure the Pylons environment via the ``pylons.config`` object
['Configure', 'the', 'Pylons', 'environment', 'via', 'the', 'pylons', '.', 'config', 'object']
train
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/config/environment.py#L34-L75
6,964
jilljenn/tryalgo
tryalgo/windows_k_distinct.py
windows_k_distinct
def windows_k_distinct(x, k): """Find all largest windows containing exactly k distinct elements :param x: list or string :param k: positive integer :yields: largest intervals [i, j) with len(set(x[i:j])) == k :complexity: `O(|x|)` """ dist, i, j = 0, 0, 0 # dist = |{x[i], ..., x[j-1]}| occ = {xi: 0 for xi in x} # number of occurrences in x[i:j] while j < len(x): while dist == k: # move start of interval occ[x[i]] -= 1 # update counters if occ[x[i]] == 0: dist -= 1 i += 1 while j < len(x) and (dist < k or occ[x[j]]): if occ[x[j]] == 0: # update counters dist += 1 occ[x[j]] += 1 j += 1 # move end of interval if dist == k: yield (i, j)
python
def windows_k_distinct(x, k): """Find all largest windows containing exactly k distinct elements :param x: list or string :param k: positive integer :yields: largest intervals [i, j) with len(set(x[i:j])) == k :complexity: `O(|x|)` """ dist, i, j = 0, 0, 0 # dist = |{x[i], ..., x[j-1]}| occ = {xi: 0 for xi in x} # number of occurrences in x[i:j] while j < len(x): while dist == k: # move start of interval occ[x[i]] -= 1 # update counters if occ[x[i]] == 0: dist -= 1 i += 1 while j < len(x) and (dist < k or occ[x[j]]): if occ[x[j]] == 0: # update counters dist += 1 occ[x[j]] += 1 j += 1 # move end of interval if dist == k: yield (i, j)
['def', 'windows_k_distinct', '(', 'x', ',', 'k', ')', ':', 'dist', ',', 'i', ',', 'j', '=', '0', ',', '0', ',', '0', '# dist = |{x[i], ..., x[j-1]}|', 'occ', '=', '{', 'xi', ':', '0', 'for', 'xi', 'in', 'x', '}', '# number of occurrences in x[i:j]', 'while', 'j', '<', 'len', '(', 'x', ')', ':', 'while', 'dist', '==', 'k', ':', '# move start of interval', 'occ', '[', 'x', '[', 'i', ']', ']', '-=', '1', '# update counters', 'if', 'occ', '[', 'x', '[', 'i', ']', ']', '==', '0', ':', 'dist', '-=', '1', 'i', '+=', '1', 'while', 'j', '<', 'len', '(', 'x', ')', 'and', '(', 'dist', '<', 'k', 'or', 'occ', '[', 'x', '[', 'j', ']', ']', ')', ':', 'if', 'occ', '[', 'x', '[', 'j', ']', ']', '==', '0', ':', '# update counters', 'dist', '+=', '1', 'occ', '[', 'x', '[', 'j', ']', ']', '+=', '1', 'j', '+=', '1', '# move end of interval', 'if', 'dist', '==', 'k', ':', 'yield', '(', 'i', ',', 'j', ')']
Find all largest windows containing exactly k distinct elements :param x: list or string :param k: positive integer :yields: largest intervals [i, j) with len(set(x[i:j])) == k :complexity: `O(|x|)`
['Find', 'all', 'largest', 'windows', 'containing', 'exactly', 'k', 'distinct', 'elements']
train
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/windows_k_distinct.py#L8-L30
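A quick usage check of the sliding-window generator above, assuming the tryalgo package is installed as published on PyPI:

from tryalgo.windows_k_distinct import windows_k_distinct

print(list(windows_k_distinct("abbaca", 2)))
# -> [(0, 4), (3, 6)]: the windows "abba" and "aca" each contain exactly
#    two distinct characters and cannot be extended.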
6,965
SwoopSearch/pyaddress
address/dstk.py
post_multipart
def post_multipart(host, selector, fields, files): """ Post fields and files to an http host as multipart/form-data. fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return the server's response page. """ content_type, body = encode_multipart_formdata(fields, files) h = httplib.HTTP(host) h.putrequest('POST', selector) h.putheader('content-type', content_type) h.putheader('content-length', str(len(body))) h.endheaders() h.send(body) errcode, errmsg, headers = h.getreply() return h.file.read()
python
def post_multipart(host, selector, fields, files): """ Post fields and files to an http host as multipart/form-data. fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return the server's response page. """ content_type, body = encode_multipart_formdata(fields, files) h = httplib.HTTP(host) h.putrequest('POST', selector) h.putheader('content-type', content_type) h.putheader('content-length', str(len(body))) h.endheaders() h.send(body) errcode, errmsg, headers = h.getreply() return h.file.read()
['def', 'post_multipart', '(', 'host', ',', 'selector', ',', 'fields', ',', 'files', ')', ':', 'content_type', ',', 'body', '=', 'encode_multipart_formdata', '(', 'fields', ',', 'files', ')', 'h', '=', 'httplib', '.', 'HTTP', '(', 'host', ')', 'h', '.', 'putrequest', '(', "'POST'", ',', 'selector', ')', 'h', '.', 'putheader', '(', "'content-type'", ',', 'content_type', ')', 'h', '.', 'putheader', '(', "'content-length'", ',', 'str', '(', 'len', '(', 'body', ')', ')', ')', 'h', '.', 'endheaders', '(', ')', 'h', '.', 'send', '(', 'body', ')', 'errcode', ',', 'errmsg', ',', 'headers', '=', 'h', '.', 'getreply', '(', ')', 'return', 'h', '.', 'file', '.', 'read', '(', ')']
Post fields and files to an http host as multipart/form-data. fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return the server's response page.
['Post', 'fields', 'and', 'files', 'to', 'an', 'http', 'host', 'as', 'multipart', '/', 'form', '-', 'data', '.', 'fields', 'is', 'a', 'sequence', 'of', '(', 'name', 'value', ')', 'elements', 'for', 'regular', 'form', 'fields', '.', 'files', 'is', 'a', 'sequence', 'of', '(', 'name', 'filename', 'value', ')', 'elements', 'for', 'data', 'to', 'be', 'uploaded', 'as', 'files', 'Return', 'the', 'server', 's', 'response', 'page', '.']
train
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/dstk.py#L208-L223
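The helper above relies on the Python 2-only httplib.HTTP interface, which no longer exists in Python 3. A rough modern equivalent of the same multipart upload, assuming the third-party requests package is available; the URL and field names are placeholders:

import requests

fields = {"name": "value"}
files = {"upload": ("report.csv", b"col1,col2\n1,2\n", "text/csv")}

resp = requests.post("https://example.com/submit", data=fields, files=files)
print(resp.status_code)
print(resp.text[:200])   # first part of the server's response page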
6,966
Komnomnomnom/swigibpy
swigibpy.py
EWrapper.tickEFP
def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry): """tickEFP(EWrapper self, TickerId tickerId, TickType tickType, double basisPoints, IBString const & formattedBasisPoints, double totalDividends, int holdDays, IBString const & futureExpiry, double dividendImpact, double dividendsToExpiry)""" return _swigibpy.EWrapper_tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry)
python
def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry): """tickEFP(EWrapper self, TickerId tickerId, TickType tickType, double basisPoints, IBString const & formattedBasisPoints, double totalDividends, int holdDays, IBString const & futureExpiry, double dividendImpact, double dividendsToExpiry)""" return _swigibpy.EWrapper_tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry)
['def', 'tickEFP', '(', 'self', ',', 'tickerId', ',', 'tickType', ',', 'basisPoints', ',', 'formattedBasisPoints', ',', 'totalDividends', ',', 'holdDays', ',', 'futureExpiry', ',', 'dividendImpact', ',', 'dividendsToExpiry', ')', ':', 'return', '_swigibpy', '.', 'EWrapper_tickEFP', '(', 'self', ',', 'tickerId', ',', 'tickType', ',', 'basisPoints', ',', 'formattedBasisPoints', ',', 'totalDividends', ',', 'holdDays', ',', 'futureExpiry', ',', 'dividendImpact', ',', 'dividendsToExpiry', ')']
tickEFP(EWrapper self, TickerId tickerId, TickType tickType, double basisPoints, IBString const & formattedBasisPoints, double totalDividends, int holdDays, IBString const & futureExpiry, double dividendImpact, double dividendsToExpiry)
['tickEFP', '(', 'EWrapper', 'self', 'TickerId', 'tickerId', 'TickType', 'tickType', 'double', 'basisPoints', 'IBString', 'const', '&', 'formattedBasisPoints', 'double', 'totalDividends', 'int', 'holdDays', 'IBString', 'const', '&', 'futureExpiry', 'double', 'dividendImpact', 'double', 'dividendsToExpiry', ')']
train
https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L2446-L2448
6,967
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/transforms.py
transform_frame
def transform_frame(frame, transform, columns=None, direction='forward', return_all=True, args=(), **kwargs): """ Apply transform to specified columns. direction: 'forward' | 'inverse' return_all: bool True - return all columns, with specified ones transformed. False - return only specified columns. .. warning:: deprecated """ tfun, tname = parse_transform(transform, direction) columns = to_list(columns) if columns is None: columns = frame.columns if return_all: transformed = frame.copy() for c in columns: transformed[c] = tfun(frame[c], *args, **kwargs) else: transformed = frame.filter(columns).apply(tfun, *args, **kwargs) return transformed
python
def transform_frame(frame, transform, columns=None, direction='forward', return_all=True, args=(), **kwargs): """ Apply transform to specified columns. direction: 'forward' | 'inverse' return_all: bool True - return all columns, with specified ones transformed. False - return only specified columns. .. warning:: deprecated """ tfun, tname = parse_transform(transform, direction) columns = to_list(columns) if columns is None: columns = frame.columns if return_all: transformed = frame.copy() for c in columns: transformed[c] = tfun(frame[c], *args, **kwargs) else: transformed = frame.filter(columns).apply(tfun, *args, **kwargs) return transformed
['def', 'transform_frame', '(', 'frame', ',', 'transform', ',', 'columns', '=', 'None', ',', 'direction', '=', "'forward'", ',', 'return_all', '=', 'True', ',', 'args', '=', '(', ')', ',', '*', '*', 'kwargs', ')', ':', 'tfun', ',', 'tname', '=', 'parse_transform', '(', 'transform', ',', 'direction', ')', 'columns', '=', 'to_list', '(', 'columns', ')', 'if', 'columns', 'is', 'None', ':', 'columns', '=', 'frame', '.', 'columns', 'if', 'return_all', ':', 'transformed', '=', 'frame', '.', 'copy', '(', ')', 'for', 'c', 'in', 'columns', ':', 'transformed', '[', 'c', ']', '=', 'tfun', '(', 'frame', '[', 'c', ']', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'else', ':', 'transformed', '=', 'frame', '.', 'filter', '(', 'columns', ')', '.', 'apply', '(', 'tfun', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'transformed']
Apply transform to specified columns. direction: 'forward' | 'inverse' return_all: bool True - return all columns, with specified ones transformed. False - return only specified columns. .. warning:: deprecated
['Apply', 'transform', 'to', 'specified', 'columns', '.']
train
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/transforms.py#L303-L325
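transform_frame above resolves a named transform and applies it to selected DataFrame columns, either returning all columns (return_all=True) or only the transformed ones. A plain-pandas illustration of the same two behaviours, with numpy.log1p standing in for the parsed transform function:

import numpy as np
import pandas as pd

frame = pd.DataFrame({"FSC": [10.0, 100.0], "SSC": [5.0, 50.0], "label": [1, 2]})
columns = ["FSC", "SSC"]

# return_all=True: copy the frame and transform the chosen columns in place.
transformed = frame.copy()
for c in columns:
    transformed[c] = np.log1p(frame[c])

# return_all=False: keep only the chosen columns.
only_selected = frame[columns].apply(np.log1p)

print(transformed)
print(only_selected)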
6,968
ethereum/py-evm
eth/vm/base.py
VM.validate_header
def validate_header(cls, header: BlockHeader, parent_header: BlockHeader, check_seal: bool = True) -> None: """ :raise eth.exceptions.ValidationError: if the header is not valid """ if parent_header is None: # to validate genesis header, check if it equals canonical header at block number 0 raise ValidationError("Must have access to parent header to validate current header") else: validate_length_lte(header.extra_data, 32, title="BlockHeader.extra_data") validate_gas_limit(header.gas_limit, parent_header.gas_limit) if header.block_number != parent_header.block_number + 1: raise ValidationError( "Blocks must be numbered consecutively. Block number #{} has parent #{}".format( header.block_number, parent_header.block_number, ) ) # timestamp if header.timestamp <= parent_header.timestamp: raise ValidationError( "timestamp must be strictly later than parent, but is {} seconds before.\n" "- child : {}\n" "- parent : {}. ".format( parent_header.timestamp - header.timestamp, header.timestamp, parent_header.timestamp, ) ) if check_seal: cls.validate_seal(header)
python
def validate_header(cls, header: BlockHeader, parent_header: BlockHeader, check_seal: bool = True) -> None: """ :raise eth.exceptions.ValidationError: if the header is not valid """ if parent_header is None: # to validate genesis header, check if it equals canonical header at block number 0 raise ValidationError("Must have access to parent header to validate current header") else: validate_length_lte(header.extra_data, 32, title="BlockHeader.extra_data") validate_gas_limit(header.gas_limit, parent_header.gas_limit) if header.block_number != parent_header.block_number + 1: raise ValidationError( "Blocks must be numbered consecutively. Block number #{} has parent #{}".format( header.block_number, parent_header.block_number, ) ) # timestamp if header.timestamp <= parent_header.timestamp: raise ValidationError( "timestamp must be strictly later than parent, but is {} seconds before.\n" "- child : {}\n" "- parent : {}. ".format( parent_header.timestamp - header.timestamp, header.timestamp, parent_header.timestamp, ) ) if check_seal: cls.validate_seal(header)
['def', 'validate_header', '(', 'cls', ',', 'header', ':', 'BlockHeader', ',', 'parent_header', ':', 'BlockHeader', ',', 'check_seal', ':', 'bool', '=', 'True', ')', '->', 'None', ':', 'if', 'parent_header', 'is', 'None', ':', '# to validate genesis header, check if it equals canonical header at block number 0', 'raise', 'ValidationError', '(', '"Must have access to parent header to validate current header"', ')', 'else', ':', 'validate_length_lte', '(', 'header', '.', 'extra_data', ',', '32', ',', 'title', '=', '"BlockHeader.extra_data"', ')', 'validate_gas_limit', '(', 'header', '.', 'gas_limit', ',', 'parent_header', '.', 'gas_limit', ')', 'if', 'header', '.', 'block_number', '!=', 'parent_header', '.', 'block_number', '+', '1', ':', 'raise', 'ValidationError', '(', '"Blocks must be numbered consecutively. Block number #{} has parent #{}"', '.', 'format', '(', 'header', '.', 'block_number', ',', 'parent_header', '.', 'block_number', ',', ')', ')', '# timestamp', 'if', 'header', '.', 'timestamp', '<=', 'parent_header', '.', 'timestamp', ':', 'raise', 'ValidationError', '(', '"timestamp must be strictly later than parent, but is {} seconds before.\\n"', '"- child : {}\\n"', '"- parent : {}. "', '.', 'format', '(', 'parent_header', '.', 'timestamp', '-', 'header', '.', 'timestamp', ',', 'header', '.', 'timestamp', ',', 'parent_header', '.', 'timestamp', ',', ')', ')', 'if', 'check_seal', ':', 'cls', '.', 'validate_seal', '(', 'header', ')']
:raise eth.exceptions.ValidationError: if the header is not valid
[':', 'raise', 'eth', '.', 'exceptions', '.', 'ValidationError', ':', 'if', 'the', 'header', 'is', 'not', 'valid']
train
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L879-L915
6,969
seequent/properties
properties/base/instance.py
Instance.assert_valid
def assert_valid(self, instance, value=None): """Checks if valid, including HasProperty instances pass validation""" valid = super(Instance, self).assert_valid(instance, value) if not valid: return False if value is None: value = instance._get(self.name) if isinstance(value, HasProperties): value.validate() return True
python
def assert_valid(self, instance, value=None): """Checks if valid, including HasProperty instances pass validation""" valid = super(Instance, self).assert_valid(instance, value) if not valid: return False if value is None: value = instance._get(self.name) if isinstance(value, HasProperties): value.validate() return True
['def', 'assert_valid', '(', 'self', ',', 'instance', ',', 'value', '=', 'None', ')', ':', 'valid', '=', 'super', '(', 'Instance', ',', 'self', ')', '.', 'assert_valid', '(', 'instance', ',', 'value', ')', 'if', 'not', 'valid', ':', 'return', 'False', 'if', 'value', 'is', 'None', ':', 'value', '=', 'instance', '.', '_get', '(', 'self', '.', 'name', ')', 'if', 'isinstance', '(', 'value', ',', 'HasProperties', ')', ':', 'value', '.', 'validate', '(', ')', 'return', 'True']
Checks if valid, including HasProperty instances pass validation
['Checks', 'if', 'valid', 'including', 'HasProperty', 'instances', 'pass', 'validation']
train
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/base/instance.py#L113-L122
6,970
jssimporter/python-jss
jss/jamf_software_server.py
JSS.from_pickle
def from_pickle(cls, path): """Load all objects from pickle file and return as dict. The dict returned will have keys named the same as the JSSObject classes contained, and the values will be JSSObjectLists of all full objects of that class (for example, the equivalent of my_jss.Computer().retrieve_all()). This method can potentially take a very long time! Pickling is Python's method for serializing/deserializing Python objects. This allows you to save a fully functional JSSObject to disk, and then load it later, without having to retrieve it from the JSS. Args: path: String file path to the file you wish to load from. Path will have ~ expanded prior to opening. """ with open(os.path.expanduser(path), "rb") as pickle: return cPickle.Unpickler(pickle).load()
python
def from_pickle(cls, path): """Load all objects from pickle file and return as dict. The dict returned will have keys named the same as the JSSObject classes contained, and the values will be JSSObjectLists of all full objects of that class (for example, the equivalent of my_jss.Computer().retrieve_all()). This method can potentially take a very long time! Pickling is Python's method for serializing/deserializing Python objects. This allows you to save a fully functional JSSObject to disk, and then load it later, without having to retrieve it from the JSS. Args: path: String file path to the file you wish to load from. Path will have ~ expanded prior to opening. """ with open(os.path.expanduser(path), "rb") as pickle: return cPickle.Unpickler(pickle).load()
['def', 'from_pickle', '(', 'cls', ',', 'path', ')', ':', 'with', 'open', '(', 'os', '.', 'path', '.', 'expanduser', '(', 'path', ')', ',', '"rb"', ')', 'as', 'pickle', ':', 'return', 'cPickle', '.', 'Unpickler', '(', 'pickle', ')', '.', 'load', '(', ')']
Load all objects from pickle file and return as dict. The dict returned will have keys named the same as the JSSObject classes contained, and the values will be JSSObjectLists of all full objects of that class (for example, the equivalent of my_jss.Computer().retrieve_all()). This method can potentially take a very long time! Pickling is Python's method for serializing/deserializing Python objects. This allows you to save a fully functional JSSObject to disk, and then load it later, without having to retrieve it from the JSS. Args: path: String file path to the file you wish to load from. Path will have ~ expanded prior to opening.
['Load', 'all', 'objects', 'from', 'pickle', 'file', 'and', 'return', 'as', 'dict', '.']
train
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jamf_software_server.py#L393-L413
6,971
rodluger/everest
everest/missions/k2/pipelines.py
plot
def plot(ID, pipeline='everest2', show=True, campaign=None): ''' Plots the de-trended flux for the given EPIC `ID` and for the specified `pipeline`. ''' # Get the data time, flux = get(ID, pipeline=pipeline, campaign=campaign) # Remove nans mask = np.where(np.isnan(flux))[0] time = np.delete(time, mask) flux = np.delete(flux, mask) # Plot it fig, ax = pl.subplots(1, figsize=(10, 4)) fig.subplots_adjust(bottom=0.15) ax.plot(time, flux, "k.", markersize=3, alpha=0.5) # Axis limits N = int(0.995 * len(flux)) hi, lo = flux[np.argsort(flux)][[N, -N]] pad = (hi - lo) * 0.1 ylim = (lo - pad, hi + pad) ax.set_ylim(ylim) # Show the CDPP from .k2 import CDPP ax.annotate('%.2f ppm' % CDPP(flux), xy=(0.98, 0.975), xycoords='axes fraction', ha='right', va='top', fontsize=12, color='r', zorder=99) # Appearance ax.margins(0, None) ax.set_xlabel("Time (BJD - 2454833)", fontsize=16) ax.set_ylabel("%s Flux" % pipeline.upper(), fontsize=16) fig.canvas.set_window_title("%s: EPIC %d" % (pipeline.upper(), ID)) if show: pl.show() pl.close() else: return fig, ax
python
def plot(ID, pipeline='everest2', show=True, campaign=None): ''' Plots the de-trended flux for the given EPIC `ID` and for the specified `pipeline`. ''' # Get the data time, flux = get(ID, pipeline=pipeline, campaign=campaign) # Remove nans mask = np.where(np.isnan(flux))[0] time = np.delete(time, mask) flux = np.delete(flux, mask) # Plot it fig, ax = pl.subplots(1, figsize=(10, 4)) fig.subplots_adjust(bottom=0.15) ax.plot(time, flux, "k.", markersize=3, alpha=0.5) # Axis limits N = int(0.995 * len(flux)) hi, lo = flux[np.argsort(flux)][[N, -N]] pad = (hi - lo) * 0.1 ylim = (lo - pad, hi + pad) ax.set_ylim(ylim) # Show the CDPP from .k2 import CDPP ax.annotate('%.2f ppm' % CDPP(flux), xy=(0.98, 0.975), xycoords='axes fraction', ha='right', va='top', fontsize=12, color='r', zorder=99) # Appearance ax.margins(0, None) ax.set_xlabel("Time (BJD - 2454833)", fontsize=16) ax.set_ylabel("%s Flux" % pipeline.upper(), fontsize=16) fig.canvas.set_window_title("%s: EPIC %d" % (pipeline.upper(), ID)) if show: pl.show() pl.close() else: return fig, ax
['def', 'plot', '(', 'ID', ',', 'pipeline', '=', "'everest2'", ',', 'show', '=', 'True', ',', 'campaign', '=', 'None', ')', ':', '# Get the data', 'time', ',', 'flux', '=', 'get', '(', 'ID', ',', 'pipeline', '=', 'pipeline', ',', 'campaign', '=', 'campaign', ')', '# Remove nans', 'mask', '=', 'np', '.', 'where', '(', 'np', '.', 'isnan', '(', 'flux', ')', ')', '[', '0', ']', 'time', '=', 'np', '.', 'delete', '(', 'time', ',', 'mask', ')', 'flux', '=', 'np', '.', 'delete', '(', 'flux', ',', 'mask', ')', '# Plot it', 'fig', ',', 'ax', '=', 'pl', '.', 'subplots', '(', '1', ',', 'figsize', '=', '(', '10', ',', '4', ')', ')', 'fig', '.', 'subplots_adjust', '(', 'bottom', '=', '0.15', ')', 'ax', '.', 'plot', '(', 'time', ',', 'flux', ',', '"k."', ',', 'markersize', '=', '3', ',', 'alpha', '=', '0.5', ')', '# Axis limits', 'N', '=', 'int', '(', '0.995', '*', 'len', '(', 'flux', ')', ')', 'hi', ',', 'lo', '=', 'flux', '[', 'np', '.', 'argsort', '(', 'flux', ')', ']', '[', '[', 'N', ',', '-', 'N', ']', ']', 'pad', '=', '(', 'hi', '-', 'lo', ')', '*', '0.1', 'ylim', '=', '(', 'lo', '-', 'pad', ',', 'hi', '+', 'pad', ')', 'ax', '.', 'set_ylim', '(', 'ylim', ')', '# Show the CDPP', 'from', '.', 'k2', 'import', 'CDPP', 'ax', '.', 'annotate', '(', "'%.2f ppm'", '%', 'CDPP', '(', 'flux', ')', ',', 'xy', '=', '(', '0.98', ',', '0.975', ')', ',', 'xycoords', '=', "'axes fraction'", ',', 'ha', '=', "'right'", ',', 'va', '=', "'top'", ',', 'fontsize', '=', '12', ',', 'color', '=', "'r'", ',', 'zorder', '=', '99', ')', '# Appearance', 'ax', '.', 'margins', '(', '0', ',', 'None', ')', 'ax', '.', 'set_xlabel', '(', '"Time (BJD - 2454833)"', ',', 'fontsize', '=', '16', ')', 'ax', '.', 'set_ylabel', '(', '"%s Flux"', '%', 'pipeline', '.', 'upper', '(', ')', ',', 'fontsize', '=', '16', ')', 'fig', '.', 'canvas', '.', 'set_window_title', '(', '"%s: EPIC %d"', '%', '(', 'pipeline', '.', 'upper', '(', ')', ',', 'ID', ')', ')', 'if', 'show', ':', 'pl', '.', 'show', '(', ')', 'pl', '.', 'close', '(', ')', 'else', ':', 'return', 'fig', ',', 'ax']
Plots the de-trended flux for the given EPIC `ID` and for the specified `pipeline`.
['Plots', 'the', 'de', '-', 'trended', 'flux', 'for', 'the', 'given', 'EPIC', 'ID', 'and', 'for', 'the', 'specified', 'pipeline', '.']
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pipelines.py#L91-L134
6,972
Neurosim-lab/netpyne
doc/source/code/HHCellFile.py
Cell.createNetcon
def createNetcon(self, thresh=10): """ created netcon to record spikes """ nc = h.NetCon(self.soma(0.5)._ref_v, None, sec = self.soma) nc.threshold = thresh return nc
python
def createNetcon(self, thresh=10): """ created netcon to record spikes """ nc = h.NetCon(self.soma(0.5)._ref_v, None, sec = self.soma) nc.threshold = thresh return nc
['def', 'createNetcon', '(', 'self', ',', 'thresh', '=', '10', ')', ':', 'nc', '=', 'h', '.', 'NetCon', '(', 'self', '.', 'soma', '(', '0.5', ')', '.', '_ref_v', ',', 'None', ',', 'sec', '=', 'self', '.', 'soma', ')', 'nc', '.', 'threshold', '=', 'thresh', 'return', 'nc']
created netcon to record spikes
['created', 'netcon', 'to', 'record', 'spikes']
train
https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/doc/source/code/HHCellFile.py#L42-L46
6,973
jpscaletti/solution
solution/fields/select.py
MultiSelect._clean_data
def _clean_data(self, str_value, file_data, obj_value): """This overwrite is neccesary for work with multivalues""" str_value = str_value or None obj_value = obj_value or None return (str_value, None, obj_value)
python
def _clean_data(self, str_value, file_data, obj_value): """This overwrite is neccesary for work with multivalues""" str_value = str_value or None obj_value = obj_value or None return (str_value, None, obj_value)
['def', '_clean_data', '(', 'self', ',', 'str_value', ',', 'file_data', ',', 'obj_value', ')', ':', 'str_value', '=', 'str_value', 'or', 'None', 'obj_value', '=', 'obj_value', 'or', 'None', 'return', '(', 'str_value', ',', 'None', ',', 'obj_value', ')']
This override is necessary for working with multivalues
['This', 'overwrite', 'is', 'neccesary', 'for', 'work', 'with', 'multivalues']
train
https://github.com/jpscaletti/solution/blob/eabafd8e695bbb0209242e002dbcc05ffb327f43/solution/fields/select.py#L286-L290
6,974
clalancette/pycdlib
pycdlib/udf.py
UDFTimestamp.parse
def parse(self, data): # type: (bytes) -> None ''' Parse the passed in data into a UDF Timestamp. Parameters: data - The data to parse. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Timestamp already initialized') (tz, timetype, self.year, self.month, self.day, self.hour, self.minute, self.second, self.centiseconds, self.hundreds_microseconds, self.microseconds) = struct.unpack_from(self.FMT, data, 0) self.timetype = timetype >> 4 def twos_comp(val, bits): # type: (int, int) -> int ''' Compute the 2's complement of int value val ''' if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255 val = val - (1 << bits) # compute negative value return val # return positive value as is self.tz = twos_comp(((timetype & 0xf) << 8) | tz, 12) if self.tz < -1440 or self.tz > 1440: if self.tz != -2047: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF timezone') if self.year < 1 or self.year > 9999: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF year') if self.month < 1 or self.month > 12: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF month') if self.day < 1 or self.day > 31: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF day') if self.hour < 0 or self.hour > 23: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF hour') if self.minute < 0 or self.minute > 59: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF minute') if self.second < 0 or self.second > 59: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF second') self._initialized = True
python
def parse(self, data): # type: (bytes) -> None ''' Parse the passed in data into a UDF Timestamp. Parameters: data - The data to parse. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Timestamp already initialized') (tz, timetype, self.year, self.month, self.day, self.hour, self.minute, self.second, self.centiseconds, self.hundreds_microseconds, self.microseconds) = struct.unpack_from(self.FMT, data, 0) self.timetype = timetype >> 4 def twos_comp(val, bits): # type: (int, int) -> int ''' Compute the 2's complement of int value val ''' if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255 val = val - (1 << bits) # compute negative value return val # return positive value as is self.tz = twos_comp(((timetype & 0xf) << 8) | tz, 12) if self.tz < -1440 or self.tz > 1440: if self.tz != -2047: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF timezone') if self.year < 1 or self.year > 9999: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF year') if self.month < 1 or self.month > 12: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF month') if self.day < 1 or self.day > 31: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF day') if self.hour < 0 or self.hour > 23: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF hour') if self.minute < 0 or self.minute > 59: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF minute') if self.second < 0 or self.second > 59: raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF second') self._initialized = True
['def', 'parse', '(', 'self', ',', 'data', ')', ':', '# type: (bytes) -> None', 'if', 'self', '.', '_initialized', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInternalError', '(', "'UDF Timestamp already initialized'", ')', '(', 'tz', ',', 'timetype', ',', 'self', '.', 'year', ',', 'self', '.', 'month', ',', 'self', '.', 'day', ',', 'self', '.', 'hour', ',', 'self', '.', 'minute', ',', 'self', '.', 'second', ',', 'self', '.', 'centiseconds', ',', 'self', '.', 'hundreds_microseconds', ',', 'self', '.', 'microseconds', ')', '=', 'struct', '.', 'unpack_from', '(', 'self', '.', 'FMT', ',', 'data', ',', '0', ')', 'self', '.', 'timetype', '=', 'timetype', '>>', '4', 'def', 'twos_comp', '(', 'val', ',', 'bits', ')', ':', '# type: (int, int) -> int', "'''\n Compute the 2's complement of int value val\n '''", 'if', '(', 'val', '&', '(', '1', '<<', '(', 'bits', '-', '1', ')', ')', ')', '!=', '0', ':', '# if sign bit is set e.g., 8bit: 128-255', 'val', '=', 'val', '-', '(', '1', '<<', 'bits', ')', '# compute negative value', 'return', 'val', '# return positive value as is', 'self', '.', 'tz', '=', 'twos_comp', '(', '(', '(', 'timetype', '&', '0xf', ')', '<<', '8', ')', '|', 'tz', ',', '12', ')', 'if', 'self', '.', 'tz', '<', '-', '1440', 'or', 'self', '.', 'tz', '>', '1440', ':', 'if', 'self', '.', 'tz', '!=', '-', '2047', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid UDF timezone'", ')', 'if', 'self', '.', 'year', '<', '1', 'or', 'self', '.', 'year', '>', '9999', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid UDF year'", ')', 'if', 'self', '.', 'month', '<', '1', 'or', 'self', '.', 'month', '>', '12', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid UDF month'", ')', 'if', 'self', '.', 'day', '<', '1', 'or', 'self', '.', 'day', '>', '31', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid UDF day'", ')', 'if', 'self', '.', 'hour', '<', '0', 'or', 'self', '.', 'hour', '>', '23', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid UDF hour'", ')', 'if', 'self', '.', 'minute', '<', '0', 'or', 'self', '.', 'minute', '>', '59', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid UDF minute'", ')', 'if', 'self', '.', 'second', '<', '0', 'or', 'self', '.', 'second', '>', '59', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInvalidISO', '(', "'Invalid UDF second'", ')', 'self', '.', '_initialized', '=', 'True']
Parse the passed in data into a UDF Timestamp. Parameters: data - The data to parse. Returns: Nothing.
['Parse', 'the', 'passed', 'in', 'data', 'into', 'a', 'UDF', 'Timestamp', '.']
train
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L763-L808
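The nested twos_comp helper above turns the 12-bit timezone field into a signed offset in minutes. A standalone check of how that sign-extension behaves at the boundaries:

def twos_comp(val, bits):
    # Interpret `val` as a signed integer of width `bits` (two's complement).
    if (val & (1 << (bits - 1))) != 0:   # sign bit set -> negative value
        val = val - (1 << bits)
    return val

print(twos_comp(0x000, 12))   # 0
print(twos_comp(0x7FF, 12))   # 2047, the largest positive 12-bit value
print(twos_comp(0x800, 12))   # -2048, only the sign bit set
print(twos_comp(0xFFF, 12))   # -1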
6,975
romaryd/python-jsonrepo
jsonrepo/record.py
namedtuple_asdict
def namedtuple_asdict(obj): """ Serializing a nested namedtuple into a Python dict """ if obj is None: return obj if hasattr(obj, "_asdict"): # detect namedtuple return OrderedDict(zip(obj._fields, (namedtuple_asdict(item) for item in obj))) if isinstance(obj, str): # iterables - strings return obj if hasattr(obj, "keys"): # iterables - mapping return OrderedDict(zip(obj.keys(), (namedtuple_asdict(item) for item in obj.values()))) if hasattr(obj, "__iter__"): # iterables - sequence return type(obj)((namedtuple_asdict(item) for item in obj)) # non-iterable cannot contain namedtuples return obj
python
def namedtuple_asdict(obj): """ Serializing a nested namedtuple into a Python dict """ if obj is None: return obj if hasattr(obj, "_asdict"): # detect namedtuple return OrderedDict(zip(obj._fields, (namedtuple_asdict(item) for item in obj))) if isinstance(obj, str): # iterables - strings return obj if hasattr(obj, "keys"): # iterables - mapping return OrderedDict(zip(obj.keys(), (namedtuple_asdict(item) for item in obj.values()))) if hasattr(obj, "__iter__"): # iterables - sequence return type(obj)((namedtuple_asdict(item) for item in obj)) # non-iterable cannot contain namedtuples return obj
['def', 'namedtuple_asdict', '(', 'obj', ')', ':', 'if', 'obj', 'is', 'None', ':', 'return', 'obj', 'if', 'hasattr', '(', 'obj', ',', '"_asdict"', ')', ':', '# detect namedtuple', 'return', 'OrderedDict', '(', 'zip', '(', 'obj', '.', '_fields', ',', '(', 'namedtuple_asdict', '(', 'item', ')', 'for', 'item', 'in', 'obj', ')', ')', ')', 'if', 'isinstance', '(', 'obj', ',', 'str', ')', ':', '# iterables - strings', 'return', 'obj', 'if', 'hasattr', '(', 'obj', ',', '"keys"', ')', ':', '# iterables - mapping', 'return', 'OrderedDict', '(', 'zip', '(', 'obj', '.', 'keys', '(', ')', ',', '(', 'namedtuple_asdict', '(', 'item', ')', 'for', 'item', 'in', 'obj', '.', 'values', '(', ')', ')', ')', ')', 'if', 'hasattr', '(', 'obj', ',', '"__iter__"', ')', ':', '# iterables - sequence', 'return', 'type', '(', 'obj', ')', '(', '(', 'namedtuple_asdict', '(', 'item', ')', 'for', 'item', 'in', 'obj', ')', ')', '# non-iterable cannot contain namedtuples', 'return', 'obj']
Serializing a nested namedtuple into a Python dict
['Serializing', 'a', 'nested', 'namedtuple', 'into', 'a', 'Python', 'dict']
train
https://github.com/romaryd/python-jsonrepo/blob/08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d/jsonrepo/record.py#L11-L28
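A round-trip check of the recursive conversion above, assuming the python-jsonrepo package is installed so the function can be imported as in the URL; nested namedtuples become nested OrderedDicts:

from collections import namedtuple
from jsonrepo.record import namedtuple_asdict

Point = namedtuple("Point", "x y")
Segment = namedtuple("Segment", "start end")

seg = Segment(start=Point(0, 1), end=Point(2, 3))
print(namedtuple_asdict(seg))
# OrderedDict([('start', OrderedDict([('x', 0), ('y', 1)])),
#              ('end',   OrderedDict([('x', 2), ('y', 3)]))])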
6,976
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Parser.py
AdvancedHTMLParser.handle_data
def handle_data(self, data): ''' Internal for parsing ''' if data: inTag = self._inTag if len(inTag) > 0: inTag[-1].appendText(data) elif data.strip(): #and not self.getRoot(): # Must be text prior to or after root node raise MultipleRootNodeException()
python
def handle_data(self, data): ''' Internal for parsing ''' if data: inTag = self._inTag if len(inTag) > 0: inTag[-1].appendText(data) elif data.strip(): #and not self.getRoot(): # Must be text prior to or after root node raise MultipleRootNodeException()
['def', 'handle_data', '(', 'self', ',', 'data', ')', ':', 'if', 'data', ':', 'inTag', '=', 'self', '.', '_inTag', 'if', 'len', '(', 'inTag', ')', '>', '0', ':', 'inTag', '[', '-', '1', ']', '.', 'appendText', '(', 'data', ')', 'elif', 'data', '.', 'strip', '(', ')', ':', '#and not self.getRoot():', '# Must be text prior to or after root node', 'raise', 'MultipleRootNodeException', '(', ')']
Internal for parsing
['Internal', 'for', 'parsing']
train
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L176-L186
6,977
rochacbruno/dynaconf
dynaconf/utils/__init__.py
object_merge
def object_merge(old, new, unique=False): """ Recursively merge two data structures. :param unique: When set to True existing list items are not set. """ if isinstance(old, list) and isinstance(new, list): if old == new: return for item in old[::-1]: if unique and item in new: continue new.insert(0, item) if isinstance(old, dict) and isinstance(new, dict): for key, value in old.items(): if key not in new: new[key] = value else: object_merge(value, new[key])
python
def object_merge(old, new, unique=False): """ Recursively merge two data structures. :param unique: When set to True existing list items are not set. """ if isinstance(old, list) and isinstance(new, list): if old == new: return for item in old[::-1]: if unique and item in new: continue new.insert(0, item) if isinstance(old, dict) and isinstance(new, dict): for key, value in old.items(): if key not in new: new[key] = value else: object_merge(value, new[key])
['def', 'object_merge', '(', 'old', ',', 'new', ',', 'unique', '=', 'False', ')', ':', 'if', 'isinstance', '(', 'old', ',', 'list', ')', 'and', 'isinstance', '(', 'new', ',', 'list', ')', ':', 'if', 'old', '==', 'new', ':', 'return', 'for', 'item', 'in', 'old', '[', ':', ':', '-', '1', ']', ':', 'if', 'unique', 'and', 'item', 'in', 'new', ':', 'continue', 'new', '.', 'insert', '(', '0', ',', 'item', ')', 'if', 'isinstance', '(', 'old', ',', 'dict', ')', 'and', 'isinstance', '(', 'new', ',', 'dict', ')', ':', 'for', 'key', ',', 'value', 'in', 'old', '.', 'items', '(', ')', ':', 'if', 'key', 'not', 'in', 'new', ':', 'new', '[', 'key', ']', '=', 'value', 'else', ':', 'object_merge', '(', 'value', ',', 'new', '[', 'key', ']', ')']
Recursively merge two data structures. :param unique: When set to True existing list items are not set.
['Recursively', 'merge', 'two', 'data', 'structures', '.']
train
https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/utils/__init__.py#L20-L38
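A behaviour check for the merge above, assuming a dynaconf version matching the record is installed; note that `new` is mutated in place, list items from `old` are prepended, and dict keys from `old` are only added when missing:

from dynaconf.utils import object_merge

new_list = ["c"]
object_merge(["a", "b"], new_list)
print(new_list)     # ['a', 'b', 'c'] -- old items are inserted at the front

new_dict = {"db": {"port": 5432}}
object_merge({"db": {"host": "localhost", "port": 5433}}, new_dict)
print(new_dict)     # {'db': {'port': 5432, 'host': 'localhost'}} -- existing keys win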
6,978
qiniu/python-sdk
qiniu/services/storage/bucket.py
BucketManager.move
def move(self, bucket, key, bucket_to, key_to, force='false'): """移动文件: 将资源从一个空间到另一个空间,具体规格参考: http://developer.qiniu.com/docs/v6/api/reference/rs/move.html Args: bucket: 待操作资源所在空间 bucket_to: 目标资源空间名 key: 待操作资源文件名 key_to: 目标资源文件名 Returns: 一个dict变量,成功返回NULL,失败返回{"error": "<errMsg string>"} 一个ResponseInfo对象 """ resource = entry(bucket, key) to = entry(bucket_to, key_to) return self.__rs_do('move', resource, to, 'force/{0}'.format(force))
python
def move(self, bucket, key, bucket_to, key_to, force='false'): """移动文件: 将资源从一个空间到另一个空间,具体规格参考: http://developer.qiniu.com/docs/v6/api/reference/rs/move.html Args: bucket: 待操作资源所在空间 bucket_to: 目标资源空间名 key: 待操作资源文件名 key_to: 目标资源文件名 Returns: 一个dict变量,成功返回NULL,失败返回{"error": "<errMsg string>"} 一个ResponseInfo对象 """ resource = entry(bucket, key) to = entry(bucket_to, key_to) return self.__rs_do('move', resource, to, 'force/{0}'.format(force))
['def', 'move', '(', 'self', ',', 'bucket', ',', 'key', ',', 'bucket_to', ',', 'key_to', ',', 'force', '=', "'false'", ')', ':', 'resource', '=', 'entry', '(', 'bucket', ',', 'key', ')', 'to', '=', 'entry', '(', 'bucket_to', ',', 'key_to', ')', 'return', 'self', '.', '__rs_do', '(', "'move'", ',', 'resource', ',', 'to', ',', "'force/{0}'", '.', 'format', '(', 'force', ')', ')']
Move a file: move a resource from one bucket (space) to another; for the exact specification see: http://developer.qiniu.com/docs/v6/api/reference/rs/move.html Args: bucket: bucket holding the resource to operate on bucket_to: target bucket name key: key (file name) of the resource to operate on key_to: target key (file name) Returns: a dict, NULL on success or {"error": "<errMsg string>"} on failure a ResponseInfo object
['Move', 'a', 'file', ':']
train
https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/services/storage/bucket.py#L124-L142
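A hedged usage sketch for BucketManager.move; the access key, secret key, bucket names, and object keys below are placeholders, and the Auth/BucketManager setup is assumed to follow the SDK's usual pattern.

from qiniu import Auth, BucketManager

auth = Auth("<ACCESS_KEY>", "<SECRET_KEY>")   # placeholder credentials
bucket_manager = BucketManager(auth)

# Move a key from one bucket to another; pass force='true' to overwrite the target.
ret, info = bucket_manager.move("src-bucket", "a.txt", "dst-bucket", "b.txt", force="false")
print(info.status_code)  # ret is None (NULL) on success; info is the ResponseInfo object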
6,979
mapbox/mapboxgl-jupyter
mapboxgl/viz.py
VectorMixin.generate_vector_color_map
def generate_vector_color_map(self): """Generate color stops array for use with match expression in mapbox template""" vector_stops = [] # if join data specified as filename or URL, parse JSON to list of Python dicts if type(self.data) == str: self.data = geojson_to_dict_list(self.data) # loop through features in self.data to create join-data map for row in self.data: # map color to JSON feature using color_property color = color_map(row[self.color_property], self.color_stops, self.color_default) # link to vector feature using data_join_property (from JSON object) vector_stops.append([row[self.data_join_property], color]) return vector_stops
python
def generate_vector_color_map(self): """Generate color stops array for use with match expression in mapbox template""" vector_stops = [] # if join data specified as filename or URL, parse JSON to list of Python dicts if type(self.data) == str: self.data = geojson_to_dict_list(self.data) # loop through features in self.data to create join-data map for row in self.data: # map color to JSON feature using color_property color = color_map(row[self.color_property], self.color_stops, self.color_default) # link to vector feature using data_join_property (from JSON object) vector_stops.append([row[self.data_join_property], color]) return vector_stops
['def', 'generate_vector_color_map', '(', 'self', ')', ':', 'vector_stops', '=', '[', ']', '# if join data specified as filename or URL, parse JSON to list of Python dicts', 'if', 'type', '(', 'self', '.', 'data', ')', '==', 'str', ':', 'self', '.', 'data', '=', 'geojson_to_dict_list', '(', 'self', '.', 'data', ')', '# loop through features in self.data to create join-data map', 'for', 'row', 'in', 'self', '.', 'data', ':', '# map color to JSON feature using color_property', 'color', '=', 'color_map', '(', 'row', '[', 'self', '.', 'color_property', ']', ',', 'self', '.', 'color_stops', ',', 'self', '.', 'color_default', ')', '# link to vector feature using data_join_property (from JSON object)', 'vector_stops', '.', 'append', '(', '[', 'row', '[', 'self', '.', 'data_join_property', ']', ',', 'color', ']', ')', 'return', 'vector_stops']
Generate color stops array for use with match expression in mapbox template
['Generate', 'color', 'stops', 'array', 'for', 'use', 'with', 'match', 'expression', 'in', 'mapbox', 'template']
train
https://github.com/mapbox/mapboxgl-jupyter/blob/f6e403c13eaa910e70659c7d179e8e32ce95ae34/mapboxgl/viz.py#L20-L37
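A simplified, self-contained sketch of the join-data mapping this method builds; `pick_color` below is a stand-in for mapboxgl's color_map helper (not the real implementation), and the sample rows are made up.

def pick_color(value, stops, default):
    """Return the color of the last stop whose threshold the value meets."""
    color = default
    for threshold, stop_color in stops:
        if value >= threshold:
            color = stop_color
    return color

color_stops = [(0, "#edf8b1"), (50, "#7fcdbb"), (100, "#2c7fb8")]
rows = [
    {"id": "CA", "density": 97.9},
    {"id": "WY", "density": 2.3},
]

# One [join_value, color] pair per row, ready for a Mapbox GL 'match' expression.
vector_stops = [[row["id"], pick_color(row["density"], color_stops, "grey")] for row in rows]
print(vector_stops)  # [['CA', '#7fcdbb'], ['WY', '#edf8b1']]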
6,980
peergradeio/flask-mongo-profiler
flask_mongo_profiler/contrib/flask_admin/formatters/profiling.py
profiling_query_formatter
def profiling_query_formatter(view, context, query_document, name): """Format a ProfilingQuery entry for a ProfilingRequest detail field Parameters ---------- query_document : model.ProfilingQuery """ return Markup( ''.join( [ '<div class="pymongo-query row">', '<div class="col-md-1">', '<a href="{}">'.format(query_document.get_admin_url(_external=True)), mongo_command_name_formatter( view, context, query_document, 'command_name' ), # '<span class="label {}">{}</span>'.format( # command_name_map.get(query_document.command_name, 'label-default'), # query_document.command_name, # ), '</div>', '<div class="col-md-10">', profiling_pure_query_formatter( None, None, query_document, 'command', tag='pre' ), '</div>', '<div class="col-md-1">', '<small>{} ms</small>'.format(query_document.duration), '</a>', '</div>', '</div>', ] ) )
python
def profiling_query_formatter(view, context, query_document, name): """Format a ProfilingQuery entry for a ProfilingRequest detail field Parameters ---------- query_document : model.ProfilingQuery """ return Markup( ''.join( [ '<div class="pymongo-query row">', '<div class="col-md-1">', '<a href="{}">'.format(query_document.get_admin_url(_external=True)), mongo_command_name_formatter( view, context, query_document, 'command_name' ), # '<span class="label {}">{}</span>'.format( # command_name_map.get(query_document.command_name, 'label-default'), # query_document.command_name, # ), '</div>', '<div class="col-md-10">', profiling_pure_query_formatter( None, None, query_document, 'command', tag='pre' ), '</div>', '<div class="col-md-1">', '<small>{} ms</small>'.format(query_document.duration), '</a>', '</div>', '</div>', ] ) )
['def', 'profiling_query_formatter', '(', 'view', ',', 'context', ',', 'query_document', ',', 'name', ')', ':', 'return', 'Markup', '(', "''", '.', 'join', '(', '[', '\'<div class="pymongo-query row">\'', ',', '\'<div class="col-md-1">\'', ',', '\'<a href="{}">\'', '.', 'format', '(', 'query_document', '.', 'get_admin_url', '(', '_external', '=', 'True', ')', ')', ',', 'mongo_command_name_formatter', '(', 'view', ',', 'context', ',', 'query_document', ',', "'command_name'", ')', ',', '# \'<span class="label {}">{}</span>\'.format(', "# command_name_map.get(query_document.command_name, 'label-default'),", '# query_document.command_name,', '# ),', "'</div>'", ',', '\'<div class="col-md-10">\'', ',', 'profiling_pure_query_formatter', '(', 'None', ',', 'None', ',', 'query_document', ',', "'command'", ',', 'tag', '=', "'pre'", ')', ',', "'</div>'", ',', '\'<div class="col-md-1">\'', ',', "'<small>{} ms</small>'", '.', 'format', '(', 'query_document', '.', 'duration', ')', ',', "'</a>'", ',', "'</div>'", ',', "'</div>'", ',', ']', ')', ')']
Format a ProfilingQuery entry for a ProfilingRequest detail field Parameters ---------- query_document : model.ProfilingQuery
['Format', 'a', 'ProfilingQuery', 'entry', 'for', 'a', 'ProfilingRequest', 'detail', 'field']
train
https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/formatters/profiling.py#L94-L127
6,981
StackStorm/pybind
pybind/slxos/v17s_1_02/__init__.py
brocade_pw_profile._set_pw_profile
def _set_pw_profile(self, v, load=False): """ Setter method for pw_profile, mapped from YANG variable /pw_profile (list) If this variable is read-only (config: false) in the source YANG file, then _set_pw_profile is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_pw_profile() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("pw_profile_name",pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pw-profile-name', extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}), is_container='list', yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}, namespace='urn:brocade.com:mgmt:brocade-pw-profile', defining_module='brocade-pw-profile', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """pw_profile must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("pw_profile_name",pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pw-profile-name', extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}), is_container='list', yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}, namespace='urn:brocade.com:mgmt:brocade-pw-profile', defining_module='brocade-pw-profile', yang_type='list', is_config=True)""", }) self.__pw_profile = t if hasattr(self, '_set'): self._set()
python
def _set_pw_profile(self, v, load=False): """ Setter method for pw_profile, mapped from YANG variable /pw_profile (list) If this variable is read-only (config: false) in the source YANG file, then _set_pw_profile is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_pw_profile() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("pw_profile_name",pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pw-profile-name', extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}), is_container='list', yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}, namespace='urn:brocade.com:mgmt:brocade-pw-profile', defining_module='brocade-pw-profile', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """pw_profile must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("pw_profile_name",pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pw-profile-name', extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}), is_container='list', yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}, namespace='urn:brocade.com:mgmt:brocade-pw-profile', defining_module='brocade-pw-profile', yang_type='list', is_config=True)""", }) self.__pw_profile = t if hasattr(self, '_set'): self._set()
['def', '_set_pw_profile', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'YANGListType', '(', '"pw_profile_name"', ',', 'pw_profile', '.', 'pw_profile', ',', 'yang_name', '=', '"pw-profile"', ',', 'rest_name', '=', '"pw-profile"', ',', 'parent', '=', 'self', ',', 'is_container', '=', "'list'", ',', 'user_ordered', '=', 'False', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'yang_keys', '=', "'pw-profile-name'", ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'pw-profile for Node Specific configuration'", ',', "u'callpoint'", ':', "u'PWProfileBasicCallpoint'", ',', "u'cli-mode-name'", ':', "u'config-pw-profile-$(pw-profile-name)'", '}', '}', ')', ',', 'is_container', '=', "'list'", ',', 'yang_name', '=', '"pw-profile"', ',', 'rest_name', '=', '"pw-profile"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'pw-profile for Node Specific configuration'", ',', "u'callpoint'", ':', "u'PWProfileBasicCallpoint'", ',', "u'cli-mode-name'", ':', "u'config-pw-profile-$(pw-profile-name)'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-pw-profile'", ',', 'defining_module', '=', "'brocade-pw-profile'", ',', 'yang_type', '=', "'list'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""pw_profile must be of a type compatible with list"""', ',', "'defined-type'", ':', '"list"', ',', "'generated-type'", ':', '"""YANGDynClass(base=YANGListType("pw_profile_name",pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'pw-profile-name\', extensions={u\'tailf-common\': {u\'info\': u\'pw-profile for Node Specific configuration\', u\'callpoint\': u\'PWProfileBasicCallpoint\', u\'cli-mode-name\': u\'config-pw-profile-$(pw-profile-name)\'}}), is_container=\'list\', yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'pw-profile for Node Specific configuration\', u\'callpoint\': u\'PWProfileBasicCallpoint\', u\'cli-mode-name\': u\'config-pw-profile-$(pw-profile-name)\'}}, namespace=\'urn:brocade.com:mgmt:brocade-pw-profile\', defining_module=\'brocade-pw-profile\', yang_type=\'list\', is_config=True)"""', ',', '}', ')', 'self', '.', '__pw_profile', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for pw_profile, mapped from YANG variable /pw_profile (list) If this variable is read-only (config: false) in the source YANG file, then _set_pw_profile is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_pw_profile() directly.
['Setter', 'method', 'for', 'pw_profile', 'mapped', 'from', 'YANG', 'variable', '/', 'pw_profile', '(', 'list', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_pw_profile', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_pw_profile', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L11394-L11415
6,982
rigetti/quantumflow
quantumflow/backend/numpybk.py
inner
def inner(tensor0: BKTensor, tensor1: BKTensor) -> BKTensor: """Return the inner product between two tensors""" # Note: Relying on fact that vdot flattens arrays return np.vdot(tensor0, tensor1)
python
def inner(tensor0: BKTensor, tensor1: BKTensor) -> BKTensor: """Return the inner product between two tensors""" # Note: Relying on fact that vdot flattens arrays return np.vdot(tensor0, tensor1)
['def', 'inner', '(', 'tensor0', ':', 'BKTensor', ',', 'tensor1', ':', 'BKTensor', ')', '->', 'BKTensor', ':', '# Note: Relying on fact that vdot flattens arrays', 'return', 'np', '.', 'vdot', '(', 'tensor0', ',', 'tensor1', ')']
Return the inner product between two tensors
['Return', 'the', 'inner', 'product', 'between', 'two', 'tensors']
train
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/backend/numpybk.py#L125-L128
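A minimal numpy sketch of what this backend's inner product delegates to: np.vdot conjugates the first argument and flattens both tensors, so it matches the quantum-state inner product <a|b>.

import numpy as np

ket0 = np.array([1, 0], dtype=complex)               # |0>
plus = np.array([1, 1], dtype=complex) / np.sqrt(2)  # |+>

print(np.vdot(ket0, plus))  # (0.7071067811865475+0j), i.e. <0|+> = 1/sqrt(2)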
6,983
SHTOOLS/SHTOOLS
pyshtools/utils/datetime.py
_yyyymmdd_to_year_fraction
def _yyyymmdd_to_year_fraction(date): """Convert YYYMMDD.DD date string or float to YYYY.YYY""" date = str(date) if '.' in date: date, residual = str(date).split('.') residual = float('0.' + residual) else: residual = 0.0 date = _datetime.datetime.strptime(date, '%Y%m%d') date += _datetime.timedelta(days=residual) year = date.year year_start = _datetime.datetime(year=year, month=1, day=1) next_year_start = _datetime.datetime(year=year + 1, month=1, day=1) year_duration = next_year_start - year_start year_elapsed = date - year_start fraction = year_elapsed / year_duration return year + fraction
python
def _yyyymmdd_to_year_fraction(date): """Convert YYYMMDD.DD date string or float to YYYY.YYY""" date = str(date) if '.' in date: date, residual = str(date).split('.') residual = float('0.' + residual) else: residual = 0.0 date = _datetime.datetime.strptime(date, '%Y%m%d') date += _datetime.timedelta(days=residual) year = date.year year_start = _datetime.datetime(year=year, month=1, day=1) next_year_start = _datetime.datetime(year=year + 1, month=1, day=1) year_duration = next_year_start - year_start year_elapsed = date - year_start fraction = year_elapsed / year_duration return year + fraction
['def', '_yyyymmdd_to_year_fraction', '(', 'date', ')', ':', 'date', '=', 'str', '(', 'date', ')', 'if', "'.'", 'in', 'date', ':', 'date', ',', 'residual', '=', 'str', '(', 'date', ')', '.', 'split', '(', "'.'", ')', 'residual', '=', 'float', '(', "'0.'", '+', 'residual', ')', 'else', ':', 'residual', '=', '0.0', 'date', '=', '_datetime', '.', 'datetime', '.', 'strptime', '(', 'date', ',', "'%Y%m%d'", ')', 'date', '+=', '_datetime', '.', 'timedelta', '(', 'days', '=', 'residual', ')', 'year', '=', 'date', '.', 'year', 'year_start', '=', '_datetime', '.', 'datetime', '(', 'year', '=', 'year', ',', 'month', '=', '1', ',', 'day', '=', '1', ')', 'next_year_start', '=', '_datetime', '.', 'datetime', '(', 'year', '=', 'year', '+', '1', ',', 'month', '=', '1', ',', 'day', '=', '1', ')', 'year_duration', '=', 'next_year_start', '-', 'year_start', 'year_elapsed', '=', 'date', '-', 'year_start', 'fraction', '=', 'year_elapsed', '/', 'year_duration', 'return', 'year', '+', 'fraction']
Convert YYYMMDD.DD date string or float to YYYY.YYY
['Convert', 'YYYMMDD', '.', 'DD', 'date', 'string', 'or', 'float', 'to', 'YYYY', '.', 'YYY']
train
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/utils/datetime.py#L11-L31
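A standalone sketch of the same conversion logic (the original helper is private to pyshtools); the example date is arbitrary.

import datetime

def yyyymmdd_to_year_fraction(date):
    # Split off an optional fractional-day part, e.g. '20200701.5' adds half a day.
    date = str(date)
    if "." in date:
        date, residual = date.split(".")
        residual = float("0." + residual)
    else:
        residual = 0.0
    dt = datetime.datetime.strptime(date, "%Y%m%d") + datetime.timedelta(days=residual)
    year_start = datetime.datetime(dt.year, 1, 1)
    next_year_start = datetime.datetime(dt.year + 1, 1, 1)
    return dt.year + (dt - year_start) / (next_year_start - year_start)

print(yyyymmdd_to_year_fraction("20200701"))  # ~2020.497 (182 days into a 366-day year)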
6,984
bachya/py17track
py17track/client.py
Client._request
async def _request( self, method: str, url: str, *, headers: dict = None, params: dict = None, json: dict = None) -> dict: """Make a request against the RainMachine device.""" if not headers: headers = {} try: async with self._websession.request(method, url, headers=headers, params=params, json=json) as resp: resp.raise_for_status() data = await resp.json(content_type=None) return data except ClientError as err: raise RequestError( 'Error requesting data from {}: {}'.format(url, err))
python
async def _request( self, method: str, url: str, *, headers: dict = None, params: dict = None, json: dict = None) -> dict: """Make a request against the RainMachine device.""" if not headers: headers = {} try: async with self._websession.request(method, url, headers=headers, params=params, json=json) as resp: resp.raise_for_status() data = await resp.json(content_type=None) return data except ClientError as err: raise RequestError( 'Error requesting data from {}: {}'.format(url, err))
['async', 'def', '_request', '(', 'self', ',', 'method', ':', 'str', ',', 'url', ':', 'str', ',', '*', ',', 'headers', ':', 'dict', '=', 'None', ',', 'params', ':', 'dict', '=', 'None', ',', 'json', ':', 'dict', '=', 'None', ')', '->', 'dict', ':', 'if', 'not', 'headers', ':', 'headers', '=', '{', '}', 'try', ':', 'async', 'with', 'self', '.', '_websession', '.', 'request', '(', 'method', ',', 'url', ',', 'headers', '=', 'headers', ',', 'params', '=', 'params', ',', 'json', '=', 'json', ')', 'as', 'resp', ':', 'resp', '.', 'raise_for_status', '(', ')', 'data', '=', 'await', 'resp', '.', 'json', '(', 'content_type', '=', 'None', ')', 'return', 'data', 'except', 'ClientError', 'as', 'err', ':', 'raise', 'RequestError', '(', "'Error requesting data from {}: {}'", '.', 'format', '(', 'url', ',', 'err', ')', ')']
Make a request against the RainMachine device.
['Make', 'a', 'request', 'against', 'the', 'RainMachine', 'device', '.']
train
https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/client.py#L22-L43
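A standalone sketch of the same aiohttp request pattern (not the library's Client class itself); the URL is a placeholder.

import asyncio
from aiohttp import ClientError, ClientSession

async def fetch_json(url: str) -> dict:
    async with ClientSession() as session:
        try:
            async with session.request("get", url) as resp:
                resp.raise_for_status()
                # content_type=None skips the strict application/json check.
                return await resp.json(content_type=None)
        except ClientError as err:
            raise RuntimeError("Error requesting data from {}: {}".format(url, err))

# asyncio.run(fetch_json("https://example.com/api"))  # returns the decoded JSON body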
6,985
PGower/PyCanvas
pycanvas/apis/assignment_groups.py
AssignmentGroupsAPI.create_assignment_group
def create_assignment_group(self, course_id, group_weight=None, integration_data=None, name=None, position=None, rules=None, sis_source_id=None): """ Create an Assignment Group. Create a new assignment group for this course. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - name """The assignment group's name""" if name is not None: data["name"] = name # OPTIONAL - position """The position of this assignment group in relation to the other assignment groups""" if position is not None: data["position"] = position # OPTIONAL - group_weight """The percent of the total grade that this assignment group represents""" if group_weight is not None: data["group_weight"] = group_weight # OPTIONAL - sis_source_id """The sis source id of the Assignment Group""" if sis_source_id is not None: data["sis_source_id"] = sis_source_id # OPTIONAL - integration_data """The integration data of the Assignment Group""" if integration_data is not None: data["integration_data"] = integration_data # OPTIONAL - rules """The grading rules that are applied within this assignment group See the Assignment Group object definition for format""" if rules is not None: data["rules"] = rules self.logger.debug("POST /api/v1/courses/{course_id}/assignment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/assignment_groups".format(**path), data=data, params=params, single_item=True)
python
def create_assignment_group(self, course_id, group_weight=None, integration_data=None, name=None, position=None, rules=None, sis_source_id=None): """ Create an Assignment Group. Create a new assignment group for this course. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - name """The assignment group's name""" if name is not None: data["name"] = name # OPTIONAL - position """The position of this assignment group in relation to the other assignment groups""" if position is not None: data["position"] = position # OPTIONAL - group_weight """The percent of the total grade that this assignment group represents""" if group_weight is not None: data["group_weight"] = group_weight # OPTIONAL - sis_source_id """The sis source id of the Assignment Group""" if sis_source_id is not None: data["sis_source_id"] = sis_source_id # OPTIONAL - integration_data """The integration data of the Assignment Group""" if integration_data is not None: data["integration_data"] = integration_data # OPTIONAL - rules """The grading rules that are applied within this assignment group See the Assignment Group object definition for format""" if rules is not None: data["rules"] = rules self.logger.debug("POST /api/v1/courses/{course_id}/assignment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/assignment_groups".format(**path), data=data, params=params, single_item=True)
['def', 'create_assignment_group', '(', 'self', ',', 'course_id', ',', 'group_weight', '=', 'None', ',', 'integration_data', '=', 'None', ',', 'name', '=', 'None', ',', 'position', '=', 'None', ',', 'rules', '=', 'None', ',', 'sis_source_id', '=', 'None', ')', ':', 'path', '=', '{', '}', 'data', '=', '{', '}', 'params', '=', '{', '}', '# REQUIRED - PATH - course_id\r', '"""ID"""', 'path', '[', '"course_id"', ']', '=', 'course_id', '# OPTIONAL - name\r', '"""The assignment group\'s name"""', 'if', 'name', 'is', 'not', 'None', ':', 'data', '[', '"name"', ']', '=', 'name', '# OPTIONAL - position\r', '"""The position of this assignment group in relation to the other assignment groups"""', 'if', 'position', 'is', 'not', 'None', ':', 'data', '[', '"position"', ']', '=', 'position', '# OPTIONAL - group_weight\r', '"""The percent of the total grade that this assignment group represents"""', 'if', 'group_weight', 'is', 'not', 'None', ':', 'data', '[', '"group_weight"', ']', '=', 'group_weight', '# OPTIONAL - sis_source_id\r', '"""The sis source id of the Assignment Group"""', 'if', 'sis_source_id', 'is', 'not', 'None', ':', 'data', '[', '"sis_source_id"', ']', '=', 'sis_source_id', '# OPTIONAL - integration_data\r', '"""The integration data of the Assignment Group"""', 'if', 'integration_data', 'is', 'not', 'None', ':', 'data', '[', '"integration_data"', ']', '=', 'integration_data', '# OPTIONAL - rules\r', '"""The grading rules that are applied within this assignment group\r\n See the Assignment Group object definition for format"""', 'if', 'rules', 'is', 'not', 'None', ':', 'data', '[', '"rules"', ']', '=', 'rules', 'self', '.', 'logger', '.', 'debug', '(', '"POST /api/v1/courses/{course_id}/assignment_groups with query params: {params} and form data: {data}"', '.', 'format', '(', 'params', '=', 'params', ',', 'data', '=', 'data', ',', '*', '*', 'path', ')', ')', 'return', 'self', '.', 'generic_request', '(', '"POST"', ',', '"/api/v1/courses/{course_id}/assignment_groups"', '.', 'format', '(', '*', '*', 'path', ')', ',', 'data', '=', 'data', ',', 'params', '=', 'params', ',', 'single_item', '=', 'True', ')']
Create an Assignment Group. Create a new assignment group for this course.
['Create', 'an', 'Assignment', 'Group', '.', 'Create', 'a', 'new', 'assignment', 'group', 'for', 'this', 'course', '.']
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignment_groups.py#L112-L158
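A hypothetical call sketch; the instance URL, token, and course id are placeholders, and the AssignmentGroupsAPI constructor arguments are an assumption about how PyCanvas API classes are configured, not confirmed by this record.

from pycanvas.apis.assignment_groups import AssignmentGroupsAPI

# Assumed constructor: a Canvas instance address plus an API access token (placeholder values).
api = AssignmentGroupsAPI("https://canvas.example.edu", "<API_TOKEN>")

group = api.create_assignment_group(
    course_id=1234,      # placeholder course id
    name="Homework",
    position=1,
    group_weight=40,     # percent of the total grade
)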
6,986
bwohlberg/sporco
sporco/admm/cbpdn.py
GenericConvBPDN.ystep
def ystep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`. If this method is not overridden, the problem is solved without any regularisation other than the option enforcement of non-negativity of the solution and filter boundary crossing supression. When it is overridden, it should be explicitly called at the end of the overriding method. """ if self.opt['NonNegCoef']: self.Y[self.Y < 0.0] = 0.0 if self.opt['NoBndryCross']: for n in range(0, self.cri.dimN): self.Y[(slice(None),) * n + (slice(1 - self.D.shape[n], None),)] = 0.0
python
def ystep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`. If this method is not overridden, the problem is solved without any regularisation other than the option enforcement of non-negativity of the solution and filter boundary crossing supression. When it is overridden, it should be explicitly called at the end of the overriding method. """ if self.opt['NonNegCoef']: self.Y[self.Y < 0.0] = 0.0 if self.opt['NoBndryCross']: for n in range(0, self.cri.dimN): self.Y[(slice(None),) * n + (slice(1 - self.D.shape[n], None),)] = 0.0
['def', 'ystep', '(', 'self', ')', ':', 'if', 'self', '.', 'opt', '[', "'NonNegCoef'", ']', ':', 'self', '.', 'Y', '[', 'self', '.', 'Y', '<', '0.0', ']', '=', '0.0', 'if', 'self', '.', 'opt', '[', "'NoBndryCross'", ']', ':', 'for', 'n', 'in', 'range', '(', '0', ',', 'self', '.', 'cri', '.', 'dimN', ')', ':', 'self', '.', 'Y', '[', '(', 'slice', '(', 'None', ')', ',', ')', '*', 'n', '+', '(', 'slice', '(', '1', '-', 'self', '.', 'D', '.', 'shape', '[', 'n', ']', ',', 'None', ')', ',', ')', ']', '=', '0.0']
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`. If this method is not overridden, the problem is solved without any regularisation other than the option enforcement of non-negativity of the solution and filter boundary crossing supression. When it is overridden, it should be explicitly called at the end of the overriding method.
['r', 'Minimise', 'Augmented', 'Lagrangian', 'with', 'respect', 'to', ':', 'math', ':', '\\', 'mathbf', '{', 'y', '}', '.', 'If', 'this', 'method', 'is', 'not', 'overridden', 'the', 'problem', 'is', 'solved', 'without', 'any', 'regularisation', 'other', 'than', 'the', 'option', 'enforcement', 'of', 'non', '-', 'negativity', 'of', 'the', 'solution', 'and', 'filter', 'boundary', 'crossing', 'supression', '.', 'When', 'it', 'is', 'overridden', 'it', 'should', 'be', 'explicitly', 'called', 'at', 'the', 'end', 'of', 'the', 'overriding', 'method', '.']
train
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/cbpdn.py#L287-L301
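A self-contained numpy sketch of the two constraints ystep enforces; the array shape and filter size are illustrative only, not tied to a real solver state.

import numpy as np

Y = np.random.randn(16, 16, 1, 1, 4)  # coefficient maps, spatial axes first
filter_shape = (8, 8)                 # spatial support of the dictionary filters

# NonNegCoef: clip negative coefficients to zero.
Y[Y < 0.0] = 0.0

# NoBndryCross: zero the trailing (filter_size - 1) samples on each spatial axis,
# so filters placed there cannot wrap across the signal boundary.
for n, size in enumerate(filter_shape):
    Y[(slice(None),) * n + (slice(1 - size, None),)] = 0.0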
6,987
Cognexa/cxflow
cxflow/cli/eval.py
predict
def predict(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None: """ Run prediction from the specified config path. If the config contains a ``predict`` section: - override hooks with ``predict.hooks`` if present - update dataset, model and main loop sections if the respective sections are present :param config_path: path to the config file or the directory in which it is stored :param restore_from: backend-specific path to the already trained model to be restored from. If ``None`` is passed, it is inferred from the configuration file location as the directory it is located in. :param cl_arguments: additional command line arguments which will update the configuration :param output_root: output root in which the training directory will be created """ config = None try: config_path = find_config(config_path) restore_from = restore_from or path.dirname(config_path) config = load_config(config_file=config_path, additional_args=cl_arguments) if 'predict' in config: for section in ['dataset', 'model', 'main_loop']: if section in config['predict']: config[section].update(config['predict'][section]) if 'hooks' in config['predict']: config['hooks'] = config['predict']['hooks'] else: logging.warning('Config does not contain `predict.hooks` section. ' 'No hook will be employed during the prediction.') config['hooks'] = [] validate_config(config) logging.debug('\tLoaded config: %s', config) except Exception as ex: # pylint: disable=broad-except fallback('Loading config failed', ex) run(config=config, output_root=output_root, restore_from=restore_from, eval='predict')
python
def predict(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None: """ Run prediction from the specified config path. If the config contains a ``predict`` section: - override hooks with ``predict.hooks`` if present - update dataset, model and main loop sections if the respective sections are present :param config_path: path to the config file or the directory in which it is stored :param restore_from: backend-specific path to the already trained model to be restored from. If ``None`` is passed, it is inferred from the configuration file location as the directory it is located in. :param cl_arguments: additional command line arguments which will update the configuration :param output_root: output root in which the training directory will be created """ config = None try: config_path = find_config(config_path) restore_from = restore_from or path.dirname(config_path) config = load_config(config_file=config_path, additional_args=cl_arguments) if 'predict' in config: for section in ['dataset', 'model', 'main_loop']: if section in config['predict']: config[section].update(config['predict'][section]) if 'hooks' in config['predict']: config['hooks'] = config['predict']['hooks'] else: logging.warning('Config does not contain `predict.hooks` section. ' 'No hook will be employed during the prediction.') config['hooks'] = [] validate_config(config) logging.debug('\tLoaded config: %s', config) except Exception as ex: # pylint: disable=broad-except fallback('Loading config failed', ex) run(config=config, output_root=output_root, restore_from=restore_from, eval='predict')
['def', 'predict', '(', 'config_path', ':', 'str', ',', 'restore_from', ':', 'Optional', '[', 'str', ']', ',', 'cl_arguments', ':', 'Iterable', '[', 'str', ']', ',', 'output_root', ':', 'str', ')', '->', 'None', ':', 'config', '=', 'None', 'try', ':', 'config_path', '=', 'find_config', '(', 'config_path', ')', 'restore_from', '=', 'restore_from', 'or', 'path', '.', 'dirname', '(', 'config_path', ')', 'config', '=', 'load_config', '(', 'config_file', '=', 'config_path', ',', 'additional_args', '=', 'cl_arguments', ')', 'if', "'predict'", 'in', 'config', ':', 'for', 'section', 'in', '[', "'dataset'", ',', "'model'", ',', "'main_loop'", ']', ':', 'if', 'section', 'in', 'config', '[', "'predict'", ']', ':', 'config', '[', 'section', ']', '.', 'update', '(', 'config', '[', "'predict'", ']', '[', 'section', ']', ')', 'if', "'hooks'", 'in', 'config', '[', "'predict'", ']', ':', 'config', '[', "'hooks'", ']', '=', 'config', '[', "'predict'", ']', '[', "'hooks'", ']', 'else', ':', 'logging', '.', 'warning', '(', "'Config does not contain `predict.hooks` section. '", "'No hook will be employed during the prediction.'", ')', 'config', '[', "'hooks'", ']', '=', '[', ']', 'validate_config', '(', 'config', ')', 'logging', '.', 'debug', '(', "'\\tLoaded config: %s'", ',', 'config', ')', 'except', 'Exception', 'as', 'ex', ':', '# pylint: disable=broad-except', 'fallback', '(', "'Loading config failed'", ',', 'ex', ')', 'run', '(', 'config', '=', 'config', ',', 'output_root', '=', 'output_root', ',', 'restore_from', '=', 'restore_from', ',', 'eval', '=', "'predict'", ')']
Run prediction from the specified config path. If the config contains a ``predict`` section: - override hooks with ``predict.hooks`` if present - update dataset, model and main loop sections if the respective sections are present :param config_path: path to the config file or the directory in which it is stored :param restore_from: backend-specific path to the already trained model to be restored from. If ``None`` is passed, it is inferred from the configuration file location as the directory it is located in. :param cl_arguments: additional command line arguments which will update the configuration :param output_root: output root in which the training directory will be created
['Run', 'prediction', 'from', 'the', 'specified', 'config', 'path', '.']
train
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/eval.py#L60-L99
6,988
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
bed
def bed(args): """ %prog bed anchorsfile Convert ANCHORS file to BED format. """ from collections import defaultdict from jcvi.compara.synteny import AnchorFile, check_beds from jcvi.formats.bed import Bed from jcvi.formats.base import get_number p = OptionParser(bed.__doc__) p.add_option("--switch", default=False, action="store_true", help="Switch reference and aligned map elements") p.add_option("--scale", type="float", help="Scale the aligned map distance by factor") p.set_beds() p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) anchorsfile, = args switch = opts.switch scale = opts.scale ac = AnchorFile(anchorsfile) pairs = defaultdict(list) for a, b, block_id in ac.iter_pairs(): pairs[a].append(b) qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts) bd = Bed() for q in qbed: qseqid, qstart, qend, qaccn = q.seqid, q.start, q.end, q.accn if qaccn not in pairs: continue for s in pairs[qaccn]: si, s = sorder[s] sseqid, sstart, send, saccn = s.seqid, s.start, s.end, s.accn if switch: qseqid, sseqid = sseqid, qseqid qstart, sstart = sstart, qstart qend, send = send, qend qaccn, saccn = saccn, qaccn if scale: sstart /= scale try: newsseqid = get_number(sseqid) except ValueError: raise ValueError("`{0}` is on `{1}` with no number to extract".\ format(saccn, sseqid)) bedline = "\t".join(str(x) for x in (qseqid, qstart - 1, qend, "{0}:{1}".format(newsseqid, sstart))) bd.add(bedline) bd.print_to_file(filename=opts.outfile, sorted=True)
python
def bed(args): """ %prog bed anchorsfile Convert ANCHORS file to BED format. """ from collections import defaultdict from jcvi.compara.synteny import AnchorFile, check_beds from jcvi.formats.bed import Bed from jcvi.formats.base import get_number p = OptionParser(bed.__doc__) p.add_option("--switch", default=False, action="store_true", help="Switch reference and aligned map elements") p.add_option("--scale", type="float", help="Scale the aligned map distance by factor") p.set_beds() p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) anchorsfile, = args switch = opts.switch scale = opts.scale ac = AnchorFile(anchorsfile) pairs = defaultdict(list) for a, b, block_id in ac.iter_pairs(): pairs[a].append(b) qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts) bd = Bed() for q in qbed: qseqid, qstart, qend, qaccn = q.seqid, q.start, q.end, q.accn if qaccn not in pairs: continue for s in pairs[qaccn]: si, s = sorder[s] sseqid, sstart, send, saccn = s.seqid, s.start, s.end, s.accn if switch: qseqid, sseqid = sseqid, qseqid qstart, sstart = sstart, qstart qend, send = send, qend qaccn, saccn = saccn, qaccn if scale: sstart /= scale try: newsseqid = get_number(sseqid) except ValueError: raise ValueError("`{0}` is on `{1}` with no number to extract".\ format(saccn, sseqid)) bedline = "\t".join(str(x) for x in (qseqid, qstart - 1, qend, "{0}:{1}".format(newsseqid, sstart))) bd.add(bedline) bd.print_to_file(filename=opts.outfile, sorted=True)
['def', 'bed', '(', 'args', ')', ':', 'from', 'collections', 'import', 'defaultdict', 'from', 'jcvi', '.', 'compara', '.', 'synteny', 'import', 'AnchorFile', ',', 'check_beds', 'from', 'jcvi', '.', 'formats', '.', 'bed', 'import', 'Bed', 'from', 'jcvi', '.', 'formats', '.', 'base', 'import', 'get_number', 'p', '=', 'OptionParser', '(', 'bed', '.', '__doc__', ')', 'p', '.', 'add_option', '(', '"--switch"', ',', 'default', '=', 'False', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"Switch reference and aligned map elements"', ')', 'p', '.', 'add_option', '(', '"--scale"', ',', 'type', '=', '"float"', ',', 'help', '=', '"Scale the aligned map distance by factor"', ')', 'p', '.', 'set_beds', '(', ')', 'p', '.', 'set_outfile', '(', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '1', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'anchorsfile', ',', '=', 'args', 'switch', '=', 'opts', '.', 'switch', 'scale', '=', 'opts', '.', 'scale', 'ac', '=', 'AnchorFile', '(', 'anchorsfile', ')', 'pairs', '=', 'defaultdict', '(', 'list', ')', 'for', 'a', ',', 'b', ',', 'block_id', 'in', 'ac', '.', 'iter_pairs', '(', ')', ':', 'pairs', '[', 'a', ']', '.', 'append', '(', 'b', ')', 'qbed', ',', 'sbed', ',', 'qorder', ',', 'sorder', ',', 'is_self', '=', 'check_beds', '(', 'anchorsfile', ',', 'p', ',', 'opts', ')', 'bd', '=', 'Bed', '(', ')', 'for', 'q', 'in', 'qbed', ':', 'qseqid', ',', 'qstart', ',', 'qend', ',', 'qaccn', '=', 'q', '.', 'seqid', ',', 'q', '.', 'start', ',', 'q', '.', 'end', ',', 'q', '.', 'accn', 'if', 'qaccn', 'not', 'in', 'pairs', ':', 'continue', 'for', 's', 'in', 'pairs', '[', 'qaccn', ']', ':', 'si', ',', 's', '=', 'sorder', '[', 's', ']', 'sseqid', ',', 'sstart', ',', 'send', ',', 'saccn', '=', 's', '.', 'seqid', ',', 's', '.', 'start', ',', 's', '.', 'end', ',', 's', '.', 'accn', 'if', 'switch', ':', 'qseqid', ',', 'sseqid', '=', 'sseqid', ',', 'qseqid', 'qstart', ',', 'sstart', '=', 'sstart', ',', 'qstart', 'qend', ',', 'send', '=', 'send', ',', 'qend', 'qaccn', ',', 'saccn', '=', 'saccn', ',', 'qaccn', 'if', 'scale', ':', 'sstart', '/=', 'scale', 'try', ':', 'newsseqid', '=', 'get_number', '(', 'sseqid', ')', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', '"`{0}` is on `{1}` with no number to extract"', '.', 'format', '(', 'saccn', ',', 'sseqid', ')', ')', 'bedline', '=', '"\\t"', '.', 'join', '(', 'str', '(', 'x', ')', 'for', 'x', 'in', '(', 'qseqid', ',', 'qstart', '-', '1', ',', 'qend', ',', '"{0}:{1}"', '.', 'format', '(', 'newsseqid', ',', 'sstart', ')', ')', ')', 'bd', '.', 'add', '(', 'bedline', ')', 'bd', '.', 'print_to_file', '(', 'filename', '=', 'opts', '.', 'outfile', ',', 'sorted', '=', 'True', ')']
%prog bed anchorsfile Convert ANCHORS file to BED format.
['%prog', 'bed', 'anchorsfile']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L128-L184
6,989
ssanderson/interface
interface/interface.py
InterfaceMeta._invalid_implementation
def _invalid_implementation(self, t, missing, mistyped, mismatched): """ Make a TypeError explaining why ``t`` doesn't implement our interface. """ assert missing or mistyped or mismatched, "Implementation wasn't invalid." message = "\nclass {C} failed to implement interface {I}:".format( C=getname(t), I=getname(self), ) if missing: message += dedent( """ The following methods of {I} were not implemented: {missing_methods}""" ).format( I=getname(self), missing_methods=self._format_missing_methods(missing) ) if mistyped: message += dedent( """ The following methods of {I} were implemented with incorrect types: {mismatched_types}""" ).format( I=getname(self), mismatched_types=self._format_mismatched_types(mistyped), ) if mismatched: message += dedent( """ The following methods of {I} were implemented with invalid signatures: {mismatched_methods}""" ).format( I=getname(self), mismatched_methods=self._format_mismatched_methods(mismatched), ) return InvalidImplementation(message)
python
def _invalid_implementation(self, t, missing, mistyped, mismatched): """ Make a TypeError explaining why ``t`` doesn't implement our interface. """ assert missing or mistyped or mismatched, "Implementation wasn't invalid." message = "\nclass {C} failed to implement interface {I}:".format( C=getname(t), I=getname(self), ) if missing: message += dedent( """ The following methods of {I} were not implemented: {missing_methods}""" ).format( I=getname(self), missing_methods=self._format_missing_methods(missing) ) if mistyped: message += dedent( """ The following methods of {I} were implemented with incorrect types: {mismatched_types}""" ).format( I=getname(self), mismatched_types=self._format_mismatched_types(mistyped), ) if mismatched: message += dedent( """ The following methods of {I} were implemented with invalid signatures: {mismatched_methods}""" ).format( I=getname(self), mismatched_methods=self._format_mismatched_methods(mismatched), ) return InvalidImplementation(message)
['def', '_invalid_implementation', '(', 'self', ',', 't', ',', 'missing', ',', 'mistyped', ',', 'mismatched', ')', ':', 'assert', 'missing', 'or', 'mistyped', 'or', 'mismatched', ',', '"Implementation wasn\'t invalid."', 'message', '=', '"\\nclass {C} failed to implement interface {I}:"', '.', 'format', '(', 'C', '=', 'getname', '(', 't', ')', ',', 'I', '=', 'getname', '(', 'self', ')', ',', ')', 'if', 'missing', ':', 'message', '+=', 'dedent', '(', '"""\n\n The following methods of {I} were not implemented:\n {missing_methods}"""', ')', '.', 'format', '(', 'I', '=', 'getname', '(', 'self', ')', ',', 'missing_methods', '=', 'self', '.', '_format_missing_methods', '(', 'missing', ')', ')', 'if', 'mistyped', ':', 'message', '+=', 'dedent', '(', '"""\n\n The following methods of {I} were implemented with incorrect types:\n {mismatched_types}"""', ')', '.', 'format', '(', 'I', '=', 'getname', '(', 'self', ')', ',', 'mismatched_types', '=', 'self', '.', '_format_mismatched_types', '(', 'mistyped', ')', ',', ')', 'if', 'mismatched', ':', 'message', '+=', 'dedent', '(', '"""\n\n The following methods of {I} were implemented with invalid signatures:\n {mismatched_methods}"""', ')', '.', 'format', '(', 'I', '=', 'getname', '(', 'self', ')', ',', 'mismatched_methods', '=', 'self', '.', '_format_mismatched_methods', '(', 'mismatched', ')', ',', ')', 'return', 'InvalidImplementation', '(', 'message', ')']
Make a TypeError explaining why ``t`` doesn't implement our interface.
['Make', 'a', 'TypeError', 'explaining', 'why', 't', 'doesn', 't', 'implement', 'our', 'interface', '.']
train
https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L189-L231
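A sketch of how this error surfaces to users of the interface package; the Serializer example classes are made up. Since InvalidImplementation is the TypeError described above, the failure can be caught as a TypeError at class-definition time.

from interface import Interface, implements

class Serializer(Interface):
    def dumps(self, obj):
        pass

try:
    # Omitting dumps() makes class creation fail with the message built above.
    class BadSerializer(implements(Serializer)):
        pass
except TypeError as err:
    print(err)  # lists the unimplemented methods of Serializer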
6,990
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_tracker.py
TrackerModule.mavlink_packet
def mavlink_packet(self, m): '''handle an incoming mavlink packet from the master vehicle. Relay it to the tracker if it is a GLOBAL_POSITION_INT''' if m.get_type() in ['GLOBAL_POSITION_INT', 'SCALED_PRESSURE']: connection = self.find_connection() if not connection: return if m.get_srcSystem() != connection.target_system: connection.mav.send(m)
python
def mavlink_packet(self, m): '''handle an incoming mavlink packet from the master vehicle. Relay it to the tracker if it is a GLOBAL_POSITION_INT''' if m.get_type() in ['GLOBAL_POSITION_INT', 'SCALED_PRESSURE']: connection = self.find_connection() if not connection: return if m.get_srcSystem() != connection.target_system: connection.mav.send(m)
['def', 'mavlink_packet', '(', 'self', ',', 'm', ')', ':', 'if', 'm', '.', 'get_type', '(', ')', 'in', '[', "'GLOBAL_POSITION_INT'", ',', "'SCALED_PRESSURE'", ']', ':', 'connection', '=', 'self', '.', 'find_connection', '(', ')', 'if', 'not', 'connection', ':', 'return', 'if', 'm', '.', 'get_srcSystem', '(', ')', '!=', 'connection', '.', 'target_system', ':', 'connection', '.', 'mav', '.', 'send', '(', 'm', ')']
handle an incoming mavlink packet from the master vehicle. Relay it to the tracker if it is a GLOBAL_POSITION_INT
['handle', 'an', 'incoming', 'mavlink', 'packet', 'from', 'the', 'master', 'vehicle', '.', 'Relay', 'it', 'to', 'the', 'tracker', 'if', 'it', 'is', 'a', 'GLOBAL_POSITION_INT']
train
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_tracker.py#L131-L139
6,991
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_all_reminders
def get_all_reminders(self, params=None): """ Get all reminders This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """ if not params: params = {} return self._iterate_through_pages(self.get_reminders_per_page, resource=REMINDERS, **{'params': params})
python
def get_all_reminders(self, params=None): """ Get all reminders This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """ if not params: params = {} return self._iterate_through_pages(self.get_reminders_per_page, resource=REMINDERS, **{'params': params})
['def', 'get_all_reminders', '(', 'self', ',', 'params', '=', 'None', ')', ':', 'if', 'not', 'params', ':', 'params', '=', '{', '}', 'return', 'self', '.', '_iterate_through_pages', '(', 'self', '.', 'get_reminders_per_page', ',', 'resource', '=', 'REMINDERS', ',', '*', '*', '{', "'params'", ':', 'params', '}', ')']
Get all reminders This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list
['Get', 'all', 'reminders', 'This', 'will', 'iterate', 'over', 'all', 'pages', 'until', 'it', 'gets', 'all', 'elements', '.', 'So', 'if', 'the', 'rate', 'limit', 'exceeded', 'it', 'will', 'throw', 'an', 'Exception', 'and', 'you', 'will', 'get', 'nothing']
train
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3250-L3261
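A hedged usage sketch; the credentials are placeholders and the Billomapy constructor argument order is assumed from the project's usual setup.

from billomapy import Billomapy

client = Billomapy("<BILLOMAT_ID>", "<API_KEY>", "<APP_ID>", "<APP_SECRET>")

# Iterates over every page of results; raises if the API rate limit is exceeded.
reminders = client.get_all_reminders()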
6,992
tensorflow/tensorboard
tensorboard/plugins/graph/graphs_plugin.py
GraphsPlugin.info_impl
def info_impl(self): """Returns a dict of all runs and tags and their data availabilities.""" result = {} def add_row_item(run, tag=None): run_item = result.setdefault(run, { 'run': run, 'tags': {}, # A run-wide GraphDef of ops. 'run_graph': False}) tag_item = None if tag: tag_item = run_item.get('tags').setdefault(tag, { 'tag': tag, 'conceptual_graph': False, # A tagged GraphDef of ops. 'op_graph': False, 'profile': False}) return (run_item, tag_item) mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_RUN_METADATA_WITH_GRAPH) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): # The Summary op is defined in TensorFlow and does not use a stringified proto # as a content of plugin data. It contains single string that denotes a version. # https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L789-L790 if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['op_graph'] = True # Tensors associated with plugin name _PLUGIN_NAME_RUN_METADATA contain # both op graph and profile information. mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_RUN_METADATA) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['profile'] = True tag_item['op_graph'] = True # Tensors associated with plugin name _PLUGIN_NAME_KERAS_MODEL contain # serialized Keras model in JSON format. mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_KERAS_MODEL) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['conceptual_graph'] = True for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()): if run_data.get(event_accumulator.GRAPH): (run_item, _) = add_row_item(run_name, None) run_item['run_graph'] = True for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()): if event_accumulator.RUN_METADATA in run_data: for tag in run_data[event_accumulator.RUN_METADATA]: (_, tag_item) = add_row_item(run_name, tag) tag_item['profile'] = True return result
python
def info_impl(self): """Returns a dict of all runs and tags and their data availabilities.""" result = {} def add_row_item(run, tag=None): run_item = result.setdefault(run, { 'run': run, 'tags': {}, # A run-wide GraphDef of ops. 'run_graph': False}) tag_item = None if tag: tag_item = run_item.get('tags').setdefault(tag, { 'tag': tag, 'conceptual_graph': False, # A tagged GraphDef of ops. 'op_graph': False, 'profile': False}) return (run_item, tag_item) mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_RUN_METADATA_WITH_GRAPH) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): # The Summary op is defined in TensorFlow and does not use a stringified proto # as a content of plugin data. It contains single string that denotes a version. # https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L789-L790 if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['op_graph'] = True # Tensors associated with plugin name _PLUGIN_NAME_RUN_METADATA contain # both op graph and profile information. mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_RUN_METADATA) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['profile'] = True tag_item['op_graph'] = True # Tensors associated with plugin name _PLUGIN_NAME_KERAS_MODEL contain # serialized Keras model in JSON format. mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_KERAS_MODEL) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['conceptual_graph'] = True for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()): if run_data.get(event_accumulator.GRAPH): (run_item, _) = add_row_item(run_name, None) run_item['run_graph'] = True for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()): if event_accumulator.RUN_METADATA in run_data: for tag in run_data[event_accumulator.RUN_METADATA]: (_, tag_item) = add_row_item(run_name, tag) tag_item['profile'] = True return result
['def', 'info_impl', '(', 'self', ')', ':', 'result', '=', '{', '}', 'def', 'add_row_item', '(', 'run', ',', 'tag', '=', 'None', ')', ':', 'run_item', '=', 'result', '.', 'setdefault', '(', 'run', ',', '{', "'run'", ':', 'run', ',', "'tags'", ':', '{', '}', ',', '# A run-wide GraphDef of ops.', "'run_graph'", ':', 'False', '}', ')', 'tag_item', '=', 'None', 'if', 'tag', ':', 'tag_item', '=', 'run_item', '.', 'get', '(', "'tags'", ')', '.', 'setdefault', '(', 'tag', ',', '{', "'tag'", ':', 'tag', ',', "'conceptual_graph'", ':', 'False', ',', '# A tagged GraphDef of ops.', "'op_graph'", ':', 'False', ',', "'profile'", ':', 'False', '}', ')', 'return', '(', 'run_item', ',', 'tag_item', ')', 'mapping', '=', 'self', '.', '_multiplexer', '.', 'PluginRunToTagToContent', '(', '_PLUGIN_NAME_RUN_METADATA_WITH_GRAPH', ')', 'for', 'run_name', ',', 'tag_to_content', 'in', 'six', '.', 'iteritems', '(', 'mapping', ')', ':', 'for', '(', 'tag', ',', 'content', ')', 'in', 'six', '.', 'iteritems', '(', 'tag_to_content', ')', ':', '# The Summary op is defined in TensorFlow and does not use a stringified proto', '# as a content of plugin data. It contains single string that denotes a version.', '# https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L789-L790', 'if', 'content', '!=', "b'1'", ':', 'logger', '.', 'warn', '(', "'Ignoring unrecognizable version of RunMetadata.'", ')', 'continue', '(', '_', ',', 'tag_item', ')', '=', 'add_row_item', '(', 'run_name', ',', 'tag', ')', 'tag_item', '[', "'op_graph'", ']', '=', 'True', '# Tensors associated with plugin name _PLUGIN_NAME_RUN_METADATA contain', '# both op graph and profile information.', 'mapping', '=', 'self', '.', '_multiplexer', '.', 'PluginRunToTagToContent', '(', '_PLUGIN_NAME_RUN_METADATA', ')', 'for', 'run_name', ',', 'tag_to_content', 'in', 'six', '.', 'iteritems', '(', 'mapping', ')', ':', 'for', '(', 'tag', ',', 'content', ')', 'in', 'six', '.', 'iteritems', '(', 'tag_to_content', ')', ':', 'if', 'content', '!=', "b'1'", ':', 'logger', '.', 'warn', '(', "'Ignoring unrecognizable version of RunMetadata.'", ')', 'continue', '(', '_', ',', 'tag_item', ')', '=', 'add_row_item', '(', 'run_name', ',', 'tag', ')', 'tag_item', '[', "'profile'", ']', '=', 'True', 'tag_item', '[', "'op_graph'", ']', '=', 'True', '# Tensors associated with plugin name _PLUGIN_NAME_KERAS_MODEL contain', '# serialized Keras model in JSON format.', 'mapping', '=', 'self', '.', '_multiplexer', '.', 'PluginRunToTagToContent', '(', '_PLUGIN_NAME_KERAS_MODEL', ')', 'for', 'run_name', ',', 'tag_to_content', 'in', 'six', '.', 'iteritems', '(', 'mapping', ')', ':', 'for', '(', 'tag', ',', 'content', ')', 'in', 'six', '.', 'iteritems', '(', 'tag_to_content', ')', ':', 'if', 'content', '!=', "b'1'", ':', 'logger', '.', 'warn', '(', "'Ignoring unrecognizable version of RunMetadata.'", ')', 'continue', '(', '_', ',', 'tag_item', ')', '=', 'add_row_item', '(', 'run_name', ',', 'tag', ')', 'tag_item', '[', "'conceptual_graph'", ']', '=', 'True', 'for', '(', 'run_name', ',', 'run_data', ')', 'in', 'six', '.', 'iteritems', '(', 'self', '.', '_multiplexer', '.', 'Runs', '(', ')', ')', ':', 'if', 'run_data', '.', 'get', '(', 'event_accumulator', '.', 'GRAPH', ')', ':', '(', 'run_item', ',', '_', ')', '=', 'add_row_item', '(', 'run_name', ',', 'None', ')', 'run_item', '[', "'run_graph'", ']', '=', 'True', 'for', '(', 'run_name', ',', 'run_data', ')', 'in', 'six', '.', 'iteritems', '(', 'self', '.', '_multiplexer', '.', 'Runs', '(', ')', ')', ':', 'if', 'event_accumulator', '.', 'RUN_METADATA', 'in', 'run_data', ':', 'for', 'tag', 'in', 'run_data', '[', 'event_accumulator', '.', 'RUN_METADATA', ']', ':', '(', '_', ',', 'tag_item', ')', '=', 'add_row_item', '(', 'run_name', ',', 'tag', ')', 'tag_item', '[', "'profile'", ']', '=', 'True', 'return', 'result']
Returns a dict of all runs and tags and their data availabilities.
['Returns', 'a', 'dict', 'of', 'all', 'runs', 'and', 'tags', 'and', 'their', 'data', 'availabilities', '.']
train
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/graphs_plugin.py#L74-L143
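The one-line docstring above is terse, so here is a sketch of the shape of the dict that info_impl builds via add_row_item; the run and tag names are illustrative, not taken from the source record.

# Illustrative return value of info_impl(); the keys mirror the defaults set in add_row_item.
result = {
    'train': {
        'run': 'train',
        'run_graph': True,                  # a run-wide GraphDef was found for this run
        'tags': {
            'batch_2': {
                'tag': 'batch_2',
                'conceptual_graph': False,  # would be True if a Keras-model JSON tag existed
                'op_graph': True,           # set from the run-metadata plugin tags
                'profile': True,            # set when RunMetadata with profiling data is present
            },
        },
    },
}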
6,993
rootpy/rootpy
rootpy/stats/histfactory/utils.py
make_measurement
def make_measurement(name, channels, lumi=1.0, lumi_rel_error=0.1, output_prefix='./histfactory', POI=None, const_params=None, verbose=False): """ Create a Measurement from a list of Channels """ if verbose: llog = log['make_measurement'] llog.info("creating measurement {0}".format(name)) if not isinstance(channels, (list, tuple)): channels = [channels] # Create the measurement meas = Measurement('measurement_{0}'.format(name), '') meas.SetOutputFilePrefix(output_prefix) if POI is not None: if isinstance(POI, string_types): if verbose: llog.info("setting POI {0}".format(POI)) meas.SetPOI(POI) else: if verbose: llog.info("adding POIs {0}".format(', '.join(POI))) for p in POI: meas.AddPOI(p) if verbose: llog.info("setting lumi={0:f} +/- {1:f}".format(lumi, lumi_rel_error)) meas.lumi = lumi meas.lumi_rel_error = lumi_rel_error for channel in channels: if verbose: llog.info("adding channel {0}".format(channel.GetName())) meas.AddChannel(channel) if const_params is not None: if verbose: llog.info("adding constant parameters {0}".format( ', '.join(const_params))) for param in const_params: meas.AddConstantParam(param) return meas
python
def make_measurement(name, channels, lumi=1.0, lumi_rel_error=0.1, output_prefix='./histfactory', POI=None, const_params=None, verbose=False): """ Create a Measurement from a list of Channels """ if verbose: llog = log['make_measurement'] llog.info("creating measurement {0}".format(name)) if not isinstance(channels, (list, tuple)): channels = [channels] # Create the measurement meas = Measurement('measurement_{0}'.format(name), '') meas.SetOutputFilePrefix(output_prefix) if POI is not None: if isinstance(POI, string_types): if verbose: llog.info("setting POI {0}".format(POI)) meas.SetPOI(POI) else: if verbose: llog.info("adding POIs {0}".format(', '.join(POI))) for p in POI: meas.AddPOI(p) if verbose: llog.info("setting lumi={0:f} +/- {1:f}".format(lumi, lumi_rel_error)) meas.lumi = lumi meas.lumi_rel_error = lumi_rel_error for channel in channels: if verbose: llog.info("adding channel {0}".format(channel.GetName())) meas.AddChannel(channel) if const_params is not None: if verbose: llog.info("adding constant parameters {0}".format( ', '.join(const_params))) for param in const_params: meas.AddConstantParam(param) return meas
['def', 'make_measurement', '(', 'name', ',', 'channels', ',', 'lumi', '=', '1.0', ',', 'lumi_rel_error', '=', '0.1', ',', 'output_prefix', '=', "'./histfactory'", ',', 'POI', '=', 'None', ',', 'const_params', '=', 'None', ',', 'verbose', '=', 'False', ')', ':', 'if', 'verbose', ':', 'llog', '=', 'log', '[', "'make_measurement'", ']', 'llog', '.', 'info', '(', '"creating measurement {0}"', '.', 'format', '(', 'name', ')', ')', 'if', 'not', 'isinstance', '(', 'channels', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'channels', '=', '[', 'channels', ']', '# Create the measurement', 'meas', '=', 'Measurement', '(', "'measurement_{0}'", '.', 'format', '(', 'name', ')', ',', "''", ')', 'meas', '.', 'SetOutputFilePrefix', '(', 'output_prefix', ')', 'if', 'POI', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'POI', ',', 'string_types', ')', ':', 'if', 'verbose', ':', 'llog', '.', 'info', '(', '"setting POI {0}"', '.', 'format', '(', 'POI', ')', ')', 'meas', '.', 'SetPOI', '(', 'POI', ')', 'else', ':', 'if', 'verbose', ':', 'llog', '.', 'info', '(', '"adding POIs {0}"', '.', 'format', '(', "', '", '.', 'join', '(', 'POI', ')', ')', ')', 'for', 'p', 'in', 'POI', ':', 'meas', '.', 'AddPOI', '(', 'p', ')', 'if', 'verbose', ':', 'llog', '.', 'info', '(', '"setting lumi={0:f} +/- {1:f}"', '.', 'format', '(', 'lumi', ',', 'lumi_rel_error', ')', ')', 'meas', '.', 'lumi', '=', 'lumi', 'meas', '.', 'lumi_rel_error', '=', 'lumi_rel_error', 'for', 'channel', 'in', 'channels', ':', 'if', 'verbose', ':', 'llog', '.', 'info', '(', '"adding channel {0}"', '.', 'format', '(', 'channel', '.', 'GetName', '(', ')', ')', ')', 'meas', '.', 'AddChannel', '(', 'channel', ')', 'if', 'const_params', 'is', 'not', 'None', ':', 'if', 'verbose', ':', 'llog', '.', 'info', '(', '"adding constant parameters {0}"', '.', 'format', '(', "', '", '.', 'join', '(', 'const_params', ')', ')', ')', 'for', 'param', 'in', 'const_params', ':', 'meas', '.', 'AddConstantParam', '(', 'param', ')', 'return', 'meas']
Create a Measurement from a list of Channels
['Create', 'a', 'Measurement', 'from', 'a', 'list', 'of', 'Channels']
train
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/stats/histfactory/utils.py#L56-L104
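A minimal usage sketch for make_measurement; the import location, the channel construction, and the POI and constant-parameter names are assumptions for illustration, not taken from the record above.

from rootpy.stats.histfactory import make_measurement  # assumed import location

channels = [...]  # Channel objects built elsewhere, e.g. with rootpy's histfactory helpers

# Hypothetical call; 'SigXsecOverSM' and the constant parameter names are placeholders.
meas = make_measurement(
    'my_analysis',
    channels,
    lumi=1.0,
    lumi_rel_error=0.05,
    POI='SigXsecOverSM',
    const_params=['Lumi', 'alpha_JES'],
    verbose=True,
)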
6,994
joke2k/faker
faker/providers/internet/__init__.py
Provider.ipv4_private
def ipv4_private(self, network=False, address_class=None): """ Returns a private IPv4. :param network: Network address :param address_class: IPv4 address class (a, b, or c) :returns: Private IPv4 """ # compute private networks from given class supernet = _IPv4Constants._network_classes[ address_class or self.ipv4_network_class() ] private_networks = [ subnet for subnet in _IPv4Constants._private_networks if subnet.overlaps(supernet) ] # exclude special networks private_networks = self._exclude_ipv4_networks( private_networks, _IPv4Constants._excluded_networks, ) # choose random private network from the list private_network = self.generator.random.choice(private_networks) return self._random_ipv4_address_from_subnet(private_network, network)
python
def ipv4_private(self, network=False, address_class=None): """ Returns a private IPv4. :param network: Network address :param address_class: IPv4 address class (a, b, or c) :returns: Private IPv4 """ # compute private networks from given class supernet = _IPv4Constants._network_classes[ address_class or self.ipv4_network_class() ] private_networks = [ subnet for subnet in _IPv4Constants._private_networks if subnet.overlaps(supernet) ] # exclude special networks private_networks = self._exclude_ipv4_networks( private_networks, _IPv4Constants._excluded_networks, ) # choose random private network from the list private_network = self.generator.random.choice(private_networks) return self._random_ipv4_address_from_subnet(private_network, network)
['def', 'ipv4_private', '(', 'self', ',', 'network', '=', 'False', ',', 'address_class', '=', 'None', ')', ':', '# compute private networks from given class', 'supernet', '=', '_IPv4Constants', '.', '_network_classes', '[', 'address_class', 'or', 'self', '.', 'ipv4_network_class', '(', ')', ']', 'private_networks', '=', '[', 'subnet', 'for', 'subnet', 'in', '_IPv4Constants', '.', '_private_networks', 'if', 'subnet', '.', 'overlaps', '(', 'supernet', ')', ']', '# exclude special networks', 'private_networks', '=', 'self', '.', '_exclude_ipv4_networks', '(', 'private_networks', ',', '_IPv4Constants', '.', '_excluded_networks', ',', ')', '# choose random private network from the list', 'private_network', '=', 'self', '.', 'generator', '.', 'random', '.', 'choice', '(', 'private_networks', ')', 'return', 'self', '.', '_random_ipv4_address_from_subnet', '(', 'private_network', ',', 'network', ')']
Returns a private IPv4. :param network: Network address :param address_class: IPv4 address class (a, b, or c) :returns: Private IPv4
['Returns', 'a', 'private', 'IPv4', '.']
train
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/internet/__init__.py#L363-L390
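A short usage sketch for ipv4_private; the printed addresses are examples of possible output, not fixed values.

from faker import Faker

fake = Faker()
fake.seed_instance(1234)                      # seed only to make repeated runs comparable
print(fake.ipv4_private())                    # e.g. '10.145.77.30', address class chosen at random
print(fake.ipv4_private(address_class='c'))   # drawn from the 192.168.0.0/16 private block
print(fake.ipv4_private(network=True))        # e.g. '172.24.0.0/14', a network rather than a host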
6,995
pr-omethe-us/PyKED
pyked/chemked.py
DataPoint.process_quantity
def process_quantity(self, properties): """Process the uncertainty information from a given quantity and return it """ quant = Q_(properties[0]) if len(properties) > 1: unc = properties[1] uncertainty = unc.get('uncertainty', False) upper_uncertainty = unc.get('upper-uncertainty', False) lower_uncertainty = unc.get('lower-uncertainty', False) uncertainty_type = unc.get('uncertainty-type') if uncertainty_type == 'relative': if uncertainty: quant = quant.plus_minus(float(uncertainty), relative=True) elif upper_uncertainty and lower_uncertainty: warn('Asymmetric uncertainties are not supported. The ' 'maximum of lower-uncertainty and upper-uncertainty ' 'has been used as the symmetric uncertainty.') uncertainty = max(float(upper_uncertainty), float(lower_uncertainty)) quant = quant.plus_minus(uncertainty, relative=True) else: raise ValueError('Either "uncertainty" or "upper-uncertainty" and ' '"lower-uncertainty" need to be specified.') elif uncertainty_type == 'absolute': if uncertainty: uncertainty = Q_(uncertainty) quant = quant.plus_minus(uncertainty.to(quant.units).magnitude) elif upper_uncertainty and lower_uncertainty: warn('Asymmetric uncertainties are not supported. The ' 'maximum of lower-uncertainty and upper-uncertainty ' 'has been used as the symmetric uncertainty.') uncertainty = max(Q_(upper_uncertainty), Q_(lower_uncertainty)) quant = quant.plus_minus(uncertainty.to(quant.units).magnitude) else: raise ValueError('Either "uncertainty" or "upper-uncertainty" and ' '"lower-uncertainty" need to be specified.') else: raise ValueError('uncertainty-type must be one of "absolute" or "relative"') return quant
python
def process_quantity(self, properties): """Process the uncertainty information from a given quantity and return it """ quant = Q_(properties[0]) if len(properties) > 1: unc = properties[1] uncertainty = unc.get('uncertainty', False) upper_uncertainty = unc.get('upper-uncertainty', False) lower_uncertainty = unc.get('lower-uncertainty', False) uncertainty_type = unc.get('uncertainty-type') if uncertainty_type == 'relative': if uncertainty: quant = quant.plus_minus(float(uncertainty), relative=True) elif upper_uncertainty and lower_uncertainty: warn('Asymmetric uncertainties are not supported. The ' 'maximum of lower-uncertainty and upper-uncertainty ' 'has been used as the symmetric uncertainty.') uncertainty = max(float(upper_uncertainty), float(lower_uncertainty)) quant = quant.plus_minus(uncertainty, relative=True) else: raise ValueError('Either "uncertainty" or "upper-uncertainty" and ' '"lower-uncertainty" need to be specified.') elif uncertainty_type == 'absolute': if uncertainty: uncertainty = Q_(uncertainty) quant = quant.plus_minus(uncertainty.to(quant.units).magnitude) elif upper_uncertainty and lower_uncertainty: warn('Asymmetric uncertainties are not supported. The ' 'maximum of lower-uncertainty and upper-uncertainty ' 'has been used as the symmetric uncertainty.') uncertainty = max(Q_(upper_uncertainty), Q_(lower_uncertainty)) quant = quant.plus_minus(uncertainty.to(quant.units).magnitude) else: raise ValueError('Either "uncertainty" or "upper-uncertainty" and ' '"lower-uncertainty" need to be specified.') else: raise ValueError('uncertainty-type must be one of "absolute" or "relative"') return quant
['def', 'process_quantity', '(', 'self', ',', 'properties', ')', ':', 'quant', '=', 'Q_', '(', 'properties', '[', '0', ']', ')', 'if', 'len', '(', 'properties', ')', '>', '1', ':', 'unc', '=', 'properties', '[', '1', ']', 'uncertainty', '=', 'unc', '.', 'get', '(', "'uncertainty'", ',', 'False', ')', 'upper_uncertainty', '=', 'unc', '.', 'get', '(', "'upper-uncertainty'", ',', 'False', ')', 'lower_uncertainty', '=', 'unc', '.', 'get', '(', "'lower-uncertainty'", ',', 'False', ')', 'uncertainty_type', '=', 'unc', '.', 'get', '(', "'uncertainty-type'", ')', 'if', 'uncertainty_type', '==', "'relative'", ':', 'if', 'uncertainty', ':', 'quant', '=', 'quant', '.', 'plus_minus', '(', 'float', '(', 'uncertainty', ')', ',', 'relative', '=', 'True', ')', 'elif', 'upper_uncertainty', 'and', 'lower_uncertainty', ':', 'warn', '(', "'Asymmetric uncertainties are not supported. The '", "'maximum of lower-uncertainty and upper-uncertainty '", "'has been used as the symmetric uncertainty.'", ')', 'uncertainty', '=', 'max', '(', 'float', '(', 'upper_uncertainty', ')', ',', 'float', '(', 'lower_uncertainty', ')', ')', 'quant', '=', 'quant', '.', 'plus_minus', '(', 'uncertainty', ',', 'relative', '=', 'True', ')', 'else', ':', 'raise', 'ValueError', '(', '\'Either "uncertainty" or "upper-uncertainty" and \'', '\'"lower-uncertainty" need to be specified.\'', ')', 'elif', 'uncertainty_type', '==', "'absolute'", ':', 'if', 'uncertainty', ':', 'uncertainty', '=', 'Q_', '(', 'uncertainty', ')', 'quant', '=', 'quant', '.', 'plus_minus', '(', 'uncertainty', '.', 'to', '(', 'quant', '.', 'units', ')', '.', 'magnitude', ')', 'elif', 'upper_uncertainty', 'and', 'lower_uncertainty', ':', 'warn', '(', "'Asymmetric uncertainties are not supported. The '", "'maximum of lower-uncertainty and upper-uncertainty '", "'has been used as the symmetric uncertainty.'", ')', 'uncertainty', '=', 'max', '(', 'Q_', '(', 'upper_uncertainty', ')', ',', 'Q_', '(', 'lower_uncertainty', ')', ')', 'quant', '=', 'quant', '.', 'plus_minus', '(', 'uncertainty', '.', 'to', '(', 'quant', '.', 'units', ')', '.', 'magnitude', ')', 'else', ':', 'raise', 'ValueError', '(', '\'Either "uncertainty" or "upper-uncertainty" and \'', '\'"lower-uncertainty" need to be specified.\'', ')', 'else', ':', 'raise', 'ValueError', '(', '\'uncertainty-type must be one of "absolute" or "relative"\'', ')', 'return', 'quant']
Process the uncertainty information from a given quantity and return it
['Process', 'the', 'uncertainty', 'information', 'from', 'a', 'given', 'quantity', 'and', 'return', 'it']
train
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/chemked.py#L722-L760
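To make the two-element 'properties' form concrete, here is a sketch of inputs process_quantity handles and a rough pint equivalent of what it returns; the numbers are illustrative, and Q_ stands in for PyKED's pint Quantity constructor.

from pint import UnitRegistry

Q_ = UnitRegistry().Quantity

# Illustrative parsed YAML fragments as they would reach process_quantity.
relative_case = ['1550 kelvin', {'uncertainty-type': 'relative', 'uncertainty': '0.01'}]
absolute_case = ['1.2 bar', {'uncertainty-type': 'absolute', 'uncertainty': '0.05 bar'}]

# Roughly what the method produces for each case (pint's plus_minus relies on the
# 'uncertainties' package being installed).
temperature = Q_('1550 kelvin').plus_minus(0.01, relative=True)          # 1550 +/- 15.5 kelvin
pressure = Q_('1.2 bar').plus_minus(Q_('0.05 bar').to('bar').magnitude)  # 1.2 +/- 0.05 bar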
6,996
lawsie/guizero
guizero/event.py
EventManager.remove_event
def remove_event(self, ref): """ Removes an event for a ref (reference). """ # is this reference one which has been setup? if ref in self._refs: self._refs[ref].remove_callback(ref)
python
def remove_event(self, ref): """ Removes an event for a ref (reference). """ # is this reference one which has been setup? if ref in self._refs: self._refs[ref].remove_callback(ref)
['def', 'remove_event', '(', 'self', ',', 'ref', ')', ':', '# is this reference one which has been setup?', 'if', 'ref', 'in', 'self', '.', '_refs', ':', 'self', '.', '_refs', '[', 'ref', ']', '.', 'remove_callback', '(', 'ref', ')']
Removes an event for a ref (reference).
['Removes', 'an', 'event', 'for', 'a', 'ref', '(', 'reference', ')']
train
https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/event.py#L192-L198
6,997
KelSolaar/Umbra
umbra/ui/models.py
GraphModel.vertical_headers
def vertical_headers(self, value): """ Setter for **self.__vertical_headers** attribute. :param value: Attribute value. :type value: OrderedDict """ if value is not None: assert type(value) is OrderedDict, "'{0}' attribute: '{1}' type is not 'OrderedDict'!".format( "vertical_headers", value) self.__vertical_headers = value
python
def vertical_headers(self, value): """ Setter for **self.__vertical_headers** attribute. :param value: Attribute value. :type value: OrderedDict """ if value is not None: assert type(value) is OrderedDict, "'{0}' attribute: '{1}' type is not 'OrderedDict'!".format( "vertical_headers", value) self.__vertical_headers = value
['def', 'vertical_headers', '(', 'self', ',', 'value', ')', ':', 'if', 'value', 'is', 'not', 'None', ':', 'assert', 'type', '(', 'value', ')', 'is', 'OrderedDict', ',', '"\'{0}\' attribute: \'{1}\' type is not \'OrderedDict\'!"', '.', 'format', '(', '"vertical_headers"', ',', 'value', ')', 'self', '.', '__vertical_headers', '=', 'value']
Setter for **self.__vertical_headers** attribute. :param value: Attribute value. :type value: OrderedDict
['Setter', 'for', '**', 'self', '.', '__vertical_headers', '**', 'attribute', '.']
train
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/models.py#L208-L219
6,998
yyuu/botornado
boto/ec2/autoscale/group.py
AutoScalingGroup.get_activities
def get_activities(self, activity_ids=None, max_records=50): """ Get all activities for this group. """ return self.connection.get_all_activities(self, activity_ids, max_records)
python
def get_activities(self, activity_ids=None, max_records=50): """ Get all activities for this group. """ return self.connection.get_all_activities(self, activity_ids, max_records)
['def', 'get_activities', '(', 'self', ',', 'activity_ids', '=', 'None', ',', 'max_records', '=', '50', ')', ':', 'return', 'self', '.', 'connection', '.', 'get_all_activities', '(', 'self', ',', 'activity_ids', ',', 'max_records', ')']
Get all activities for this group.
['Get', 'all', 'activities', 'for', 'this', 'group', '.']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/autoscale/group.py#L266-L271
6,999
dwavesystems/dimod
dimod/binary_quadratic_model.py
BinaryQuadraticModel.from_pandas_dataframe
def from_pandas_dataframe(cls, bqm_df, offset=0.0, interactions=None): """Create a binary quadratic model from a QUBO model formatted as a pandas DataFrame. Args: bqm_df (:class:`pandas.DataFrame`): Quadratic unconstrained binary optimization (QUBO) model formatted as a pandas DataFrame. Row and column indices label the QUBO variables; values are QUBO coefficients. offset (optional, default=0.0): Constant offset for the binary quadratic model. interactions (iterable, optional, default=[]): Any additional 0.0-bias interactions to be added to the binary quadratic model. Returns: :class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to :class:`vartype.BINARY`. Examples: This example creates a binary quadratic model from a QUBO in pandas DataFrame format while adding an interaction and setting a constant offset. >>> import dimod >>> import pandas as pd >>> pd_qubo = pd.DataFrame(data={0: [-1, 0], 1: [2, -1]}) >>> pd_qubo 0 1 0 -1 2 1 0 -1 >>> model = dimod.BinaryQuadraticModel.from_pandas_dataframe(pd_qubo, ... offset = 2.5, ... interactions = {(0,2), (1,2)}) >>> model.linear # doctest: +SKIP {0: -1, 1: -1.0, 2: 0.0} >>> model.quadratic # doctest: +SKIP {(0, 1): 2, (0, 2): 0.0, (1, 2): 0.0} >>> model.offset 2.5 >>> model.vartype <Vartype.BINARY: frozenset({0, 1})> """ if interactions is None: interactions = [] bqm = cls({}, {}, offset, Vartype.BINARY) for u, row in bqm_df.iterrows(): for v, bias in row.iteritems(): if u == v: bqm.add_variable(u, bias) elif bias: bqm.add_interaction(u, v, bias) for u, v in interactions: bqm.add_interaction(u, v, 0.0) return bqm
python
def from_pandas_dataframe(cls, bqm_df, offset=0.0, interactions=None): """Create a binary quadratic model from a QUBO model formatted as a pandas DataFrame. Args: bqm_df (:class:`pandas.DataFrame`): Quadratic unconstrained binary optimization (QUBO) model formatted as a pandas DataFrame. Row and column indices label the QUBO variables; values are QUBO coefficients. offset (optional, default=0.0): Constant offset for the binary quadratic model. interactions (iterable, optional, default=[]): Any additional 0.0-bias interactions to be added to the binary quadratic model. Returns: :class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to :class:`vartype.BINARY`. Examples: This example creates a binary quadratic model from a QUBO in pandas DataFrame format while adding an interaction and setting a constant offset. >>> import dimod >>> import pandas as pd >>> pd_qubo = pd.DataFrame(data={0: [-1, 0], 1: [2, -1]}) >>> pd_qubo 0 1 0 -1 2 1 0 -1 >>> model = dimod.BinaryQuadraticModel.from_pandas_dataframe(pd_qubo, ... offset = 2.5, ... interactions = {(0,2), (1,2)}) >>> model.linear # doctest: +SKIP {0: -1, 1: -1.0, 2: 0.0} >>> model.quadratic # doctest: +SKIP {(0, 1): 2, (0, 2): 0.0, (1, 2): 0.0} >>> model.offset 2.5 >>> model.vartype <Vartype.BINARY: frozenset({0, 1})> """ if interactions is None: interactions = [] bqm = cls({}, {}, offset, Vartype.BINARY) for u, row in bqm_df.iterrows(): for v, bias in row.iteritems(): if u == v: bqm.add_variable(u, bias) elif bias: bqm.add_interaction(u, v, bias) for u, v in interactions: bqm.add_interaction(u, v, 0.0) return bqm
['def', 'from_pandas_dataframe', '(', 'cls', ',', 'bqm_df', ',', 'offset', '=', '0.0', ',', 'interactions', '=', 'None', ')', ':', 'if', 'interactions', 'is', 'None', ':', 'interactions', '=', '[', ']', 'bqm', '=', 'cls', '(', '{', '}', ',', '{', '}', ',', 'offset', ',', 'Vartype', '.', 'BINARY', ')', 'for', 'u', ',', 'row', 'in', 'bqm_df', '.', 'iterrows', '(', ')', ':', 'for', 'v', ',', 'bias', 'in', 'row', '.', 'iteritems', '(', ')', ':', 'if', 'u', '==', 'v', ':', 'bqm', '.', 'add_variable', '(', 'u', ',', 'bias', ')', 'elif', 'bias', ':', 'bqm', '.', 'add_interaction', '(', 'u', ',', 'v', ',', 'bias', ')', 'for', 'u', ',', 'v', 'in', 'interactions', ':', 'bqm', '.', 'add_interaction', '(', 'u', ',', 'v', ',', '0.0', ')', 'return', 'bqm']
Create a binary quadratic model from a QUBO model formatted as a pandas DataFrame. Args: bqm_df (:class:`pandas.DataFrame`): Quadratic unconstrained binary optimization (QUBO) model formatted as a pandas DataFrame. Row and column indices label the QUBO variables; values are QUBO coefficients. offset (optional, default=0.0): Constant offset for the binary quadratic model. interactions (iterable, optional, default=[]): Any additional 0.0-bias interactions to be added to the binary quadratic model. Returns: :class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to :class:`vartype.BINARY`. Examples: This example creates a binary quadratic model from a QUBO in pandas DataFrame format while adding an interaction and setting a constant offset. >>> import dimod >>> import pandas as pd >>> pd_qubo = pd.DataFrame(data={0: [-1, 0], 1: [2, -1]}) >>> pd_qubo 0 1 0 -1 2 1 0 -1 >>> model = dimod.BinaryQuadraticModel.from_pandas_dataframe(pd_qubo, ... offset = 2.5, ... interactions = {(0,2), (1,2)}) >>> model.linear # doctest: +SKIP {0: -1, 1: -1.0, 2: 0.0} >>> model.quadratic # doctest: +SKIP {(0, 1): 2, (0, 2): 0.0, (1, 2): 0.0} >>> model.offset 2.5 >>> model.vartype <Vartype.BINARY: frozenset({0, 1})>
['Create', 'a', 'binary', 'quadratic', 'model', 'from', 'a', 'QUBO', 'model', 'formatted', 'as', 'a', 'pandas', 'DataFrame', '.']
train
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L2456-L2514