Dataset columns (one record per function):
- body: string, 26 to 98.2k characters
- body_hash: int64
- docstring: string, 1 to 16.8k characters
- path: string, 5 to 230 characters
- name: string, 1 to 96 characters
- repository_name: string, 7 to 89 characters
- lang: string, 1 class
- body_without_docstring: string, 20 to 98.2k characters
def is_revoked(events, token_data): 'Check if a token matches a revocation event.\n\n Compare a token against every revocation event. If the token matches an\n event in the `events` list, the token is revoked. If the token is compared\n against every item in the list without a match, it is not considered\n revoked from the `revoke_api`.\n\n :param events: a list of RevokeEvent instances\n :param token_data: map based on a flattened view of the token. The required\n fields are `expires_at`,`user_id`, `project_id`,\n `identity_domain_id`, `assignment_domain_id`,\n `trust_id`, `trustor_id`, `trustee_id` `consumer_id` and\n `access_token_id`\n :returns: True if the token matches an existing revocation event, meaning\n the token is revoked. False is returned if the token does not\n match any revocation events, meaning the token is considered\n valid by the revocation API.\n ' return any([matches(e, token_data) for e in events])
-2,733,786,438,827,986,000
Check if a token matches a revocation event. Compare a token against every revocation event. If the token matches an event in the `events` list, the token is revoked. If the token is compared against every item in the list without a match, it is not considered revoked from the `revoke_api`. :param events: a list of RevokeEvent instances :param token_data: map based on a flattened view of the token. The required fields are `expires_at`,`user_id`, `project_id`, `identity_domain_id`, `assignment_domain_id`, `trust_id`, `trustor_id`, `trustee_id` `consumer_id` and `access_token_id` :returns: True if the token matches an existing revocation event, meaning the token is revoked. False is returned if the token does not match any revocation events, meaning the token is considered valid by the revocation API.
keystone/models/revoke_model.py
is_revoked
ISCAS-VDI/keystone
python
def is_revoked(events, token_data): 'Check if a token matches a revocation event.\n\n Compare a token against every revocation event. If the token matches an\n event in the `events` list, the token is revoked. If the token is compared\n against every item in the list without a match, it is not considered\n revoked from the `revoke_api`.\n\n :param events: a list of RevokeEvent instances\n :param token_data: map based on a flattened view of the token. The required\n fields are `expires_at`,`user_id`, `project_id`,\n `identity_domain_id`, `assignment_domain_id`,\n `trust_id`, `trustor_id`, `trustee_id` `consumer_id` and\n `access_token_id`\n :returns: True if the token matches an existing revocation event, meaning\n the token is revoked. False is returned if the token does not\n match any revocation events, meaning the token is considered\n valid by the revocation API.\n ' return any([matches(e, token_data) for e in events])
def matches(event, token_values): 'See if the token matches the revocation event.\n\n A brute force approach to checking.\n Compare each attribute from the event with the corresponding\n value from the token. If the event does not have a value for\n the attribute, a match is still possible. If the event has a\n value for the attribute, and it does not match the token, no match\n is possible, so skip the remaining checks.\n\n :param event: a RevokeEvent instance\n :param token_values: dictionary with set of values taken from the\n token\n :returns: True if the token matches the revocation event, indicating the\n token has been revoked\n ' if (event.user_id is not None): if all(((event.user_id != token_values[attribute_name]) for attribute_name in ['user_id', 'trustor_id', 'trustee_id'])): return False if (event.domain_id is not None): if all(((event.domain_id != token_values[attribute_name]) for attribute_name in ['identity_domain_id', 'assignment_domain_id'])): return False if (event.domain_scope_id is not None): if (event.domain_scope_id != token_values['assignment_domain_id']): return False attribute_names = ['project_id', 'expires_at', 'trust_id', 'consumer_id', 'access_token_id', 'audit_id', 'audit_chain_id'] for attribute_name in attribute_names: if (getattr(event, attribute_name) is not None): if (getattr(event, attribute_name) != token_values[attribute_name]): return False if (event.role_id is not None): roles = token_values['roles'] if all(((event.role_id != role) for role in roles)): return False if (token_values['issued_at'] > event.issued_before): return False return True
8,244,953,039,747,746,000
See if the token matches the revocation event. A brute force approach to checking. Compare each attribute from the event with the corresponding value from the token. If the event does not have a value for the attribute, a match is still possible. If the event has a value for the attribute, and it does not match the token, no match is possible, so skip the remaining checks. :param event: a RevokeEvent instance :param token_values: dictionary with set of values taken from the token :returns: True if the token matches the revocation event, indicating the token has been revoked
keystone/models/revoke_model.py
matches
ISCAS-VDI/keystone
python
def matches(event, token_values): 'See if the token matches the revocation event.\n\n A brute force approach to checking.\n Compare each attribute from the event with the corresponding\n value from the token. If the event does not have a value for\n the attribute, a match is still possible. If the event has a\n value for the attribute, and it does not match the token, no match\n is possible, so skip the remaining checks.\n\n :param event: a RevokeEvent instance\n :param token_values: dictionary with set of values taken from the\n token\n :returns: True if the token matches the revocation event, indicating the\n token has been revoked\n ' if (event.user_id is not None): if all(((event.user_id != token_values[attribute_name]) for attribute_name in ['user_id', 'trustor_id', 'trustee_id'])): return False if (event.domain_id is not None): if all(((event.domain_id != token_values[attribute_name]) for attribute_name in ['identity_domain_id', 'assignment_domain_id'])): return False if (event.domain_scope_id is not None): if (event.domain_scope_id != token_values['assignment_domain_id']): return False attribute_names = ['project_id', 'expires_at', 'trust_id', 'consumer_id', 'access_token_id', 'audit_id', 'audit_chain_id'] for attribute_name in attribute_names: if (getattr(event, attribute_name) is not None): if (getattr(event, attribute_name) != token_values[attribute_name]): return False if (event.role_id is not None): roles = token_values['roles'] if all(((event.role_id != role) for role in roles)): return False if (token_values['issued_at'] > event.issued_before): return False return True
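A minimal usage sketch of the two revocation helpers above, assuming both are importable from keystone.models.revoke_model. The event object is a stand-in built with types.SimpleNamespace rather than a real RevokeEvent, and only the attributes that matches() reads are populated; all names and values are illustrative.

import datetime
from types import SimpleNamespace

# Hypothetical event: revoke everything issued to one user before a cutoff time.
event = SimpleNamespace(
    user_id='user-123', domain_id=None, domain_scope_id=None, project_id=None,
    expires_at=None, trust_id=None, consumer_id=None, access_token_id=None,
    audit_id=None, audit_chain_id=None, role_id=None,
    issued_before=datetime.datetime(2024, 1, 2))

# Flattened token view containing every field matches() may look up.
token_data = {
    'user_id': 'user-123', 'trustor_id': None, 'trustee_id': None,
    'identity_domain_id': 'default', 'assignment_domain_id': 'default',
    'project_id': 'proj-1', 'expires_at': None, 'trust_id': None,
    'consumer_id': None, 'access_token_id': None, 'audit_id': None,
    'audit_chain_id': None, 'roles': ['member'],
    'issued_at': datetime.datetime(2024, 1, 1)}

print(is_revoked([event], token_data))  # True: the user id matches and the token predates issued_before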
def connect(**kwargs): 'Opens a new connection to a Vertica database.' return Connection(kwargs)
-8,169,109,808,066,380,000
Opens a new connection to a Vertica database.
vertica_python/vertica/connection.py
connect
uber/vertica-python
python
def connect(**kwargs): return Connection(kwargs)
def parse_dsn(dsn): 'Parse connection string into a dictionary of keywords and values.\n Connection string format:\n vertica://<user>:<password>@<host>:<port>/<database>?k1=v1&k2=v2&...\n ' url = urlparse(dsn) if (url.scheme != 'vertica'): raise ValueError('Only vertica:// scheme is supported.') result = {k: v for (k, v) in (('host', url.hostname), ('port', url.port), ('user', url.username), ('password', url.password), ('database', url.path[1:])) if v} for (key, values) in parse_qs(url.query, keep_blank_values=True).items(): for i in reversed(range(len(values))): value = values[i] if (value != ''): break if ((value == '') and (key != 'log_path')): continue elif (key == 'backup_server_node'): continue elif (key in ('connection_load_balance', 'use_prepared_statements', 'disable_copy_local', 'ssl', 'autocommit')): lower = value.lower() if (lower in ('true', 'on', '1')): result[key] = True elif (lower in ('false', 'off', '0')): result[key] = False elif (key == 'connection_timeout'): result[key] = float(value) elif ((key == 'log_level') and value.isdigit()): result[key] = int(value) else: result[key] = value return result
-8,685,461,749,772,006,000
Parse connection string into a dictionary of keywords and values. Connection string format: vertica://<user>:<password>@<host>:<port>/<database>?k1=v1&k2=v2&...
vertica_python/vertica/connection.py
parse_dsn
uber/vertica-python
python
def parse_dsn(dsn): 'Parse connection string into a dictionary of keywords and values.\n Connection string format:\n vertica://<user>:<password>@<host>:<port>/<database>?k1=v1&k2=v2&...\n ' url = urlparse(dsn) if (url.scheme != 'vertica'): raise ValueError('Only vertica:// scheme is supported.') result = {k: v for (k, v) in (('host', url.hostname), ('port', url.port), ('user', url.username), ('password', url.password), ('database', url.path[1:])) if v} for (key, values) in parse_qs(url.query, keep_blank_values=True).items(): for i in reversed(range(len(values))): value = values[i] if (value != ''): break if ((value == '') and (key != 'log_path')): continue elif (key == 'backup_server_node'): continue elif (key in ('connection_load_balance', 'use_prepared_statements', 'disable_copy_local', 'ssl', 'autocommit')): lower = value.lower() if (lower in ('true', 'on', '1')): result[key] = True elif (lower in ('false', 'off', '0')): result[key] = False elif (key == 'connection_timeout'): result[key] = float(value) elif ((key == 'log_level') and value.isdigit()): result[key] = int(value) else: result[key] = value return result
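A quick sketch of what parse_dsn returns for a typical connection string; the credentials, host, and database name are placeholders, and the expected dictionary follows the parsing rules in the function above.

dsn = 'vertica://dbadmin:secret@localhost:5433/reports?connection_timeout=5&ssl=true&log_level=10'
print(parse_dsn(dsn))
# {'host': 'localhost', 'port': 5433, 'user': 'dbadmin', 'password': 'secret',
#  'database': 'reports', 'connection_timeout': 5.0, 'ssl': True, 'log_level': 10}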
def __init__(self, host, port, backup_nodes, logger): 'Creates a new deque with the primary host first, followed by any backup hosts' self._logger = logger self.address_deque = deque() self._append(host, port) if (not isinstance(backup_nodes, list)): err_msg = 'Connection option "backup_server_node" must be a list' self._logger.error(err_msg) raise TypeError(err_msg) for node in backup_nodes: if isinstance(node, string_types): self._append(node, DEFAULT_PORT) elif (isinstance(node, tuple) and (len(node) == 2)): self._append(node[0], node[1]) else: err_msg = 'Each item of connection option "backup_server_node" must be a host string or a (host, port) tuple' self._logger.error(err_msg) raise TypeError(err_msg) self._logger.debug('Address list: {0}'.format(list(self.address_deque)))
4,716,196,150,234,105,000
Creates a new deque with the primary host first, followed by any backup hosts
vertica_python/vertica/connection.py
__init__
uber/vertica-python
python
def __init__(self, host, port, backup_nodes, logger): self._logger = logger self.address_deque = deque() self._append(host, port) if (not isinstance(backup_nodes, list)): err_msg = 'Connection option "backup_server_node" must be a list' self._logger.error(err_msg) raise TypeError(err_msg) for node in backup_nodes: if isinstance(node, string_types): self._append(node, DEFAULT_PORT) elif (isinstance(node, tuple) and (len(node) == 2)): self._append(node[0], node[1]) else: err_msg = 'Each item of connection option "backup_server_node" must be a host string or a (host, port) tuple' self._logger.error(err_msg) raise TypeError(err_msg) self._logger.debug('Address list: {0}'.format(list(self.address_deque)))
@property def autocommit(self): "Read the connection's AUTOCOMMIT setting from cache" return (self.parameters.get('auto_commit', 'off') == 'on')
-9,071,740,125,276,144,000
Read the connection's AUTOCOMMIT setting from cache
vertica_python/vertica/connection.py
autocommit
uber/vertica-python
python
@property def autocommit(self): return (self.parameters.get('auto_commit', 'off') == 'on')
@autocommit.setter def autocommit(self, value): "Change the connection's AUTOCOMMIT setting" if (self.autocommit is value): return val = ('on' if value else 'off') cur = self.cursor() cur.execute('SET SESSION AUTOCOMMIT TO {}'.format(val), use_prepared_statements=False) cur.fetchall()
1,149,883,264,255,221,600
Change the connection's AUTOCOMMIT setting
vertica_python/vertica/connection.py
autocommit
uber/vertica-python
python
@autocommit.setter def autocommit(self, value): if (self.autocommit is value): return val = ('on' if value else 'off') cur = self.cursor() cur.execute('SET SESSION AUTOCOMMIT TO {}'.format(val), use_prepared_statements=False) cur.fetchall()
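A short sketch of reading and toggling the autocommit property; the connection options are placeholders and the sketch assumes the vertica_python.connect entry point shown earlier in this file.

import vertica_python

conn = vertica_python.connect(host='localhost', port=5433, user='dbadmin',
                              password='secret', database='reports')
print(conn.autocommit)   # reads the cached AUTOCOMMIT session parameter
conn.autocommit = True   # runs SET SESSION AUTOCOMMIT TO on through a cursor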
def cancel(self): 'Cancel the current database operation. This can be called from a\n different thread than the one currently executing a database operation.\n ' if self.closed(): raise errors.ConnectionError('Connection is closed') self._logger.info('Canceling the current database operation') temp_socket = self.establish_socket_connection(self.address_list) self.write(CancelRequest(self.backend_pid, self.backend_key), temp_socket) temp_socket.close() self._logger.info('Cancel request issued')
-2,056,104,082,724,521,700
Cancel the current database operation. This can be called from a different thread than the one currently executing a database operation.
vertica_python/vertica/connection.py
cancel
uber/vertica-python
python
def cancel(self): 'Cancel the current database operation. This can be called from a\n different thread than the one currently executing a database operation.\n ' if self.closed(): raise errors.ConnectionError('Connection is closed') self._logger.info('Canceling the current database operation') temp_socket = self.establish_socket_connection(self.address_list) self.write(CancelRequest(self.backend_pid, self.backend_key), temp_socket) temp_socket.close() self._logger.info('Cancel request issued')
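A sketch of calling cancel() from a second thread while another thread is blocked in a query, as the docstring permits; the statement is a placeholder and conn is the connection from the sketch above.

import threading
import time

def run_long_query(connection):
    cur = connection.cursor()
    cur.execute('SELECT SLEEP(60)')  # placeholder long-running statement

worker = threading.Thread(target=run_long_query, args=(conn,))
worker.start()
time.sleep(1)
conn.cancel()   # sends a CancelRequest over a separate, temporary socket
worker.join()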
def create_socket(self, family): 'Create a TCP socket object' raw_socket = socket.socket(family, socket.SOCK_STREAM) raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) connection_timeout = self.options.get('connection_timeout') if (connection_timeout is not None): self._logger.debug('Set socket connection timeout: {0}'.format(connection_timeout)) raw_socket.settimeout(connection_timeout) return raw_socket
6,969,502,913,483,730,000
Create a TCP socket object
vertica_python/vertica/connection.py
create_socket
uber/vertica-python
python
def create_socket(self, family): raw_socket = socket.socket(family, socket.SOCK_STREAM) raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) connection_timeout = self.options.get('connection_timeout') if (connection_timeout is not None): self._logger.debug('Set socket connection timeout: {0}'.format(connection_timeout)) raw_socket.settimeout(connection_timeout) return raw_socket
def establish_socket_connection(self, address_list): 'Given a list of database node addresses, establish the socket\n connection to the database server. Return a connected socket object.\n ' addrinfo = address_list.peek() raw_socket = None last_exception = None while addrinfo: (family, socktype, proto, canonname, sockaddr) = addrinfo last_exception = None host = sockaddr[0] port = sockaddr[1] self._logger.info('Establishing connection to host "{0}" on port {1}'.format(host, port)) try: raw_socket = self.create_socket(family) raw_socket.connect(sockaddr) break except Exception as e: self._logger.info('Failed to connect to host "{0}" on port {1}: {2}'.format(host, port, e)) last_exception = e address_list.pop() addrinfo = address_list.peek() raw_socket.close() if ((raw_socket is None) or last_exception): err_msg = 'Failed to establish a connection to the primary server or any backup address.' self._logger.error(err_msg) raise errors.ConnectionError(err_msg) return raw_socket
3,240,512,285,882,664,000
Given a list of database node addresses, establish the socket connection to the database server. Return a connected socket object.
vertica_python/vertica/connection.py
establish_socket_connection
uber/vertica-python
python
def establish_socket_connection(self, address_list): 'Given a list of database node addresses, establish the socket\n connection to the database server. Return a connected socket object.\n ' addrinfo = address_list.peek() raw_socket = None last_exception = None while addrinfo: (family, socktype, proto, canonname, sockaddr) = addrinfo last_exception = None host = sockaddr[0] port = sockaddr[1] self._logger.info('Establishing connection to host "{0}" on port {1}'.format(host, port)) try: raw_socket = self.create_socket(family) raw_socket.connect(sockaddr) break except Exception as e: self._logger.info('Failed to connect to host "{0}" on port {1}: {2}'.format(host, port, e)) last_exception = e address_list.pop() addrinfo = address_list.peek() raw_socket.close() if ((raw_socket is None) or last_exception): err_msg = 'Failed to establish a connection to the primary server or any backup address.' self._logger.error(err_msg) raise errors.ConnectionError(err_msg) return raw_socket
def test_clone(self): 'Test cloning a Timestamp instance.' t1 = Timestamp.utcnow() t2 = t1.clone() self.assertEqual(t1, t2) self.assertIsInstance(t2, Timestamp)
-3,574,128,908,683,884,500
Test cloning a Timestamp instance.
tests/timestamp_tests.py
test_clone
LeesahMasko/piwikibot
python
def test_clone(self): t1 = Timestamp.utcnow() t2 = t1.clone() self.assertEqual(t1, t2) self.assertIsInstance(t2, Timestamp)
def test_instantiate_from_instance(self): 'Test passing instance to factory methods works.' t1 = Timestamp.utcnow() self.assertIsNot(t1, Timestamp.fromISOformat(t1)) self.assertEqual(t1, Timestamp.fromISOformat(t1)) self.assertIsInstance(Timestamp.fromISOformat(t1), Timestamp) self.assertIsNot(t1, Timestamp.fromtimestampformat(t1)) self.assertEqual(t1, Timestamp.fromtimestampformat(t1)) self.assertIsInstance(Timestamp.fromtimestampformat(t1), Timestamp)
-5,481,197,439,969,814,000
Test passing instance to factory methods works.
tests/timestamp_tests.py
test_instantiate_from_instance
LeesahMasko/piwikibot
python
def test_instantiate_from_instance(self): t1 = Timestamp.utcnow() self.assertIsNot(t1, Timestamp.fromISOformat(t1)) self.assertEqual(t1, Timestamp.fromISOformat(t1)) self.assertIsInstance(Timestamp.fromISOformat(t1), Timestamp) self.assertIsNot(t1, Timestamp.fromtimestampformat(t1)) self.assertEqual(t1, Timestamp.fromtimestampformat(t1)) self.assertIsInstance(Timestamp.fromtimestampformat(t1), Timestamp)
def test_iso_format(self): 'Test conversion from and to ISO format.' sep = 'T' t1 = Timestamp.utcnow() if (not t1.microsecond): t1 = t1.replace(microsecond=1) ts1 = t1.isoformat() t2 = Timestamp.fromISOformat(ts1) ts2 = t2.isoformat() self.assertNotEqual(t1, t2) t1 = t1.replace(microsecond=0) self.assertEqual(t1, t2) self.assertEqual(ts1, ts2) (date, sep, time) = ts1.partition(sep) time = time.rstrip('Z') self.assertEqual(date, str(t1.date())) self.assertEqual(time, str(t1.time()))
-6,779,347,275,526,204,000
Test conversion from and to ISO format.
tests/timestamp_tests.py
test_iso_format
LeesahMasko/piwikibot
python
def test_iso_format(self): sep = 'T' t1 = Timestamp.utcnow() if (not t1.microsecond): t1 = t1.replace(microsecond=1) ts1 = t1.isoformat() t2 = Timestamp.fromISOformat(ts1) ts2 = t2.isoformat() self.assertNotEqual(t1, t2) t1 = t1.replace(microsecond=0) self.assertEqual(t1, t2) self.assertEqual(ts1, ts2) (date, sep, time) = ts1.partition(sep) time = time.rstrip('Z') self.assertEqual(date, str(t1.date())) self.assertEqual(time, str(t1.time()))
def test_iso_format_with_sep(self): 'Test conversion from and to ISO format with separator.' sep = '*' t1 = Timestamp.utcnow().replace(microsecond=0) ts1 = t1.isoformat(sep=sep) t2 = Timestamp.fromISOformat(ts1, sep=sep) ts2 = t2.isoformat(sep=sep) self.assertEqual(t1, t2) self.assertEqual(t1, t2) self.assertEqual(ts1, ts2) (date, sep, time) = ts1.partition(sep) time = time.rstrip('Z') self.assertEqual(date, str(t1.date())) self.assertEqual(time, str(t1.time()))
5,182,066,370,592,766,000
Test conversion from and to ISO format with separator.
tests/timestamp_tests.py
test_iso_format_with_sep
LeesahMasko/piwikibot
python
def test_iso_format_with_sep(self): sep = '*' t1 = Timestamp.utcnow().replace(microsecond=0) ts1 = t1.isoformat(sep=sep) t2 = Timestamp.fromISOformat(ts1, sep=sep) ts2 = t2.isoformat(sep=sep) self.assertEqual(t1, t2) self.assertEqual(t1, t2) self.assertEqual(ts1, ts2) (date, sep, time) = ts1.partition(sep) time = time.rstrip('Z') self.assertEqual(date, str(t1.date())) self.assertEqual(time, str(t1.time()))
def test_iso_format_property(self): 'Test iso format properties.' self.assertEqual(Timestamp.ISO8601Format, Timestamp._ISO8601Format()) self.assertEqual(re.sub('[\\-:TZ]', '', Timestamp.ISO8601Format), Timestamp.mediawikiTSFormat)
-4,886,496,288,491,828,000
Test iso format properties.
tests/timestamp_tests.py
test_iso_format_property
LeesahMasko/piwikibot
python
def test_iso_format_property(self): self.assertEqual(Timestamp.ISO8601Format, Timestamp._ISO8601Format()) self.assertEqual(re.sub('[\\-:TZ]', '', Timestamp.ISO8601Format), Timestamp.mediawikiTSFormat)
def test_mediawiki_format(self): 'Test conversion from and to Timestamp format.' t1 = Timestamp.utcnow() if (not t1.microsecond): t1 = t1.replace(microsecond=1000) ts1 = t1.totimestampformat() t2 = Timestamp.fromtimestampformat(ts1) ts2 = t2.totimestampformat() self.assertNotEqual(t1, t2) t1 = t1.replace(microsecond=0) self.assertEqual(t1, t2) self.assertEqual(ts1, ts2)
8,077,372,880,063,929,000
Test conversion from and to Timestamp format.
tests/timestamp_tests.py
test_mediawiki_format
LeesahMasko/piwikibot
python
def test_mediawiki_format(self): t1 = Timestamp.utcnow() if (not t1.microsecond): t1 = t1.replace(microsecond=1000) ts1 = t1.totimestampformat() t2 = Timestamp.fromtimestampformat(ts1) ts2 = t2.totimestampformat() self.assertNotEqual(t1, t2) t1 = t1.replace(microsecond=0) self.assertEqual(t1, t2) self.assertEqual(ts1, ts2)
def test_short_mediawiki_format(self): 'Test short mw timestamp conversion from and to Timestamp format.' t1 = Timestamp(2018, 12, 17) t2 = Timestamp.fromtimestampformat('20181217') ts1 = t1.totimestampformat() ts2 = t2.totimestampformat() self.assertEqual(t1, t2) self.assertEqual(ts1, ts2)
-2,261,167,477,131,814,400
Test short mw timestamp conversion from and to Timestamp format.
tests/timestamp_tests.py
test_short_mediawiki_format
LeesahMasko/piwikibot
python
def test_short_mediawiki_format(self): t1 = Timestamp(2018, 12, 17) t2 = Timestamp.fromtimestampformat('20181217') ts1 = t1.totimestampformat() ts2 = t2.totimestampformat() self.assertEqual(t1, t2) self.assertEqual(ts1, ts2)
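An illustrative round trip for the short MediaWiki timestamp format exercised by the test above, assuming pywikibot's Timestamp class is importable as in the test module.

from pywikibot import Timestamp

t = Timestamp.fromtimestampformat('20181217')  # date-only short form
print(t.totimestampformat())                   # '20181217000000'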
def test_add_timedelta(self): 'Test adding a timedelta to a Timestamp.' t1 = Timestamp.utcnow() t2 = (t1 + datetime.timedelta(days=1)) if (t1.month != t2.month): self.assertEqual(1, t2.day) else: self.assertEqual((t1.day + 1), t2.day) self.assertIsInstance(t2, Timestamp)
-8,972,745,366,684,781,000
Test adding a timedelta to a Timestamp.
tests/timestamp_tests.py
test_add_timedelta
LeesahMasko/piwikibot
python
def test_add_timedelta(self): t1 = Timestamp.utcnow() t2 = (t1 + datetime.timedelta(days=1)) if (t1.month != t2.month): self.assertEqual(1, t2.day) else: self.assertEqual((t1.day + 1), t2.day) self.assertIsInstance(t2, Timestamp)
def test_add_timedate(self): 'Test that unsupported additions return NotImplemented.' t1 = datetime.datetime.utcnow() t2 = (t1 + datetime.timedelta(days=1)) t3 = t1.__add__(t2) self.assertIs(t3, NotImplemented) t1 = Timestamp.utcnow() t2 = (t1 + datetime.timedelta(days=1)) t3 = t1.__add__(t2) self.assertIs(t3, NotImplemented)
9,136,707,267,426,688,000
Test that unsupported additions return NotImplemented.
tests/timestamp_tests.py
test_add_timedate
LeesahMasko/piwikibot
python
def test_add_timedate(self): t1 = datetime.datetime.utcnow() t2 = (t1 + datetime.timedelta(days=1)) t3 = t1.__add__(t2) self.assertIs(t3, NotImplemented) t1 = Timestamp.utcnow() t2 = (t1 + datetime.timedelta(days=1)) t3 = t1.__add__(t2) self.assertIs(t3, NotImplemented)
def test_sub_timedelta(self): 'Test subtracting a timedelta from a Timestamp.' t1 = Timestamp.utcnow() t2 = (t1 - datetime.timedelta(days=1)) if (t1.month != t2.month): self.assertEqual(calendar.monthrange(t2.year, t2.month)[1], t2.day) else: self.assertEqual((t1.day - 1), t2.day) self.assertIsInstance(t2, Timestamp)
3,450,087,203,098,203,600
Test subtracting a timedelta from a Timestamp.
tests/timestamp_tests.py
test_sub_timedelta
LeesahMasko/piwikibot
python
def test_sub_timedelta(self): t1 = Timestamp.utcnow() t2 = (t1 - datetime.timedelta(days=1)) if (t1.month != t2.month): self.assertEqual(calendar.monthrange(t2.year, t2.month)[1], t2.day) else: self.assertEqual((t1.day - 1), t2.day) self.assertIsInstance(t2, Timestamp)
def test_sub_timedate(self): 'Test subtracting two timestamps.' t1 = Timestamp.utcnow() t2 = (t1 - datetime.timedelta(days=1)) td = (t1 - t2) self.assertIsInstance(td, datetime.timedelta) self.assertEqual((t2 + td), t1)
-7,567,995,634,436,964,000
Test subtracting two timestamps.
tests/timestamp_tests.py
test_sub_timedate
LeesahMasko/piwikibot
python
def test_sub_timedate(self): t1 = Timestamp.utcnow() t2 = (t1 - datetime.timedelta(days=1)) td = (t1 - t2) self.assertIsInstance(td, datetime.timedelta) self.assertEqual((t2 + td), t1)
def __init__(self): '\n initialize your data structure here.\n ' self.stack1 = [] self.stack2 = []
7,825,434,695,264,292,000
initialize your data structure here.
LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/155_Min_Stack.py
__init__
Sycamore-City-passerby/ML
python
def __init__(self): '\n \n ' self.stack1 = [] self.stack2 = []
def push(self, x): '\n :type x: int\n :rtype: void\n ' self.stack1.append(x) if ((len(self.stack2) == 0) or (x <= self.stack2[(- 1)])): self.stack2.append(x)
-6,401,598,814,599,230,000
:type x: int :rtype: void
LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/155_Min_Stack.py
push
Sycamore-City-passerby/ML
python
def push(self, x): '\n :type x: int\n :rtype: void\n ' self.stack1.append(x) if ((len(self.stack2) == 0) or (x <= self.stack2[(- 1)])): self.stack2.append(x)
def pop(self): '\n :rtype: void\n ' top = self.stack1[(- 1)] self.stack1.pop() if (top == self.stack2[(- 1)]): self.stack2.pop()
-8,236,398,851,309,514,000
:rtype: void
LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/155_Min_Stack.py
pop
Sycamore-City-passerby/ML
python
def pop(self): '\n \n ' top = self.stack1[(- 1)] self.stack1.pop() if (top == self.stack2[(- 1)]): self.stack2.pop()
def top(self): '\n :rtype: int\n ' return self.stack1[(- 1)]
260,721,099,785,025,470
:rtype: int
LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/155_Min_Stack.py
top
Sycamore-City-passerby/ML
python
def top(self): '\n \n ' return self.stack1[(- 1)]
def getMin(self): '\n :rtype: int\n ' return self.stack2[(- 1)]
-8,238,747,038,127,991,000
:rtype: int
LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/155_Min_Stack.py
getMin
Sycamore-City-passerby/ML
python
def getMin(self): '\n \n ' return self.stack2[(- 1)]
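A small usage sketch assuming the five methods above are assembled into a single MinStack class, as in the original LeetCode 155 solution; stack2 keeps the running minimums.

s = MinStack()
s.push(5)
s.push(3)
s.push(7)
print(s.getMin())  # 3
s.pop()            # removes 7; the minimum is unchanged
print(s.top())     # 3
s.pop()            # removes 3 from both internal stacks
print(s.getMin())  # 5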
def get_drop_connect_rate(init_rate, block_num, total_blocks): 'Get drop connect rate for the ith block.' if (init_rate is not None): return ((init_rate * float(block_num)) / total_blocks) else: return None
4,564,176,001,601,319,000
Get drop connect rate for the ith block.
models/official/detection/modeling/architecture/resnet.py
get_drop_connect_rate
hoangphucITJP/tpu
python
def get_drop_connect_rate(init_rate, block_num, total_blocks): if (init_rate is not None): return ((init_rate * float(block_num)) / total_blocks) else: return None
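The rate scales linearly with block depth; a couple of illustrative calls (the block counts here are placeholders):

print(get_drop_connect_rate(0.2, 1, 4))   # 0.05: the first block gets the smallest rate
print(get_drop_connect_rate(0.2, 4, 4))   # 0.2: the last block gets the full initial rate
print(get_drop_connect_rate(None, 2, 4))  # None: drop connect disabled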
def block_group(inputs, filters, strides, use_projection, block_fn, block_repeats, batch_norm_relu=nn_ops.BatchNormRelu(), dropblock=nn_ops.Dropblock(), drop_connect_rate=None, data_format='channels_last', name=None, is_training=False): "Builds one group of blocks.\n\n Args:\n inputs: a `Tensor` of size `[batch, channels, height, width]`.\n filters: an `int` number of filters for the first two convolutions.\n strides: an `int` block stride. If greater than 1, this block will\n ultimately downsample the input.\n use_projection: a `bool` for whether this block should use a projection\n shortcut (versus the default identity shortcut). This is usually `True`\n for the first block of a block group, which may change the number of\n filters and the resolution.\n block_fn: the `function` for the block to use within the model\n block_repeats: an `int` number of blocks to repeat in the group.\n batch_norm_relu: an operation that is added after convolutions, including a\n batch norm layer and an optional relu activation.\n dropblock: a drop block layer that is added after convluations. Note that\n the default implementation does not apply any drop block.\n drop_connect_rate: a 'float' number that specifies the drop connection rate\n of the block. Note that the default `None` means no drop connection is\n applied.\n data_format: a `str` that specifies the data format.\n name: a `str` name for the Tensor output of the block layer.\n is_training: a `bool` if True, the model is in training mode.\n\n Returns:\n The output `Tensor` of the block layer.\n " inputs = block_fn(inputs, filters, strides, use_projection=use_projection, batch_norm_relu=batch_norm_relu, dropblock=dropblock, drop_connect_rate=drop_connect_rate, data_format=data_format, is_training=is_training) for _ in range(1, block_repeats): inputs = block_fn(inputs, filters, 1, use_projection=False, batch_norm_relu=batch_norm_relu, dropblock=dropblock, drop_connect_rate=drop_connect_rate, data_format=data_format, is_training=is_training) return tf.identity(inputs, name)
3,270,517,964,845,363,700
Builds one group of blocks. Args: inputs: a `Tensor` of size `[batch, channels, height, width]`. filters: an `int` number of filters for the first two convolutions. strides: an `int` block stride. If greater than 1, this block will ultimately downsample the input. use_projection: a `bool` for whether this block should use a projection shortcut (versus the default identity shortcut). This is usually `True` for the first block of a block group, which may change the number of filters and the resolution. block_fn: the `function` for the block to use within the model block_repeats: an `int` number of blocks to repeat in the group. batch_norm_relu: an operation that is added after convolutions, including a batch norm layer and an optional relu activation. dropblock: a drop block layer that is added after convolutions. Note that the default implementation does not apply any drop block. drop_connect_rate: a 'float' number that specifies the drop connection rate of the block. Note that the default `None` means no drop connection is applied. data_format: a `str` that specifies the data format. name: a `str` name for the Tensor output of the block layer. is_training: a `bool` if True, the model is in training mode. Returns: The output `Tensor` of the block layer.
models/official/detection/modeling/architecture/resnet.py
block_group
hoangphucITJP/tpu
python
def block_group(inputs, filters, strides, use_projection, block_fn, block_repeats, batch_norm_relu=nn_ops.BatchNormRelu(), dropblock=nn_ops.Dropblock(), drop_connect_rate=None, data_format='channels_last', name=None, is_training=False): "Builds one group of blocks.\n\n Args:\n inputs: a `Tensor` of size `[batch, channels, height, width]`.\n filters: an `int` number of filters for the first two convolutions.\n strides: an `int` block stride. If greater than 1, this block will\n ultimately downsample the input.\n use_projection: a `bool` for whether this block should use a projection\n shortcut (versus the default identity shortcut). This is usually `True`\n for the first block of a block group, which may change the number of\n filters and the resolution.\n block_fn: the `function` for the block to use within the model\n block_repeats: an `int` number of blocks to repeat in the group.\n batch_norm_relu: an operation that is added after convolutions, including a\n batch norm layer and an optional relu activation.\n dropblock: a drop block layer that is added after convluations. Note that\n the default implementation does not apply any drop block.\n drop_connect_rate: a 'float' number that specifies the drop connection rate\n of the block. Note that the default `None` means no drop connection is\n applied.\n data_format: a `str` that specifies the data format.\n name: a `str` name for the Tensor output of the block layer.\n is_training: a `bool` if True, the model is in training mode.\n\n Returns:\n The output `Tensor` of the block layer.\n " inputs = block_fn(inputs, filters, strides, use_projection=use_projection, batch_norm_relu=batch_norm_relu, dropblock=dropblock, drop_connect_rate=drop_connect_rate, data_format=data_format, is_training=is_training) for _ in range(1, block_repeats): inputs = block_fn(inputs, filters, 1, use_projection=False, batch_norm_relu=batch_norm_relu, dropblock=dropblock, drop_connect_rate=drop_connect_rate, data_format=data_format, is_training=is_training) return tf.identity(inputs, name)
def __init__(self, resnet_depth, dropblock=nn_ops.Dropblock(), batch_norm_relu=nn_ops.BatchNormRelu(), init_drop_connect_rate=None, data_format='channels_last'): 'ResNet initialization function.\n\n Args:\n resnet_depth: `int` depth of ResNet backbone model.\n dropblock: a dropblock layer.\n batch_norm_relu: an operation that includes a batch normalization layer\n followed by a relu layer(optional).\n init_drop_connect_rate: a \'float\' number that specifies the initial drop\n connection rate. Note that the default `None` means no drop connection\n is applied.\n data_format: `str` either "channels_first" for `[batch, channels, height,\n width]` or "channels_last for `[batch, height, width, channels]`.\n ' self._resnet_depth = resnet_depth self._dropblock = dropblock self._batch_norm_relu = batch_norm_relu self._init_drop_connect_rate = init_drop_connect_rate self._data_format = data_format model_params = {10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]}, 18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]}, 34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]}, 50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]}, 101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]}, 152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]}, 200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}} if (resnet_depth not in model_params): valid_resnet_depths = ', '.join([str(depth) for depth in sorted(model_params.keys())]) raise ValueError(('The resnet_depth should be in [%s]. Not a valid resnet_depth:' % valid_resnet_depths), self._resnet_depth) params = model_params[resnet_depth] self._resnet_fn = self.resnet_v1_generator(params['block'], params['layers'])
-516,347,992,290,980,400
ResNet initialization function. Args: resnet_depth: `int` depth of ResNet backbone model. dropblock: a dropblock layer. batch_norm_relu: an operation that includes a batch normalization layer followed by a relu layer(optional). init_drop_connect_rate: a 'float' number that specifies the initial drop connection rate. Note that the default `None` means no drop connection is applied. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`.
models/official/detection/modeling/architecture/resnet.py
__init__
hoangphucITJP/tpu
python
def __init__(self, resnet_depth, dropblock=nn_ops.Dropblock(), batch_norm_relu=nn_ops.BatchNormRelu(), init_drop_connect_rate=None, data_format='channels_last'): 'ResNet initialization function.\n\n Args:\n resnet_depth: `int` depth of ResNet backbone model.\n dropblock: a dropblock layer.\n batch_norm_relu: an operation that includes a batch normalization layer\n followed by a relu layer(optional).\n init_drop_connect_rate: a \'float\' number that specifies the initial drop\n connection rate. Note that the default `None` means no drop connection\n is applied.\n data_format: `str` either "channels_first" for `[batch, channels, height,\n width]` or "channels_last for `[batch, height, width, channels]`.\n ' self._resnet_depth = resnet_depth self._dropblock = dropblock self._batch_norm_relu = batch_norm_relu self._init_drop_connect_rate = init_drop_connect_rate self._data_format = data_format model_params = {10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]}, 18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]}, 34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]}, 50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]}, 101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]}, 152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]}, 200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}} if (resnet_depth not in model_params): valid_resnet_depths = ', '.join([str(depth) for depth in sorted(model_params.keys())]) raise ValueError(('The resnet_depth should be in [%s]. Not a valid resnet_depth:' % valid_resnet_depths), self._resnet_depth) params = model_params[resnet_depth] self._resnet_fn = self.resnet_v1_generator(params['block'], params['layers'])
def __call__(self, inputs, is_training=False): 'Returns the ResNet model for a given size and number of output classes.\n\n Args:\n inputs: a `Tensor` with shape [batch_size, height, width, 3] representing\n a batch of images.\n is_training: `bool` if True, the model is in training mode.\n\n Returns:\n a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].\n The values are corresponding feature hierarchy in ResNet with shape\n [batch_size, height_l, width_l, num_filters].\n ' with tf.variable_scope(('resnet%s' % self._resnet_depth)): return self._resnet_fn(inputs, is_training)
-7,815,563,324,686,673,000
Returns the ResNet model for a given size and number of output classes. Args: inputs: a `Tensor` with shape [batch_size, height, width, 3] representing a batch of images. is_training: `bool` if True, the model is in training mode. Returns: a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5]. The values are corresponding feature hierarchy in ResNet with shape [batch_size, height_l, width_l, num_filters].
models/official/detection/modeling/architecture/resnet.py
__call__
hoangphucITJP/tpu
python
def __call__(self, inputs, is_training=False): 'Returns the ResNet model for a given size and number of output classes.\n\n Args:\n inputs: a `Tensor` with shape [batch_size, height, width, 3] representing\n a batch of images.\n is_training: `bool` if True, the model is in training mode.\n\n Returns:\n a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].\n The values are corresponding feature hierarchy in ResNet with shape\n [batch_size, height_l, width_l, num_filters].\n ' with tf.variable_scope(('resnet%s' % self._resnet_depth)): return self._resnet_fn(inputs, is_training)
def resnet_v1_generator(self, block_fn, layers): 'Generator for ResNet v1 models.\n\n Args:\n block_fn: `function` for the block to use within the model. Either\n `residual_block` or `bottleneck_block`.\n layers: list of 4 `int`s denoting the number of blocks to include in each\n of the 4 block groups. Each group consists of blocks that take inputs of\n the same resolution.\n\n Returns:\n Model `function` that takes in `inputs` and `is_training` and returns the\n output `Tensor` of the ResNet model.\n ' def model(inputs, is_training=False): 'Creation of the model graph.' inputs = nn_ops.conv2d_fixed_padding(inputs=inputs, filters=64, kernel_size=7, strides=2, data_format=self._data_format) inputs = tf.identity(inputs, 'initial_conv') inputs = self._batch_norm_relu(inputs, is_training=is_training) inputs = tf.layers.max_pooling2d(inputs=inputs, pool_size=3, strides=2, padding='SAME', data_format=self._data_format) inputs = tf.identity(inputs, 'initial_max_pool') c2 = block_group(inputs=inputs, filters=64, strides=1, use_projection=True, block_fn=block_fn, block_repeats=layers[0], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 2, 5), name='block_group1', is_training=is_training) c3 = block_group(inputs=c2, filters=128, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[1], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 3, 5), name='block_group2', is_training=is_training) c4 = block_group(inputs=c3, filters=256, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[2], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 4, 5), name='block_group3', is_training=is_training) c5 = block_group(inputs=c4, filters=512, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[3], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 5, 5), name='block_group4', is_training=is_training) return {2: c2, 3: c3, 4: c4, 5: c5} return model
-2,155,756,164,103,213,800
Generator for ResNet v1 models. Args: block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layers: list of 4 `int`s denoting the number of blocks to include in each of the 4 block groups. Each group consists of blocks that take inputs of the same resolution. Returns: Model `function` that takes in `inputs` and `is_training` and returns the output `Tensor` of the ResNet model.
models/official/detection/modeling/architecture/resnet.py
resnet_v1_generator
hoangphucITJP/tpu
python
def resnet_v1_generator(self, block_fn, layers): 'Generator for ResNet v1 models.\n\n Args:\n block_fn: `function` for the block to use within the model. Either\n `residual_block` or `bottleneck_block`.\n layers: list of 4 `int`s denoting the number of blocks to include in each\n of the 4 block groups. Each group consists of blocks that take inputs of\n the same resolution.\n\n Returns:\n Model `function` that takes in `inputs` and `is_training` and returns the\n output `Tensor` of the ResNet model.\n ' def model(inputs, is_training=False): 'Creation of the model graph.' inputs = nn_ops.conv2d_fixed_padding(inputs=inputs, filters=64, kernel_size=7, strides=2, data_format=self._data_format) inputs = tf.identity(inputs, 'initial_conv') inputs = self._batch_norm_relu(inputs, is_training=is_training) inputs = tf.layers.max_pooling2d(inputs=inputs, pool_size=3, strides=2, padding='SAME', data_format=self._data_format) inputs = tf.identity(inputs, 'initial_max_pool') c2 = block_group(inputs=inputs, filters=64, strides=1, use_projection=True, block_fn=block_fn, block_repeats=layers[0], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 2, 5), name='block_group1', is_training=is_training) c3 = block_group(inputs=c2, filters=128, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[1], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 3, 5), name='block_group2', is_training=is_training) c4 = block_group(inputs=c3, filters=256, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[2], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 4, 5), name='block_group3', is_training=is_training) c5 = block_group(inputs=c4, filters=512, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[3], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 5, 5), name='block_group4', is_training=is_training) return {2: c2, 3: c3, 4: c4, 5: c5} return model
def model(inputs, is_training=False): 'Creation of the model graph.' inputs = nn_ops.conv2d_fixed_padding(inputs=inputs, filters=64, kernel_size=7, strides=2, data_format=self._data_format) inputs = tf.identity(inputs, 'initial_conv') inputs = self._batch_norm_relu(inputs, is_training=is_training) inputs = tf.layers.max_pooling2d(inputs=inputs, pool_size=3, strides=2, padding='SAME', data_format=self._data_format) inputs = tf.identity(inputs, 'initial_max_pool') c2 = block_group(inputs=inputs, filters=64, strides=1, use_projection=True, block_fn=block_fn, block_repeats=layers[0], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 2, 5), name='block_group1', is_training=is_training) c3 = block_group(inputs=c2, filters=128, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[1], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 3, 5), name='block_group2', is_training=is_training) c4 = block_group(inputs=c3, filters=256, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[2], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 4, 5), name='block_group3', is_training=is_training) c5 = block_group(inputs=c4, filters=512, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[3], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 5, 5), name='block_group4', is_training=is_training) return {2: c2, 3: c3, 4: c4, 5: c5}
2,307,457,148,709,569,500
Creation of the model graph.
models/official/detection/modeling/architecture/resnet.py
model
hoangphucITJP/tpu
python
def model(inputs, is_training=False): inputs = nn_ops.conv2d_fixed_padding(inputs=inputs, filters=64, kernel_size=7, strides=2, data_format=self._data_format) inputs = tf.identity(inputs, 'initial_conv') inputs = self._batch_norm_relu(inputs, is_training=is_training) inputs = tf.layers.max_pooling2d(inputs=inputs, pool_size=3, strides=2, padding='SAME', data_format=self._data_format) inputs = tf.identity(inputs, 'initial_max_pool') c2 = block_group(inputs=inputs, filters=64, strides=1, use_projection=True, block_fn=block_fn, block_repeats=layers[0], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 2, 5), name='block_group1', is_training=is_training) c3 = block_group(inputs=c2, filters=128, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[1], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 3, 5), name='block_group2', is_training=is_training) c4 = block_group(inputs=c3, filters=256, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[2], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 4, 5), name='block_group3', is_training=is_training) c5 = block_group(inputs=c4, filters=512, strides=2, use_projection=True, block_fn=block_fn, block_repeats=layers[3], batch_norm_relu=self._batch_norm_relu, dropblock=self._dropblock, drop_connect_rate=get_drop_connect_rate(self._init_drop_connect_rate, 5, 5), name='block_group4', is_training=is_training) return {2: c2, 3: c3, 4: c4, 5: c5}
def null_boolean_form_value(bool_value): '\n Return the value for a NullBooleanSelect widget based on bool_value\n ' return {True: '2', False: '3', None: '1'}.get(bool_value)
3,008,597,734,390,281,000
Return the value for a NullBooleanSelect widget based on bool_value
utils/utils.py
null_boolean_form_value
tperrier/mwachx
python
def null_boolean_form_value(bool_value): '\n \n ' return {True: '2', False: '3', None: '1'}.get(bool_value)
def null_boolean_from_form(form_value): '\n Return the boolean value based on a NullBooleanSelect form value\n ' return {'1': None, '2': True, '3': False}.get(form_value)
4,283,649,419,290,576,400
Return the boolean value based on a NullBooleanSelect form value
utils/utils.py
null_boolean_from_form
tperrier/mwachx
python
def null_boolean_from_form(form_value): '\n \n ' return {'1': None, '2': True, '3': False}.get(form_value)
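The two helpers above are inverses over Django's NullBooleanSelect encoding; a quick round trip:

print(null_boolean_form_value(True))   # '2'
print(null_boolean_form_value(None))   # '1'
print(null_boolean_from_form('3'))     # False
print(null_boolean_from_form(null_boolean_form_value(True)))  # True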
def days_as_str(days): ' Return a short string version of days ' if ((- 7) <= days <= 7): return '{:d}d'.format(days) return '{:d}w'.format(int(round((days / 7.0))))
556,966,761,621,087,900
Return a short string version of days
utils/utils.py
days_as_str
tperrier/mwachx
python
def days_as_str(days): ' ' if ((- 7) <= days <= 7): return '{:d}d'.format(days) return '{:d}w'.format(int(round((days / 7.0))))
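days_as_str keeps values within a week as days and rounds everything else to whole weeks:

print(days_as_str(3))    # '3d'
print(days_as_str(-5))   # '-5d'
print(days_as_str(10))   # '1w' (10 / 7 rounds to 1)
print(days_as_str(21))   # '3w'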
def sqlite_date_diff(start_date, end_date, days=False): ' return a Django ORM Expression for the number of seconds/days between start_date and end_date ' scale = (86400 if (days is False) else 1) return db.ExpressionWrapper(((SQLiteDate(end_date) - SQLiteDate(start_date)) * scale), db.IntegerField())
3,487,949,514,431,362,600
return a Django ORM Expression for the number of seconds/days between start_date and end_date
utils/utils.py
sqlite_date_diff
tperrier/mwachx
python
def sqlite_date_diff(start_date, end_date, days=False): ' ' scale = (86400 if (days is False) else 1) return db.ExpressionWrapper(((SQLiteDate(end_date) - SQLiteDate(start_date)) * scale), db.IntegerField())
def sql_count_when(*qargs, **kwargs): ' qargs : list of models.Q objects\n kwargs : filter_term=value dict\n ' condition = db.Q(**kwargs) for q in qargs: condition &= q return db.Count(db.Case(db.When(condition, then=1), output_field=db.IntegerField()))
686,636,396,861,094,700
qargs : list of models.Q objects kwargs : filter_term=value dict
utils/utils.py
sql_count_when
tperrier/mwachx
python
def sql_count_when(*qargs, **kwargs): ' qargs : list of models.Q objects\n kwargs : filter_term=value dict\n ' condition = db.Q(**kwargs) for q in qargs: condition &= q return db.Count(db.Case(db.When(condition, then=1), output_field=db.IntegerField()))
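A hedged usage sketch of sql_count_when inside a Django annotate() call; the Participant model and its fields are hypothetical, and db is assumed to be django.db.models as in the helpers above.

from django.db import models as db

# Count each participant's outgoing messages that are still unread (hypothetical fields).
participants = Participant.objects.annotate(
    unread_outgoing=sql_count_when(db.Q(is_outgoing=True), is_read=False))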
def make_policy(representation_dim: int, action_dim: int, distribution: str, hidden_dimensions: List[int], nonlinearity: str, num_components: Optional[int]=None, num_actions: Optional[int]=None, action_bound: Optional[float]=None, layernorm: bool=False, log_param_min: float=(- 5), log_param_max: float=2) -> Union[(DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy)]: 'Constructs a policy network from a given config.\n\n The following config keys need to be specified:\n - "representation_dim": int\n - "action_dim": int\n - "distribution": str\n - "num_components": int\n - "action_bound": float\n - "hidden_dimensions": List[int]\n - "nonlinearity": str\n - "layernorm": bool\n - "log_param_min": Optional[float]\n - "log_param_max": Optional[float]\n\n Parameters\n ----------\n representation_dim: int\n Dimensionality of the vector state space of the environment.\n action_dim: int\n Number of action dimensions in the environment.\n distribution: str\n Name of the policy distribution as string ["discrete", "beta", "normal"].\n hidden_dimensions: List[int]\n List specification of the MLP policy. Each int element in the list represents a hidden\n layer in the network with the respective number of neurons.\n nonlinearity: str\n Nonlinearity (activation function) used in the policy network.\n num_components: Optional[int] = None\n Number of components for mixture distributions.\n num_actions: Optional[int] = None\n Number of available actions. Used in the discrete policy.\n action_bound: Optional[float] = None\n Action bounds for the squashed normal or squashed GMM policy.\n layernorm: bool = False\n Use Layernorm in the policy network if set to True.\n log_param_min: float = -5\n Lower bound of the learned log parameters (standard deviation for Normal distributions).\n log_param_max: float = 2\n Upper bound of the learned log parameters.\n\n Returns\n -------\n Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]\n Policy network intance.\n ' distribution = _process_str(distribution) nonlinearity = _process_str(nonlinearity) if (distribution == 'discrete'): return DiscretePolicy(representation_dim=representation_dim, action_dim=action_dim, num_actions=cast(int, num_actions), hidden_dimensions=hidden_dimensions, nonlinearity=nonlinearity, layernorm=layernorm) elif (distribution == 'beta'): assert num_components return GeneralizedBetaPolicy(representation_dim=representation_dim, action_dim=action_dim, action_bound=cast(float, action_bound), hidden_dimensions=hidden_dimensions, nonlinearity=nonlinearity, layernorm=layernorm, log_param_min=log_param_min, log_param_max=log_param_max) else: assert num_components if (1 < num_components): return DiagonalGMMPolicy(representation_dim=representation_dim, action_dim=action_dim, num_components=num_components, action_bound=action_bound, hidden_dimensions=hidden_dimensions, nonlinearity=nonlinearity, layernorm=layernorm, log_param_min=log_param_min, log_param_max=log_param_max) else: return DiagonalNormalPolicy(representation_dim=representation_dim, action_dim=action_dim, action_bound=action_bound, hidden_dimensions=hidden_dimensions, nonlinearity=nonlinearity, layernorm=layernorm, log_param_min=log_param_min, log_param_max=log_param_max)
-2,696,701,415,997,946,400
Constructs a policy network from a given config. The following config keys need to be specified: - "representation_dim": int - "action_dim": int - "distribution": str - "num_components": int - "action_bound": float - "hidden_dimensions": List[int] - "nonlinearity": str - "layernorm": bool - "log_param_min": Optional[float] - "log_param_max": Optional[float] Parameters ---------- representation_dim: int Dimensionality of the vector state space of the environment. action_dim: int Number of action dimensions in the environment. distribution: str Name of the policy distribution as string ["discrete", "beta", "normal"]. hidden_dimensions: List[int] List specification of the MLP policy. Each int element in the list represents a hidden layer in the network with the respective number of neurons. nonlinearity: str Nonlinearity (activation function) used in the policy network. num_components: Optional[int] = None Number of components for mixture distributions. num_actions: Optional[int] = None Number of available actions. Used in the discrete policy. action_bound: Optional[float] = None Action bounds for the squashed normal or squashed GMM policy. layernorm: bool = False Use Layernorm in the policy network if set to True. log_param_min: float = -5 Lower bound of the learned log parameters (standard deviation for Normal distributions). log_param_max: float = 2 Upper bound of the learned log parameters. Returns ------- Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy] Policy network instance.
alphazero/network/policies.py
make_policy
timoklein/A0C
python
def make_policy(representation_dim: int, action_dim: int, distribution: str, hidden_dimensions: List[int], nonlinearity: str, num_components: Optional[int]=None, num_actions: Optional[int]=None, action_bound: Optional[float]=None, layernorm: bool=False, log_param_min: float=(- 5), log_param_max: float=2) -> Union[(DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy)]: 'Constructs a policy network from a given config.\n\n The following config keys need to be specified:\n - "representation_dim": int\n - "action_dim": int\n - "distribution": str\n - "num_components": int\n - "action_bound": float\n - "hidden_dimensions": List[int]\n - "nonlinearity": str\n - "layernorm": bool\n - "log_param_min": Optional[float]\n - "log_param_max": Optional[float]\n\n Parameters\n ----------\n representation_dim: int\n Dimensionality of the vector state space of the environment.\n action_dim: int\n Number of action dimensions in the environment.\n distribution: str\n Name of the policy distribution as string ["discrete", "beta", "normal"].\n hidden_dimensions: List[int]\n List specification of the MLP policy. Each int element in the list represents a hidden\n layer in the network with the respective number of neurons.\n nonlinearity: str\n Nonlinearity (activation function) used in the policy network.\n num_components: Optional[int] = None\n Number of components for mixture distributions.\n num_actions: Optional[int] = None\n Number of available actions. Used in the discrete policy.\n action_bound: Optional[float] = None\n Action bounds for the squashed normal or squashed GMM policy.\n layernorm: bool = False\n Use Layernorm in the policy network if set to True.\n log_param_min: float = -5\n Lower bound of the learned log parameters (standard deviation for Normal distributions).\n log_param_max: float = 2\n Upper bound of the learned log parameters.\n\n Returns\n -------\n Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]\n Policy network intance.\n ' distribution = _process_str(distribution) nonlinearity = _process_str(nonlinearity) if (distribution == 'discrete'): return DiscretePolicy(representation_dim=representation_dim, action_dim=action_dim, num_actions=cast(int, num_actions), hidden_dimensions=hidden_dimensions, nonlinearity=nonlinearity, layernorm=layernorm) elif (distribution == 'beta'): assert num_components return GeneralizedBetaPolicy(representation_dim=representation_dim, action_dim=action_dim, action_bound=cast(float, action_bound), hidden_dimensions=hidden_dimensions, nonlinearity=nonlinearity, layernorm=layernorm, log_param_min=log_param_min, log_param_max=log_param_max) else: assert num_components if (1 < num_components): return DiagonalGMMPolicy(representation_dim=representation_dim, action_dim=action_dim, num_components=num_components, action_bound=action_bound, hidden_dimensions=hidden_dimensions, nonlinearity=nonlinearity, layernorm=layernorm, log_param_min=log_param_min, log_param_max=log_param_max) else: return DiagonalNormalPolicy(representation_dim=representation_dim, action_dim=action_dim, action_bound=action_bound, hidden_dimensions=hidden_dimensions, nonlinearity=nonlinearity, layernorm=layernorm, log_param_min=log_param_min, log_param_max=log_param_max)
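An example call under assumed settings; per the branching above, a 'normal' distribution with num_components=1 yields a DiagonalNormalPolicy (the dimensions and bounds are placeholders).

policy = make_policy(
    representation_dim=8,    # state vector size (placeholder)
    action_dim=2,            # two continuous action dimensions
    distribution='normal',
    hidden_dimensions=[64, 64],
    nonlinearity='relu',
    num_components=1,        # single component -> DiagonalNormalPolicy
    action_bound=1.0)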
def __repr__(self) -> str: '\n Returns\n -------\n str\n String representation of this instance.\n ' components: int = getattr(self, 'num_components', 1) return f'class={type(self).__name__}, distribution={self.distribution_type}, components={components}, state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}'
7,866,934,010,203,949,000
Returns ------- str String representation of this instance.
alphazero/network/policies.py
__repr__
timoklein/A0C
python
def __repr__(self) -> str: '\n Returns\n -------\n str\n String representation of this instance.\n ' components: int = getattr(self, 'num_components', 1) return f'class={type(self).__name__}, distribution={self.distribution_type}, components={components}, state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}'
def __repr__(self) -> str: '\n Returns\n -------\n str\n String representation of this instance.\n ' return f'class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, state_dim={self.state_dim}, action_dim={self.action_dim}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}'
5,086,200,092,164,458,000
Returns ------- str String representation of this instance.
alphazero/network/policies.py
__repr__
timoklein/A0C
python
def __repr__(self) -> str: '\n Returns\n -------\n str\n String representation of this instance.\n ' return f'class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, state_dim={self.state_dim}, action_dim={self.action_dim}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}'
def _get_dist_params(self, x: torch.Tensor) -> Tuple[(torch.FloatTensor, torch.FloatTensor)]: 'Returns the learned parameters of the distribution.\n\n    Parameters\n    ----------\n    x : torch.FloatTensor\n        Input state tensor.\n\n    Returns\n    -------\n    Tuple[torch.FloatTensor, torch.FloatTensor]\n        Logits of the categorical distribution (pi_logits), State value estimate (V_hat).\n    ' x = self.trunk(x) V_hat = self.value_head(x) pi_logits = self.dist_head(x) return (pi_logits, V_hat)
1,357,116,296,710,905,900
Returns the learned parameters of the distribution. Parameters ---------- x : torch.FloatTensor Input state tensor. Returns ------- Tuple[torch.FloatTensor, torch.FloatTensor] Logits of the categorical distribution (pi_logits), State value estimate (V_hat).
alphazero/network/policies.py
_get_dist_params
timoklein/A0C
python
def _get_dist_params(self, x: torch.Tensor) -> Tuple[(torch.FloatTensor, torch.FloatTensor)]: 'Returns the learned parameters of the distribution.\n\n    Parameters\n    ----------\n    x : torch.FloatTensor\n        Input state tensor.\n\n    Returns\n    -------\n    Tuple[torch.FloatTensor, torch.FloatTensor]\n        Logits of the categorical distribution (pi_logits), State value estimate (V_hat).\n    ' x = self.trunk(x) V_hat = self.value_head(x) pi_logits = self.dist_head(x) return (pi_logits, V_hat)
def forward(self, x: torch.FloatTensor) -> Tuple[(D.Categorical, torch.FloatTensor)]: 'Forward pass of the model.\n\n    Parameters\n    ----------\n    x : torch.FloatTensor\n        Input state tensor.\n\n    Returns\n    -------\n    Tuple[D.Categorical, torch.FloatTensor]\n        Categorical distribution (dist), State value estimate (V_hat).\n    ' (pi_logits, V_hat) = self._get_dist_params(x) dist = D.Categorical(logits=pi_logits) return (dist, V_hat)
-4,870,456,434,438,947,000
Forward pass of the model. Parameters ---------- x : torch.FloatTensor Input state tensor. Returns ------- Tuple[D.Categorical, torch.FloatTensor] Categorical distribution (dist), State value estimate (V_hat).
alphazero/network/policies.py
forward
timoklein/A0C
python
def forward(self, x: torch.FloatTensor) -> Tuple[(D.Categorical, torch.FloatTensor)]: 'Forward pass of the model.\n\n    Parameters\n    ----------\n    x : torch.FloatTensor\n        Input state tensor.\n\n    Returns\n    -------\n    Tuple[D.Categorical, torch.FloatTensor]\n        Categorical distribution (dist), State value estimate (V_hat).\n    ' (pi_logits, V_hat) = self._get_dist_params(x) dist = D.Categorical(logits=pi_logits) return (dist, V_hat)
def forward(self, x: torch.FloatTensor) -> Tuple[(torch.FloatTensor, torch.FloatTensor, torch.FloatTensor)]: 'Returns the learned paremters of the distribution.\n\n Parameters\n ----------\n x : torch.FloatTensor\n Input state tensor.\n\n Returns\n -------\n Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]\n Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).\n ' x = self.trunk(x) V_hat = self.value_head(x) (mu, log_std) = self.dist_head(x).chunk(2, dim=(- 1)) log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max) sigma = log_std.exp() return (mu, sigma, V_hat)
-357,320,804,641,296,960
Returns the learned parameters of the distribution. Parameters ---------- x : torch.FloatTensor Input state tensor. Returns ------- Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor] Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
alphazero/network/policies.py
forward
timoklein/A0C
python
def forward(self, x: torch.FloatTensor) -> Tuple[(torch.FloatTensor, torch.FloatTensor, torch.FloatTensor)]: 'Returns the learned paremters of the distribution.\n\n Parameters\n ----------\n x : torch.FloatTensor\n Input state tensor.\n\n Returns\n -------\n Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]\n Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).\n ' x = self.trunk(x) V_hat = self.value_head(x) (mu, log_std) = self.dist_head(x).chunk(2, dim=(- 1)) log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max) sigma = log_std.exp() return (mu, sigma, V_hat)
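For context, a short sketch (not repository code) of how the (mu, sigma) returned above are commonly turned into a bounded action for a squashed Normal policy; the tanh squashing and the action_bound scaling are assumptions about the surrounding policy class.

import torch
import torch.distributions as D

# mu, sigma, V_hat = policy(state)   # from the forward pass above
dist = D.Normal(mu, sigma)
raw_action = dist.rsample()                      # reparameterized sample
action = action_bound * torch.tanh(raw_action)   # squash into [-action_bound, action_bound]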
def forward(self, x: torch.FloatTensor) -> Tuple[(torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor)]: 'Returns the learned paremters of the distribution.\n\n Parameters\n ----------\n x : torch.FloatTensor\n Input state tensor.\n\n Returns\n -------\n Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]\n Distribution mean (mu), Distribution standard deviation (sigma),\n Logits for the categorical distribution parameterizing the components (log_coeffs),\n State value estimate (V_hat).\n ' x = self.trunk(x) V_hat = self.value_head(x) mixture_params = self.dist_head(x) dist_params = mixture_params[..., :((self.num_components * 2) * self.action_dim)].view(x.shape[0], (- 1)) log_coeff = mixture_params[..., (- self.num_components):] (mu, log_std) = dist_params.chunk(2, dim=(- 1)) log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max) sigma = log_std.exp() return (mu, sigma, log_coeff, V_hat)
-252,120,947,399,175,230
Returns the learned parameters of the distribution. Parameters ---------- x : torch.FloatTensor Input state tensor. Returns ------- Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor] Distribution mean (mu), Distribution standard deviation (sigma), Logits for the categorical distribution parameterizing the components (log_coeffs), State value estimate (V_hat).
alphazero/network/policies.py
forward
timoklein/A0C
python
def forward(self, x: torch.FloatTensor) -> Tuple[(torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor)]: 'Returns the learned paremters of the distribution.\n\n Parameters\n ----------\n x : torch.FloatTensor\n Input state tensor.\n\n Returns\n -------\n Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]\n Distribution mean (mu), Distribution standard deviation (sigma),\n Logits for the categorical distribution parameterizing the components (log_coeffs),\n State value estimate (V_hat).\n ' x = self.trunk(x) V_hat = self.value_head(x) mixture_params = self.dist_head(x) dist_params = mixture_params[..., :((self.num_components * 2) * self.action_dim)].view(x.shape[0], (- 1)) log_coeff = mixture_params[..., (- self.num_components):] (mu, log_std) = dist_params.chunk(2, dim=(- 1)) log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max) sigma = log_std.exp() return (mu, sigma, log_coeff, V_hat)
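A hedged sketch of assembling a diagonal Gaussian mixture from the (mu, sigma, log_coeff) returned above using torch.distributions; the reshape to [batch, num_components, action_dim] is an assumption about how the flat parameter vector is laid out.

import torch.distributions as D

batch = mu.shape[0]
mu_k = mu.view(batch, num_components, -1)        # [batch, K, action_dim]
sigma_k = sigma.view(batch, num_components, -1)
mix = D.Categorical(logits=log_coeff)            # mixture weights
comp = D.Independent(D.Normal(mu_k, sigma_k), 1) # diagonal Gaussian components
gmm = D.MixtureSameFamily(mix, comp)
action = gmm.sample()                            # [batch, action_dim]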
def forward(self, x: torch.FloatTensor) -> Tuple[(torch.FloatTensor, torch.FloatTensor, torch.FloatTensor)]: 'Returns the learned paremters of the distribution.\n\n Parameters\n ----------\n x : torch.FloatTensor\n Input state tensor.\n\n Returns\n -------\n Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]\n Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).\n ' x = self.trunk(x) V_hat = self.value_head(x) dist_params = self.dist_head(x) dist_params = torch.clamp(dist_params, min=self.log_param_min, max=self.log_param_max) (alpha, beta) = dist_params.exp().chunk(2, dim=(- 1)) return (alpha, beta, V_hat)
-6,677,820,801,602,087,000
Returns the learned parameters of the distribution. Parameters ---------- x : torch.FloatTensor Input state tensor. Returns ------- Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor] Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
alphazero/network/policies.py
forward
timoklein/A0C
python
def forward(self, x: torch.FloatTensor) -> Tuple[(torch.FloatTensor, torch.FloatTensor, torch.FloatTensor)]: 'Returns the learned paremters of the distribution.\n\n Parameters\n ----------\n x : torch.FloatTensor\n Input state tensor.\n\n Returns\n -------\n Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]\n Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).\n ' x = self.trunk(x) V_hat = self.value_head(x) dist_params = self.dist_head(x) dist_params = torch.clamp(dist_params, min=self.log_param_min, max=self.log_param_max) (alpha, beta) = dist_params.exp().chunk(2, dim=(- 1)) return (alpha, beta, V_hat)
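Similarly, a sketch (an assumption, not repository code) of turning the (alpha, beta) parameters above into a bounded action via a Beta distribution:

import torch.distributions as D

dist = D.Beta(alpha, beta)
u = dist.rsample()                         # sample in (0, 1)
action = action_bound * (2.0 * u - 1.0)    # affine map to [-action_bound, action_bound]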
def register(cls): '\n A decorator to register new table configuration classes.\n ' TABLE_LIST.append(cls) return cls
-7,601,627,589,921,386,000
A decorator to register new table configuration classes.
census_data_downloader/core/decorators.py
register
JoeGermuska/census-data-downloader
python
def register(cls): '\n \n ' TABLE_LIST.append(cls) return cls
def downloader(func): '\n A decorator to download data inside a table configuration class.\n ' def inner(*args, **kwargs): table_config = args[0] downloader_klass = func(table_config) for year in table_config.years_to_download: downloader = downloader_klass(table_config, year) downloader.download() downloader.process() return inner
2,947,467,455,539,673,000
A decorator to download data inside a table configuration class.
census_data_downloader/core/decorators.py
downloader
JoeGermuska/census-data-downloader
python
def downloader(func): '\n \n ' def inner(*args, **kwargs): table_config = args[0] downloader_klass = func(table_config) for year in table_config.years_to_download: downloader = downloader_klass(table_config, year) downloader.download() downloader.process() return inner
def __init__(self): 'Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method' self.setup_clean_chain = False self.nodes = [] self.mocktime = 0 self.rpc_timewait = 600 self.supports_cli = False self.set_test_params() assert hasattr(self, 'num_nodes'), 'Test must set self.num_nodes in set_test_params()'
4,240,132,569,780,004,000
Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method
test/functional/test_framework/test_framework.py
__init__
THYMESIA-SECURITIES/T-Notes
python
def __init__(self): self.setup_clean_chain = False self.nodes = [] self.mocktime = 0 self.rpc_timewait = 600 self.supports_cli = False self.set_test_params() assert hasattr(self, 'num_nodes'), 'Test must set self.num_nodes in set_test_params()'
def main(self): 'Main function. This should not be overridden by the subclass test scripts.' parser = optparse.OptionParser(usage='%prog [options]') parser.add_option('--nocleanup', dest='nocleanup', default=False, action='store_true', help='Leave t_notesds and test.* datadir on exit or error') parser.add_option('--noshutdown', dest='noshutdown', default=False, action='store_true', help="Don't stop t_notesds after the test execution") parser.add_option('--srcdir', dest='srcdir', default=os.path.normpath((os.path.dirname(os.path.realpath(__file__)) + '/../../../src')), help='Source directory containing t_notesd/t_notes-cli (default: %default)') parser.add_option('--cachedir', dest='cachedir', default=os.path.normpath((os.path.dirname(os.path.realpath(__file__)) + '/../../cache')), help='Directory for caching pregenerated datadirs') parser.add_option('--tmpdir', dest='tmpdir', help='Root directory for datadirs') parser.add_option('-l', '--loglevel', dest='loglevel', default='INFO', help='log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.') parser.add_option('--tracerpc', dest='trace_rpc', default=False, action='store_true', help='Print out all RPC calls as they are made') parser.add_option('--portseed', dest='port_seed', default=os.getpid(), type='int', help='The seed to use for assigning port numbers (default: current process id)') parser.add_option('--coveragedir', dest='coveragedir', help='Write tested RPC commands into this directory') parser.add_option('--configfile', dest='configfile', help='Location of the test framework config file') parser.add_option('--legacywallet', dest='legacywallet', default=False, action='store_true', help='create pre-HD wallets only') parser.add_option('--tiertwo', dest='tiertwo', default=False, action='store_true', help='run tier two tests only') parser.add_option('--sapling', dest='sapling', default=False, action='store_true', help='run tier two tests only') parser.add_option('--pdbonfailure', dest='pdbonfailure', default=False, action='store_true', help='Attach a python debugger if test fails') parser.add_option('--usecli', dest='usecli', default=False, action='store_true', help='use t_notes-cli instead of RPC for all commands') self.add_options(parser) (self.options, self.args) = parser.parse_args() PortSeed.n = self.options.port_seed os.environ['PATH'] = ((((self.options.srcdir + ':') + self.options.srcdir) + '/qt:') + os.environ['PATH']) check_json_precision() self.options.cachedir = os.path.abspath(self.options.cachedir) if self.options.tmpdir: self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) self._start_logging() success = TestStatus.FAILED try: if (self.options.usecli and (not self.supports_cli)): raise SkipTest('--usecli specified but test does not support using CLI') self.setup_chain() self.setup_network() time.sleep(5) self.run_test() success = TestStatus.PASSED except JSONRPCException as e: self.log.exception('JSONRPC error') except SkipTest as e: self.log.warning(('Test Skipped: %s' % e.message)) success = TestStatus.SKIPPED except AssertionError as e: self.log.exception('Assertion failed') except KeyError as e: self.log.exception('Key error') except Exception as e: self.log.exception('Unexpected exception caught during testing') except KeyboardInterrupt as e: self.log.warning('Exiting after keyboard interrupt') if ((success == TestStatus.FAILED) and self.options.pdbonfailure): print('Testcase failed. Attaching python debugger. Enter ? for help') pdb.set_trace() if (not self.options.noshutdown): self.log.info('Stopping nodes') if self.nodes: self.stop_nodes() else: for node in self.nodes: node.cleanup_on_exit = False self.log.info('Note: t_notesds were not stopped and may still be running') if ((not self.options.nocleanup) and (not self.options.noshutdown) and (success != TestStatus.FAILED)): self.log.info('Cleaning up') shutil.rmtree(self.options.tmpdir) else: self.log.warning(('Not cleaning up dir %s' % self.options.tmpdir)) if (success == TestStatus.PASSED): self.log.info('Tests successful') exit_code = TEST_EXIT_PASSED elif (success == TestStatus.SKIPPED): self.log.info('Test skipped') exit_code = TEST_EXIT_SKIPPED else: self.log.error('Test failed. Test logging available at %s/test_framework.log', self.options.tmpdir) self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath((os.path.dirname(os.path.realpath(__file__)) + '/../combine_logs.py')), self.options.tmpdir)) exit_code = TEST_EXIT_FAILED logging.shutdown() sys.exit(exit_code)
2,440,348,207,718,470,700
Main function. This should not be overridden by the subclass test scripts.
test/functional/test_framework/test_framework.py
main
THYMESIA-SECURITIES/T-Notes
python
def main(self): parser = optparse.OptionParser(usage='%prog [options]') parser.add_option('--nocleanup', dest='nocleanup', default=False, action='store_true', help='Leave t_notesds and test.* datadir on exit or error') parser.add_option('--noshutdown', dest='noshutdown', default=False, action='store_true', help="Don't stop t_notesds after the test execution") parser.add_option('--srcdir', dest='srcdir', default=os.path.normpath((os.path.dirname(os.path.realpath(__file__)) + '/../../../src')), help='Source directory containing t_notesd/t_notes-cli (default: %default)') parser.add_option('--cachedir', dest='cachedir', default=os.path.normpath((os.path.dirname(os.path.realpath(__file__)) + '/../../cache')), help='Directory for caching pregenerated datadirs') parser.add_option('--tmpdir', dest='tmpdir', help='Root directory for datadirs') parser.add_option('-l', '--loglevel', dest='loglevel', default='INFO', help='log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.') parser.add_option('--tracerpc', dest='trace_rpc', default=False, action='store_true', help='Print out all RPC calls as they are made') parser.add_option('--portseed', dest='port_seed', default=os.getpid(), type='int', help='The seed to use for assigning port numbers (default: current process id)') parser.add_option('--coveragedir', dest='coveragedir', help='Write tested RPC commands into this directory') parser.add_option('--configfile', dest='configfile', help='Location of the test framework config file') parser.add_option('--legacywallet', dest='legacywallet', default=False, action='store_true', help='create pre-HD wallets only') parser.add_option('--tiertwo', dest='tiertwo', default=False, action='store_true', help='run tier two tests only') parser.add_option('--sapling', dest='sapling', default=False, action='store_true', help='run tier two tests only') parser.add_option('--pdbonfailure', dest='pdbonfailure', default=False, action='store_true', help='Attach a python debugger if test fails') parser.add_option('--usecli', dest='usecli', default=False, action='store_true', help='use t_notes-cli instead of RPC for all commands') self.add_options(parser) (self.options, self.args) = parser.parse_args() PortSeed.n = self.options.port_seed os.environ['PATH'] = ((((self.options.srcdir + ':') + self.options.srcdir) + '/qt:') + os.environ['PATH']) check_json_precision() self.options.cachedir = os.path.abspath(self.options.cachedir) if self.options.tmpdir: self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) self._start_logging() success = TestStatus.FAILED try: if (self.options.usecli and (not self.supports_cli)): raise SkipTest('--usecli specified but test does not support using CLI') self.setup_chain() self.setup_network() time.sleep(5) self.run_test() success = TestStatus.PASSED except JSONRPCException as e: self.log.exception('JSONRPC error') except SkipTest as e: self.log.warning(('Test Skipped: %s' % e.message)) success = TestStatus.SKIPPED except AssertionError as e: self.log.exception('Assertion failed') except KeyError as e: self.log.exception('Key error') except Exception as e: self.log.exception('Unexpected exception caught during testing') except KeyboardInterrupt as e: self.log.warning('Exiting after keyboard interrupt') if ((success == TestStatus.FAILED) and self.options.pdbonfailure): print('Testcase failed. Attaching python debugger. Enter ? for help') pdb.set_trace() if (not self.options.noshutdown): self.log.info('Stopping nodes') if self.nodes: self.stop_nodes() else: for node in self.nodes: node.cleanup_on_exit = False self.log.info('Note: t_notesds were not stopped and may still be running') if ((not self.options.nocleanup) and (not self.options.noshutdown) and (success != TestStatus.FAILED)): self.log.info('Cleaning up') shutil.rmtree(self.options.tmpdir) else: self.log.warning(('Not cleaning up dir %s' % self.options.tmpdir)) if (success == TestStatus.PASSED): self.log.info('Tests successful') exit_code = TEST_EXIT_PASSED elif (success == TestStatus.SKIPPED): self.log.info('Test skipped') exit_code = TEST_EXIT_SKIPPED else: self.log.error('Test failed. Test logging available at %s/test_framework.log', self.options.tmpdir) self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath((os.path.dirname(os.path.realpath(__file__)) + '/../combine_logs.py')), self.options.tmpdir)) exit_code = TEST_EXIT_FAILED logging.shutdown() sys.exit(exit_code)
def set_test_params(self): 'Tests must override this method to change default values for number of nodes, topology, etc' raise NotImplementedError
-2,804,650,915,158,870,000
Tests must override this method to change default values for number of nodes, topology, etc
test/functional/test_framework/test_framework.py
set_test_params
THYMESIA-SECURITIES/T-Notes
python
def set_test_params(self): raise NotImplementedError
def add_options(self, parser): 'Override this method to add command-line options to the test' pass
5,143,883,123,028,089,000
Override this method to add command-line options to the test
test/functional/test_framework/test_framework.py
add_options
THYMESIA-SECURITIES/T-Notes
python
def add_options(self, parser): pass
def setup_chain(self): 'Override this method to customize blockchain setup' self.log.info(('Initializing test directory ' + self.options.tmpdir)) if self.setup_clean_chain: self._initialize_chain_clean() else: self._initialize_chain()
8,333,220,049,645,672,000
Override this method to customize blockchain setup
test/functional/test_framework/test_framework.py
setup_chain
THYMESIA-SECURITIES/T-Notes
python
def setup_chain(self): self.log.info(('Initializing test directory ' + self.options.tmpdir)) if self.setup_clean_chain: self._initialize_chain_clean() else: self._initialize_chain()
def setup_network(self): 'Override this method to customize test network topology' self.setup_nodes() for i in range((self.num_nodes - 1)): connect_nodes(self.nodes[(i + 1)], i) self.sync_all()
3,102,635,630,890,468,400
Override this method to customize test network topology
test/functional/test_framework/test_framework.py
setup_network
THYMESIA-SECURITIES/T-Notes
python
def setup_network(self): self.setup_nodes() for i in range((self.num_nodes - 1)): connect_nodes(self.nodes[(i + 1)], i) self.sync_all()
def setup_nodes(self): 'Override this method to customize test node setup' extra_args = None if hasattr(self, 'extra_args'): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes()
-4,141,827,770,806,860,000
Override this method to customize test node setup
test/functional/test_framework/test_framework.py
setup_nodes
THYMESIA-SECURITIES/T-Notes
python
def setup_nodes(self): extra_args = None if hasattr(self, 'extra_args'): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes()
def run_test(self): 'Tests must override this method to define test logic' raise NotImplementedError
1,254,020,197,943,498,200
Tests must override this method to define test logic
test/functional/test_framework/test_framework.py
run_test
THYMESIA-SECURITIES/T-Notes
python
def run_test(self): raise NotImplementedError
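Taken together, the hooks above (set_test_params, add_options, setup_chain, setup_network, run_test) imply the following minimal test skeleton. This is a hypothetical example; the base-class name written here as BitcoinTestFramework is an assumption, since the class name does not appear in these records.

class ExampleTest(BitcoinTestFramework):   # hypothetical subclass
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True

    def run_test(self):
        self.log.info("Best block: %s" % self.nodes[0].getbestblockhash())

if __name__ == '__main__':
    ExampleTest().main()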
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None): 'Instantiate TestNode objects' if (extra_args is None): extra_args = ([[]] * num_nodes) if self.options.legacywallet: for arg in extra_args: arg.append('-legacywallet') self.log.info('Running test with legacy (pre-HD) wallet') if (binary is None): binary = ([None] * num_nodes) assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=self.rpc_timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
-513,096,944,268,560,830
Instantiate TestNode objects
test/functional/test_framework/test_framework.py
add_nodes
THYMESIA-SECURITIES/T-Notes
python
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None): if (extra_args is None): extra_args = ([[]] * num_nodes) if self.options.legacywallet: for arg in extra_args: arg.append('-legacywallet') self.log.info('Running test with legacy (pre-HD) wallet') if (binary is None): binary = ([None] * num_nodes) assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=self.rpc_timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs): 'Start a t_notesd' node = self.nodes[i] node.start(*args, **kwargs) node.wait_for_rpc_connection() time.sleep(10) if (self.options.coveragedir is not None): coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
-3,549,843,185,344,423,400
Start a t_notesd
test/functional/test_framework/test_framework.py
start_node
THYMESIA-SECURITIES/T-Notes
python
def start_node(self, i, *args, **kwargs): node = self.nodes[i] node.start(*args, **kwargs) node.wait_for_rpc_connection() time.sleep(10) if (self.options.coveragedir is not None): coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs): 'Start multiple t_notesds' if (extra_args is None): extra_args = ([None] * self.num_nodes) assert_equal(len(extra_args), self.num_nodes) try: for (i, node) in enumerate(self.nodes): node.start(extra_args[i], *args, **kwargs) for node in self.nodes: node.wait_for_rpc_connection() except: self.stop_nodes() raise time.sleep(10) if (self.options.coveragedir is not None): for node in self.nodes: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
3,588,767,066,100,310,000
Start multiple t_notesds
test/functional/test_framework/test_framework.py
start_nodes
THYMESIA-SECURITIES/T-Notes
python
def start_nodes(self, extra_args=None, *args, **kwargs): if (extra_args is None): extra_args = ([None] * self.num_nodes) assert_equal(len(extra_args), self.num_nodes) try: for (i, node) in enumerate(self.nodes): node.start(extra_args[i], *args, **kwargs) for node in self.nodes: node.wait_for_rpc_connection() except: self.stop_nodes() raise time.sleep(10) if (self.options.coveragedir is not None): for node in self.nodes: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i): 'Stop a t_notesd test node' self.nodes[i].stop_node() self.nodes[i].wait_until_stopped()
2,822,523,695,218,503,700
Stop a t_notesd test node
test/functional/test_framework/test_framework.py
stop_node
THYMESIA-SECURITIES/T-Notes
python
def stop_node(self, i): self.nodes[i].stop_node() self.nodes[i].wait_until_stopped()
def stop_nodes(self): 'Stop multiple t_notesd test nodes' for node in self.nodes: node.stop_node() for node in self.nodes: time.sleep(5) node.wait_until_stopped()
-3,249,961,233,590,234,600
Stop multiple t_notesd test nodes
test/functional/test_framework/test_framework.py
stop_nodes
THYMESIA-SECURITIES/T-Notes
python
def stop_nodes(self): for node in self.nodes: node.stop_node() for node in self.nodes: time.sleep(5) node.wait_until_stopped()
def restart_node(self, i, extra_args=None): 'Stop and start a test node' self.stop_node(i) self.start_node(i, extra_args)
-9,179,066,947,174,348
Stop and start a test node
test/functional/test_framework/test_framework.py
restart_node
THYMESIA-SECURITIES/T-Notes
python
def restart_node(self, i, extra_args=None): self.stop_node(i) self.start_node(i, extra_args)
def split_network(self): '\n Split the network of four nodes into nodes 0/1 and 2/3.\n ' disconnect_nodes(self.nodes[1], 2) disconnect_nodes(self.nodes[2], 1) self.sync_all(self.nodes[:2]) self.sync_all(self.nodes[2:])
-3,659,264,479,422,162,000
Split the network of four nodes into nodes 0/1 and 2/3.
test/functional/test_framework/test_framework.py
split_network
THYMESIA-SECURITIES/T-Notes
python
def split_network(self): '\n \n ' disconnect_nodes(self.nodes[1], 2) disconnect_nodes(self.nodes[2], 1) self.sync_all(self.nodes[:2]) self.sync_all(self.nodes[2:])
def join_network(self): '\n Join the (previously split) network halves together.\n ' connect_nodes(self.nodes[1], 2) self.sync_all()
7,109,203,827,622,544,000
Join the (previously split) network halves together.
test/functional/test_framework/test_framework.py
join_network
THYMESIA-SECURITIES/T-Notes
python
def join_network(self): '\n \n ' connect_nodes(self.nodes[1], 2) self.sync_all()
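A short sketch of how split_network and join_network are typically exercised in a test; it assumes the default four-node topology that the split_network docstring describes.

self.split_network()            # halves: nodes 0/1 and 2/3
self.nodes[0].generate(1)       # build a short fork on one half
self.nodes[2].generate(2)       # and a longer fork on the other
self.join_network()             # reconnect; sync_all() converges both halves on one tip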
def sync_blocks(self, nodes=None, wait=1, timeout=60): "\n        Wait until everybody has the same tip.\n        sync_blocks needs to be called with an rpc_connections set that has at least\n        one node already synced to the latest, stable tip, otherwise there's a\n        chance it might return before all nodes are stably synced.\n        " rpc_connections = (nodes or self.nodes) stop_time = (time.time() + timeout) while (time.time() <= stop_time): best_hash = [x.getbestblockhash() for x in rpc_connections] if (best_hash.count(best_hash[0]) == len(rpc_connections)): return assert all([len(x.getpeerinfo()) for x in rpc_connections]) time.sleep(wait) raise AssertionError('Block sync timed out after {}s:{}'.format(timeout, ''.join(('\n  {!r}'.format(b) for b in best_hash))))
7,808,380,102,412,062,000
Wait until everybody has the same tip. sync_blocks needs to be called with an rpc_connections set that has at least one node already synced to the latest, stable tip, otherwise there's a chance it might return before all nodes are stably synced.
test/functional/test_framework/test_framework.py
sync_blocks
THYMESIA-SECURITIES/T-Notes
python
def sync_blocks(self, nodes=None, wait=1, timeout=60): "\n        Wait until everybody has the same tip.\n        sync_blocks needs to be called with an rpc_connections set that has at least\n        one node already synced to the latest, stable tip, otherwise there's a\n        chance it might return before all nodes are stably synced.\n        " rpc_connections = (nodes or self.nodes) stop_time = (time.time() + timeout) while (time.time() <= stop_time): best_hash = [x.getbestblockhash() for x in rpc_connections] if (best_hash.count(best_hash[0]) == len(rpc_connections)): return assert all([len(x.getpeerinfo()) for x in rpc_connections]) time.sleep(wait) raise AssertionError('Block sync timed out after {}s:{}'.format(timeout, ''.join(('\n  {!r}'.format(b) for b in best_hash))))
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True): '\n Wait until everybody has the same transactions in their memory\n pools\n ' rpc_connections = (nodes or self.nodes) stop_time = (time.time() + timeout) while (time.time() <= stop_time): pool = [set(r.getrawmempool()) for r in rpc_connections] if (pool.count(pool[0]) == len(rpc_connections)): if flush_scheduler: for r in rpc_connections: r.syncwithvalidationinterfacequeue() return assert all([len(x.getpeerinfo()) for x in rpc_connections]) time.sleep(wait) raise AssertionError('Mempool sync timed out after {}s:{}'.format(timeout, ''.join(('\n {!r}'.format(m) for m in pool))))
1,533,758,400,024,637,700
Wait until everybody has the same transactions in their memory pools
test/functional/test_framework/test_framework.py
sync_mempools
THYMESIA-SECURITIES/T-Notes
python
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True): '\n        Wait until everybody has the same transactions in their memory\n        pools\n        ' rpc_connections = (nodes or self.nodes) stop_time = (time.time() + timeout) while (time.time() <= stop_time): pool = [set(r.getrawmempool()) for r in rpc_connections] if (pool.count(pool[0]) == len(rpc_connections)): if flush_scheduler: for r in rpc_connections: r.syncwithvalidationinterfacequeue() return assert all([len(x.getpeerinfo()) for x in rpc_connections]) time.sleep(wait) raise AssertionError('Mempool sync timed out after {}s:{}'.format(timeout, ''.join(('\n  {!r}'.format(m) for m in pool))))
def enable_mocktime(self): 'Enable mocktime for the script.\n\n mocktime may be needed for scripts that use the cached version of the\n blockchain. If the cached version of the blockchain is used without\n mocktime then the mempools will not sync due to IBD.\n\n Sets mocktime to Tuesday, October 31, 2017 6:21:20 PM GMT (1572546080)\n ' self.mocktime = 1572546080
2,325,546,710,299,151,000
Enable mocktime for the script. mocktime may be needed for scripts that use the cached version of the blockchain. If the cached version of the blockchain is used without mocktime then the mempools will not sync due to IBD. Sets mocktime to Tuesday, October 31, 2017 6:21:20 PM GMT (1572546080)
test/functional/test_framework/test_framework.py
enable_mocktime
THYMESIA-SECURITIES/T-Notes
python
def enable_mocktime(self): 'Enable mocktime for the script.\n\n mocktime may be needed for scripts that use the cached version of the\n blockchain. If the cached version of the blockchain is used without\n mocktime then the mempools will not sync due to IBD.\n\n Sets mocktime to Tuesday, October 31, 2017 6:21:20 PM GMT (1572546080)\n ' self.mocktime = 1572546080
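A sketch of the intended use, inferred from the docstring and from _initialize_chain below (an assumption, not a verbatim snippet from the repository): enable mocktime before generating on top of the cached chain and push the same timestamp to all nodes.

self.enable_mocktime()                          # self.mocktime = 1572546080
set_node_times(self.nodes, self.mocktime)       # helper also used by _initialize_chain below
self.nodes[0].generate(1)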
def _initialize_chain(self): 'Initialize a pre-mined blockchain for use by the test.' def create_cachedir(cachedir): if os.path.isdir(cachedir): shutil.rmtree(cachedir) os.makedirs(cachedir) def copy_cachedir(origin, destination, num_nodes=MAX_NODES): for i in range(num_nodes): from_dir = get_datadir_path(origin, i) to_dir = get_datadir_path(destination, i) shutil.copytree(from_dir, to_dir) initialize_datadir(destination, i) def clone_cache_from_node_1(cachedir, from_num=4): " Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES" def copy_and_overwrite(from_path, to_path): if os.path.exists(to_path): shutil.rmtree(to_path) shutil.copytree(from_path, to_path) assert (from_num < MAX_NODES) node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), 'regtest') for i in range(from_num, MAX_NODES): node_i_datadir = os.path.join(get_datadir_path(cachedir, i), 'regtest') for subdir in ['blocks', 'chainstate', 'sporks']: copy_and_overwrite(os.path.join(node_0_datadir, subdir), os.path.join(node_i_datadir, subdir)) initialize_datadir(cachedir, i) def cachedir_valid(cachedir): for i in range(MAX_NODES): if (not os.path.isdir(get_datadir_path(cachedir, i))): return False return (not os.path.exists(os.path.join(get_datadir_path(cachedir, 0), '.incomplete'))) def clean_cache_subdir(cachedir): os.remove(os.path.join(get_datadir_path(cachedir, 0), '.incomplete')) def cache_path(n, *paths): return os.path.join(get_datadir_path(cachedir, n), 'regtest', *paths) for i in range(MAX_NODES): for entry in os.listdir(cache_path(i)): if (entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'backups']): os.remove(cache_path(i, entry)) def clean_cache_dir(): if os.path.isdir(self.options.cachedir): if cachedir_valid(self.options.cachedir): powcachedir = os.path.join(self.options.cachedir, 'pow') self.log.info(('Found old cachedir. Migrating to %s' % str(powcachedir))) copy_cachedir(self.options.cachedir, powcachedir) for entry in os.listdir(self.options.cachedir): if (entry != 'pow'): entry_path = os.path.join(self.options.cachedir, entry) if os.path.isfile(entry_path): os.remove(entry_path) elif os.path.isdir(entry_path): shutil.rmtree(entry_path) else: os.makedirs(self.options.cachedir) def start_nodes_from_dir(ddir, num_nodes=MAX_NODES): self.log.info(('Starting %d nodes...' % num_nodes)) for i in range(num_nodes): datadir = initialize_datadir(ddir, i) if (i == 0): open(os.path.join(datadir, '.incomplete'), 'a').close() args = [os.getenv('BITCOIND', 't_notesd'), '-spendzeroconfchange=1', '-server', '-keypool=1', ('-datadir=' + datadir), '-discover=0'] self.nodes.append(TestNode(i, ddir, extra_args=[], rpchost=None, timewait=self.rpc_timewait, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None)) self.nodes[i].args = args self.start_node(i) self.log.info(('Node %d started.' % i)) self.log.info('Nodes started. Waiting for RPC connections...') for node in range(4): self.nodes[node].wait_for_rpc_connection() self.log.info('Connecting nodes') connect_nodes_clique(self.nodes) def stop_and_clean_cache_dir(ddir): self.stop_nodes() self.nodes = [] self.log.info('Copying cache dir to non-started nodes') clone_cache_from_node_1(ddir) self.log.info('Cleaning up.') clean_cache_subdir(ddir) def generate_pow_cache(): create_cachedir(powcachedir) self.log.info("Creating 'PoW-chain': 200 blocks") start_nodes_from_dir(powcachedir, 4) self.log.info('Mining 200 blocks') self.enable_mocktime() block_time = (self.mocktime - (331 * 60)) for i in range(2): for peer in range(4): for j in range(25): set_node_times(self.nodes, block_time) self.nodes[peer].generate(1) block_time += 60 self.sync_blocks() self.log.info('Stopping nodes') stop_and_clean_cache_dir(powcachedir) self.log.info('---> pow cache created') self.disable_mocktime() assert (self.num_nodes <= MAX_NODES) clean_cache_dir() powcachedir = os.path.join(self.options.cachedir, 'pow') is_powcache_valid = cachedir_valid(powcachedir) if (not is_powcache_valid): self.log.info('PoW-CACHE NOT FOUND or INVALID.') self.log.info('Creating new cached blockchain data.') generate_pow_cache() else: self.log.info('CACHE FOUND.') self.log.info(('Copying datadir from %s to %s' % (powcachedir, self.options.tmpdir))) copy_cachedir(powcachedir, self.options.tmpdir, self.num_nodes)
-4,283,486,815,522,228,000
Initialize a pre-mined blockchain for use by the test.
test/functional/test_framework/test_framework.py
_initialize_chain
THYMESIA-SECURITIES/T-Notes
python
def _initialize_chain(self): def create_cachedir(cachedir): if os.path.isdir(cachedir): shutil.rmtree(cachedir) os.makedirs(cachedir) def copy_cachedir(origin, destination, num_nodes=MAX_NODES): for i in range(num_nodes): from_dir = get_datadir_path(origin, i) to_dir = get_datadir_path(destination, i) shutil.copytree(from_dir, to_dir) initialize_datadir(destination, i) def clone_cache_from_node_1(cachedir, from_num=4): " Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES" def copy_and_overwrite(from_path, to_path): if os.path.exists(to_path): shutil.rmtree(to_path) shutil.copytree(from_path, to_path) assert (from_num < MAX_NODES) node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), 'regtest') for i in range(from_num, MAX_NODES): node_i_datadir = os.path.join(get_datadir_path(cachedir, i), 'regtest') for subdir in ['blocks', 'chainstate', 'sporks']: copy_and_overwrite(os.path.join(node_0_datadir, subdir), os.path.join(node_i_datadir, subdir)) initialize_datadir(cachedir, i) def cachedir_valid(cachedir): for i in range(MAX_NODES): if (not os.path.isdir(get_datadir_path(cachedir, i))): return False return (not os.path.exists(os.path.join(get_datadir_path(cachedir, 0), '.incomplete'))) def clean_cache_subdir(cachedir): os.remove(os.path.join(get_datadir_path(cachedir, 0), '.incomplete')) def cache_path(n, *paths): return os.path.join(get_datadir_path(cachedir, n), 'regtest', *paths) for i in range(MAX_NODES): for entry in os.listdir(cache_path(i)): if (entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'backups']): os.remove(cache_path(i, entry)) def clean_cache_dir(): if os.path.isdir(self.options.cachedir): if cachedir_valid(self.options.cachedir): powcachedir = os.path.join(self.options.cachedir, 'pow') self.log.info(('Found old cachedir. Migrating to %s' % str(powcachedir))) copy_cachedir(self.options.cachedir, powcachedir) for entry in os.listdir(self.options.cachedir): if (entry != 'pow'): entry_path = os.path.join(self.options.cachedir, entry) if os.path.isfile(entry_path): os.remove(entry_path) elif os.path.isdir(entry_path): shutil.rmtree(entry_path) else: os.makedirs(self.options.cachedir) def start_nodes_from_dir(ddir, num_nodes=MAX_NODES): self.log.info(('Starting %d nodes...' % num_nodes)) for i in range(num_nodes): datadir = initialize_datadir(ddir, i) if (i == 0): open(os.path.join(datadir, '.incomplete'), 'a').close() args = [os.getenv('BITCOIND', 't_notesd'), '-spendzeroconfchange=1', '-server', '-keypool=1', ('-datadir=' + datadir), '-discover=0'] self.nodes.append(TestNode(i, ddir, extra_args=[], rpchost=None, timewait=self.rpc_timewait, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None)) self.nodes[i].args = args self.start_node(i) self.log.info(('Node %d started.' % i)) self.log.info('Nodes started. Waiting for RPC connections...') for node in range(4): self.nodes[node].wait_for_rpc_connection() self.log.info('Connecting nodes') connect_nodes_clique(self.nodes) def stop_and_clean_cache_dir(ddir): self.stop_nodes() self.nodes = [] self.log.info('Copying cache dir to non-started nodes') clone_cache_from_node_1(ddir) self.log.info('Cleaning up.') clean_cache_subdir(ddir) def generate_pow_cache(): create_cachedir(powcachedir) self.log.info("Creating 'PoW-chain': 200 blocks") start_nodes_from_dir(powcachedir, 4) self.log.info('Mining 200 blocks') self.enable_mocktime() block_time = (self.mocktime - (331 * 60)) for i in range(2): for peer in range(4): for j in range(25): set_node_times(self.nodes, block_time) self.nodes[peer].generate(1) block_time += 60 self.sync_blocks() self.log.info('Stopping nodes') stop_and_clean_cache_dir(powcachedir) self.log.info('---> pow cache created') self.disable_mocktime() assert (self.num_nodes <= MAX_NODES) clean_cache_dir() powcachedir = os.path.join(self.options.cachedir, 'pow') is_powcache_valid = cachedir_valid(powcachedir) if (not is_powcache_valid): self.log.info('PoW-CACHE NOT FOUND or INVALID.') self.log.info('Creating new cached blockchain data.') generate_pow_cache() else: self.log.info('CACHE FOUND.') self.log.info(('Copying datadir from %s to %s' % (powcachedir, self.options.tmpdir))) copy_cachedir(powcachedir, self.options.tmpdir, self.num_nodes)
def _initialize_chain_clean(self): 'Initialize empty blockchain for use by the test.\n\n Create an empty blockchain and num_nodes wallets.\n Useful if a test case wants complete control over initialization.' for i in range(self.num_nodes): initialize_datadir(self.options.tmpdir, i)
755,699,836,406,072,200
Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization.
test/functional/test_framework/test_framework.py
_initialize_chain_clean
THYMESIA-SECURITIES/T-Notes
python
def _initialize_chain_clean(self): 'Initialize empty blockchain for use by the test.\n\n Create an empty blockchain and num_nodes wallets.\n Useful if a test case wants complete control over initialization.' for i in range(self.num_nodes): initialize_datadir(self.options.tmpdir, i)
def get_prevouts(self, node_id, utxo_list): ' get prevouts (map) for each utxo in a list\n :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos.\n utxo_list: (JSON list) utxos returned from listunspent used as input\n :return: prevouts: ({bytes --> (int, bytes, int)} dictionary)\n maps CStake "uniqueness" (i.e. serialized COutPoint)\n to (amount, prevScript, timeBlockFrom).\n ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] prevouts = {} for utxo in utxo_list: outPoint = COutPoint(int(utxo['txid'], 16), utxo['vout']) outValue = (int(utxo['amount']) * COIN) prevtx_json = rpc_conn.getrawtransaction(utxo['txid'], 1) prevTx = CTransaction() prevTx.deserialize(BytesIO(hex_str_to_bytes(prevtx_json['hex']))) if ((prevTx.is_coinbase() or prevTx.is_coinstake()) and (utxo['confirmations'] < 100)): continue prevScript = prevtx_json['vout'][utxo['vout']]['scriptPubKey']['hex'] prevTime = prevtx_json['blocktime'] prevouts[outPoint.serialize_uniqueness()] = (outValue, prevScript, prevTime) return prevouts
6,947,453,711,636,926,000
get prevouts (map) for each utxo in a list :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos. utxo_list: (JSON list) utxos returned from listunspent used as input :return: prevouts: ({bytes --> (int, bytes, int)} dictionary) maps CStake "uniqueness" (i.e. serialized COutPoint) to (amount, prevScript, timeBlockFrom).
test/functional/test_framework/test_framework.py
get_prevouts
THYMESIA-SECURITIES/T-Notes
python
def get_prevouts(self, node_id, utxo_list): ' get prevouts (map) for each utxo in a list\n :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos.\n utxo_list: (JSON list) utxos returned from listunspent used as input\n :return: prevouts: ({bytes --> (int, bytes, int)} dictionary)\n maps CStake "uniqueness" (i.e. serialized COutPoint)\n to (amount, prevScript, timeBlockFrom).\n ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] prevouts = {} for utxo in utxo_list: outPoint = COutPoint(int(utxo['txid'], 16), utxo['vout']) outValue = (int(utxo['amount']) * COIN) prevtx_json = rpc_conn.getrawtransaction(utxo['txid'], 1) prevTx = CTransaction() prevTx.deserialize(BytesIO(hex_str_to_bytes(prevtx_json['hex']))) if ((prevTx.is_coinbase() or prevTx.is_coinstake()) and (utxo['confirmations'] < 100)): continue prevScript = prevtx_json['vout'][utxo['vout']]['scriptPubKey']['hex'] prevTime = prevtx_json['blocktime'] prevouts[outPoint.serialize_uniqueness()] = (outValue, prevScript, prevTime) return prevouts
def make_txes(self, node_id, spendingPrevOuts, to_pubKey): ' makes a list of CTransactions each spending an input from spending PrevOuts to an output to_pubKey\n :param node_id: (int) index of the CTestNode used as rpc connection. Must own spendingPrevOuts.\n spendingPrevouts: ({bytes --> (int, bytes, int)} dictionary)\n maps CStake "uniqueness" (i.e. serialized COutPoint)\n to (amount, prevScript, timeBlockFrom).\n to_pubKey (bytes) recipient public key\n :return: block_txes: ([CTransaction] list)\n ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] block_txes = [] for uniqueness in spendingPrevOuts: value_out = int((spendingPrevOuts[uniqueness][0] - (DEFAULT_FEE * COIN))) scriptPubKey = CScript([to_pubKey, OP_CHECKSIG]) prevout = COutPoint() prevout.deserialize_uniqueness(BytesIO(uniqueness)) tx = create_transaction_from_outpoint(prevout, b'', value_out, scriptPubKey) raw_spend = rpc_conn.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex'] signed_tx = CTransaction() signed_tx.from_hex(raw_spend) block_txes.append(signed_tx) return block_txes
-2,396,199,439,492,914,000
makes a list of CTransactions each spending an input from spending PrevOuts to an output to_pubKey :param node_id: (int) index of the CTestNode used as rpc connection. Must own spendingPrevOuts. spendingPrevouts: ({bytes --> (int, bytes, int)} dictionary) maps CStake "uniqueness" (i.e. serialized COutPoint) to (amount, prevScript, timeBlockFrom). to_pubKey (bytes) recipient public key :return: block_txes: ([CTransaction] list)
test/functional/test_framework/test_framework.py
make_txes
THYMESIA-SECURITIES/T-Notes
python
def make_txes(self, node_id, spendingPrevOuts, to_pubKey): ' makes a list of CTransactions each spending an input from spending PrevOuts to an output to_pubKey\n        :param node_id: (int) index of the CTestNode used as rpc connection. Must own spendingPrevOuts.\n               spendingPrevouts: ({bytes --> (int, bytes, int)} dictionary)\n                  maps CStake "uniqueness" (i.e. serialized COutPoint)\n                  to (amount, prevScript, timeBlockFrom).\n               to_pubKey (bytes) recipient public key\n        :return: block_txes: ([CTransaction] list)\n        ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] block_txes = [] for uniqueness in spendingPrevOuts: value_out = int((spendingPrevOuts[uniqueness][0] - (DEFAULT_FEE * COIN))) scriptPubKey = CScript([to_pubKey, OP_CHECKSIG]) prevout = COutPoint() prevout.deserialize_uniqueness(BytesIO(uniqueness)) tx = create_transaction_from_outpoint(prevout, b'', value_out, scriptPubKey) raw_spend = rpc_conn.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex'] signed_tx = CTransaction() signed_tx.from_hex(raw_spend) block_txes.append(signed_tx) return block_txes
def stake_block(self, node_id, nVersion, nHeight, prevHash, prevModifier, finalsaplingroot, stakeableUtxos, startTime, privKeyWIF, vtx, fDoubleSpend): ' manually stakes a block selecting the coinstake input from a list of candidates\n        :param node_id: (int) index of the CTestNode used as rpc connection. Must own stakeableUtxos.\n               nVersion: (int) version of the block being produced (7 or 8)\n               nHeight: (int) height of the block being produced\n               prevHash: (string) hex string of the previous block hash\n               prevModifier (string) hex string of the previous block stake modifier\n               finalsaplingroot (string) hex string of the previous block sapling root (blocks V8)\n               stakeableUtxos: ({bytes --> (int, bytes, int)} dictionary)\n                  maps CStake "uniqueness" (i.e. serialized COutPoint)\n                  to (amount, prevScript, timeBlockFrom).\n               startTime: (int) epoch time to be used as blocktime (iterated in solve_stake)\n               privKeyWIF: (string) private key to be used for staking/signing\n                  If empty string, the DUMMY_KEY will be used.\n                  If None, the pk from the stake input will be used (dumping the sk from rpc_conn).\n               vtx: ([CTransaction] list) transactions to add to block.vtx\n               fDoubleSpend: (bool) whether any tx in vtx is allowed to spend the coinstake input\n        :return: block: (CBlock) block produced, must be manually relayed\n        ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] if (not (len(stakeableUtxos) > 0)): raise Exception('Need at least one stakeable utxo to stake a block!') if (startTime is None): startTime = time.time() nTime = (int(startTime) & 4294967280) coinbaseTx = create_coinbase_pos(nHeight) block = create_block(int(prevHash, 16), coinbaseTx, nTime, nVersion, int(finalsaplingroot, 16)) block.nVersion = nVersion block.solve_stake(stakeableUtxos, int(prevModifier, 16)) block_sig_key = CECKey() coinstakeTx_unsigned = CTransaction() prevout = COutPoint() prevout.deserialize_uniqueness(BytesIO(block.prevoutStake)) coinstakeTx_unsigned.vin.append(CTxIn(prevout, b'', 4294967295)) coinstakeTx_unsigned.vout.append(CTxOut()) (amount, prevScript, _) = stakeableUtxos[block.prevoutStake] outNValue = int((amount + (250 * COIN))) coinstakeTx_unsigned.vout.append(CTxOut(outNValue, hex_str_to_bytes(prevScript))) if (privKeyWIF == ''): if (not hasattr(self, 'DUMMY_KEY')): self.init_dummy_key() block_sig_key = self.DUMMY_KEY coinstakeTx_unsigned.vout[1].scriptPubKey = CScript([block_sig_key.get_pubkey(), OP_CHECKSIG]) else: if (privKeyWIF == None): rawtx = rpc_conn.getrawtransaction('{:064x}'.format(prevout.hash), True) privKeyWIF = rpc_conn.dumpprivkey(rawtx['vout'][prevout.n]['scriptPubKey']['addresses'][0]) (privKey, compressed) = wif_to_privkey(privKeyWIF) block_sig_key.set_compressed(compressed) block_sig_key.set_secretbytes(bytes.fromhex(privKey)) stake_tx_signed_raw_hex = rpc_conn.signrawtransaction(bytes_to_hex_str(coinstakeTx_unsigned.serialize()))['hex'] coinstakeTx = CTransaction() coinstakeTx.from_hex(stake_tx_signed_raw_hex) block.vtx.append(coinstakeTx) for tx in vtx: if ((not fDoubleSpend) and tx.spends(prevout)): continue block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.sign_block(block_sig_key) return block
178,559,729,814,218,200
manually stakes a block selecting the coinstake input from a list of candidates :param node_id: (int) index of the CTestNode used as rpc connection. Must own stakeableUtxos. nVersion: (int) version of the block being produced (7 or 8) nHeight: (int) height of the block being produced prevHash: (string) hex string of the previous block hash prevModifier (string) hex string of the previous block stake modifier finalsaplingroot (string) hex string of the previous block sapling root (blocks V8) stakeableUtxos: ({bytes --> (int, bytes, int)} dictionary) maps CStake "uniqueness" (i.e. serialized COutPoint) to (amount, prevScript, timeBlockFrom). startTime: (int) epoch time to be used as blocktime (iterated in solve_stake) privKeyWIF: (string) private key to be used for staking/signing If empty string, the DUMMY_KEY will be used. If None, the pk from the stake input will be used (dumping the sk from rpc_conn). vtx: ([CTransaction] list) transactions to add to block.vtx fDoubleSpend: (bool) whether any tx in vtx is allowed to spend the coinstake input :return: block: (CBlock) block produced, must be manually relayed
test/functional/test_framework/test_framework.py
stake_block
THYMESIA-SECURITIES/T-Notes
python
def stake_block(self, node_id, nVersion, nHeight, prevHash, prevModifier, finalsaplingroot, stakeableUtxos, startTime, privKeyWIF, vtx, fDoubleSpend): ' manually stakes a block selecting the coinstake input from a list of candidates\n        :param node_id: (int) index of the CTestNode used as rpc connection. Must own stakeableUtxos.\n               nVersion: (int) version of the block being produced (7 or 8)\n               nHeight: (int) height of the block being produced\n               prevHash: (string) hex string of the previous block hash\n               prevModifier (string) hex string of the previous block stake modifier\n               finalsaplingroot (string) hex string of the previous block sapling root (blocks V8)\n               stakeableUtxos: ({bytes --> (int, bytes, int)} dictionary)\n                  maps CStake "uniqueness" (i.e. serialized COutPoint)\n                  to (amount, prevScript, timeBlockFrom).\n               startTime: (int) epoch time to be used as blocktime (iterated in solve_stake)\n               privKeyWIF: (string) private key to be used for staking/signing\n                  If empty string, the DUMMY_KEY will be used.\n                  If None, the pk from the stake input will be used (dumping the sk from rpc_conn).\n               vtx: ([CTransaction] list) transactions to add to block.vtx\n               fDoubleSpend: (bool) whether any tx in vtx is allowed to spend the coinstake input\n        :return: block: (CBlock) block produced, must be manually relayed\n        ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] if (not (len(stakeableUtxos) > 0)): raise Exception('Need at least one stakeable utxo to stake a block!') if (startTime is None): startTime = time.time() nTime = (int(startTime) & 4294967280) coinbaseTx = create_coinbase_pos(nHeight) block = create_block(int(prevHash, 16), coinbaseTx, nTime, nVersion, int(finalsaplingroot, 16)) block.nVersion = nVersion block.solve_stake(stakeableUtxos, int(prevModifier, 16)) block_sig_key = CECKey() coinstakeTx_unsigned = CTransaction() prevout = COutPoint() prevout.deserialize_uniqueness(BytesIO(block.prevoutStake)) coinstakeTx_unsigned.vin.append(CTxIn(prevout, b'', 4294967295)) coinstakeTx_unsigned.vout.append(CTxOut()) (amount, prevScript, _) = stakeableUtxos[block.prevoutStake] outNValue = int((amount + (250 * COIN))) coinstakeTx_unsigned.vout.append(CTxOut(outNValue, hex_str_to_bytes(prevScript))) if (privKeyWIF == ''): if (not hasattr(self, 'DUMMY_KEY')): self.init_dummy_key() block_sig_key = self.DUMMY_KEY coinstakeTx_unsigned.vout[1].scriptPubKey = CScript([block_sig_key.get_pubkey(), OP_CHECKSIG]) else: if (privKeyWIF == None): rawtx = rpc_conn.getrawtransaction('{:064x}'.format(prevout.hash), True) privKeyWIF = rpc_conn.dumpprivkey(rawtx['vout'][prevout.n]['scriptPubKey']['addresses'][0]) (privKey, compressed) = wif_to_privkey(privKeyWIF) block_sig_key.set_compressed(compressed) block_sig_key.set_secretbytes(bytes.fromhex(privKey)) stake_tx_signed_raw_hex = rpc_conn.signrawtransaction(bytes_to_hex_str(coinstakeTx_unsigned.serialize()))['hex'] coinstakeTx = CTransaction() coinstakeTx.from_hex(stake_tx_signed_raw_hex) block.vtx.append(coinstakeTx) for tx in vtx: if ((not fDoubleSpend) and tx.spends(prevout)): continue block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.sign_block(block_sig_key) return block
def stake_next_block(self, node_id, stakeableUtxos, btime=None, privKeyWIF=None, vtx=[], fDoubleSpend=False): ' Calls stake_block appending to the current tip' assert_greater_than(len(self.nodes), node_id) saplingActive = (self.nodes[node_id].getblockchaininfo()['upgrades']['v5 shield']['status'] == 'active') blockVersion = (8 if saplingActive else 7) nHeight = self.nodes[node_id].getblockcount() prevHhash = self.nodes[node_id].getblockhash(nHeight) prevBlock = self.nodes[node_id].getblock(prevHhash, True) prevModifier = prevBlock['stakeModifier'] saplingRoot = prevBlock['finalsaplingroot'] return self.stake_block(node_id, blockVersion, (nHeight + 1), prevHhash, prevModifier, saplingRoot, stakeableUtxos, btime, privKeyWIF, vtx, fDoubleSpend)
-4,526,642,527,111,188,500
Calls stake_block appending to the current tip
test/functional/test_framework/test_framework.py
stake_next_block
THYMESIA-SECURITIES/T-Notes
python
def stake_next_block(self, node_id, stakeableUtxos, btime=None, privKeyWIF=None, vtx=[], fDoubleSpend=False): ' ' assert_greater_than(len(self.nodes), node_id) saplingActive = (self.nodes[node_id].getblockchaininfo()['upgrades']['v5 shield']['status'] == 'active') blockVersion = (8 if saplingActive else 7) nHeight = self.nodes[node_id].getblockcount() prevHhash = self.nodes[node_id].getblockhash(nHeight) prevBlock = self.nodes[node_id].getblock(prevHhash, True) prevModifier = prevBlock['stakeModifier'] saplingRoot = prevBlock['finalsaplingroot'] return self.stake_block(node_id, blockVersion, (nHeight + 1), prevHhash, prevModifier, saplingRoot, stakeableUtxos, btime, privKeyWIF, vtx, fDoubleSpend)
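A hedged usage sketch of stake_next_block follows, assuming a test class derived from this framework and a wallet on node 0 that owns the spent utxo. COIN and bytes_to_hex_str are the framework helpers already referenced above; the 36-byte "uniqueness" layout (little-endian txid plus 4-byte little-endian output index, mirroring deserialize_uniqueness) is an assumption, not taken from the source.
import struct
import time

node = self.nodes[0]
utxo = node.listunspent()[0]
rawtx = node.getrawtransaction(utxo['txid'], True)

# "uniqueness" key: serialized COutPoint (txid little-endian + 4-byte LE index) -- assumed layout
uniqueness = bytes.fromhex(utxo['txid'])[::-1] + struct.pack('<I', utxo['vout'])

# value: (amount in satoshis, prevout script as hex string, time of the block containing the utxo)
amount = int(utxo['amount'] * COIN)
prev_script = rawtx['vout'][utxo['vout']]['scriptPubKey']['hex']
time_block_from = node.getblockheader(rawtx['blockhash'])['time']

stakeable = {uniqueness: (amount, prev_script, time_block_from)}

# stake on top of the current tip and relay the produced block manually
block = self.stake_next_block(0, stakeable, btime=int(time.time()))
node.submitblock(bytes_to_hex_str(block.serialize()))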
def spend_inputs(self, node_id, inputs, outputs): ' auxiliary function used by spend_utxo / spend_utxos ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] spendingTx = rpc_conn.createrawtransaction(inputs, outputs) spendingTx_signed = rpc_conn.signrawtransaction(spendingTx) if spendingTx_signed['complete']: txhash = rpc_conn.sendrawtransaction(spendingTx_signed['hex']) return txhash else: return ''
-3,138,092,644,918,846,000
auxiliary function used by spend_utxo / spend_utxos
test/functional/test_framework/test_framework.py
spend_inputs
THYMESIA-SECURITIES/T-Notes
python
def spend_inputs(self, node_id, inputs, outputs): ' ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] spendingTx = rpc_conn.createrawtransaction(inputs, outputs) spendingTx_signed = rpc_conn.signrawtransaction(spendingTx) if spendingTx_signed['complete']: txhash = rpc_conn.sendrawtransaction(spendingTx_signed['hex']) return txhash else: return ''
def spend_utxo(self, node_id, utxo, recipient=''): ' spend amount from previously unspent output to a provided address\n :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.\n utxo: (JSON) returned from listunspent used as input\n recipient: (string) destination address (new one if not provided)\n :return: txhash: (string) tx hash if successful, empty string otherwise\n ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] inputs = [{'txid': utxo['txid'], 'vout': utxo['vout']}] out_amount = (float(utxo['amount']) - DEFAULT_FEE) outputs = {} if (recipient == ''): recipient = rpc_conn.getnewaddress() outputs[recipient] = out_amount return self.spend_inputs(node_id, inputs, outputs)
7,583,722,645,633,327,000
spend amount from previously unspent output to a provided address :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo. utxo: (JSON) returned from listunspent used as input recipient: (string) destination address (new one if not provided) :return: txhash: (string) tx hash if successful, empty string otherwise
test/functional/test_framework/test_framework.py
spend_utxo
THYMESIA-SECURITIES/T-Notes
python
def spend_utxo(self, node_id, utxo, recipient=''): ' spend amount from previously unspent output to a provided address\n :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.\n utxo: (JSON) returned from listunspent used as input\n recipient: (string) destination address (new one if not provided)\n :return: txhash: (string) tx hash if successful, empty string otherwise\n ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] inputs = [{'txid': utxo['txid'], 'vout': utxo['vout']}] out_amount = (float(utxo['amount']) - DEFAULT_FEE) outputs = {} if (recipient == ''): recipient = rpc_conn.getnewaddress() outputs[recipient] = out_amount return self.spend_inputs(node_id, inputs, outputs)
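For illustration, a minimal (hypothetical) call from a test derived from this framework, spending node 0's first listed utxo to a fresh address on node 1:
utxo = self.nodes[0].listunspent()[0]
dest = self.nodes[1].getnewaddress()
txid = self.spend_utxo(0, utxo, dest)
assert txid != ''  # an empty string means signing or broadcasting failed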
def spend_utxos(self, node_id, utxo_list, recipient='', fMultiple=False): ' spend utxos to provided list of addresses or 10 new generate ones.\n :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.\n utxo_list: (JSON list) returned from listunspent used as input\n recipient: (string, optional) destination address (new one if not provided)\n fMultiple: (boolean, optional, default=false) spend each utxo on a different tx\n :return: txHashes: (string list) list of hashes of completed txs\n ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] txHashes = [] if (recipient == ''): recipient = rpc_conn.getnewaddress() if fMultiple: for utxo in utxo_list: txHash = self.spend_utxo(node_id, utxo, recipient) if (txHash != ''): txHashes.append(txHash) else: inputs = [{'txid': x['txid'], 'vout': x['vout']} for x in utxo_list] out_amount = (sum([float(x['amount']) for x in utxo_list]) - DEFAULT_FEE) outputs = {} if (recipient == ''): recipient = rpc_conn.getnewaddress() outputs[recipient] = out_amount txHash = self.spend_inputs(node_id, inputs, outputs) if (txHash != ''): txHashes.append(txHash) return txHashes
-5,674,369,076,897,975,000
spend utxos to the provided address or a newly generated one. :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos. utxo_list: (JSON list) returned from listunspent used as input recipient: (string, optional) destination address (new one if not provided) fMultiple: (boolean, optional, default=false) spend each utxo on a different tx :return: txHashes: (string list) list of hashes of completed txs
test/functional/test_framework/test_framework.py
spend_utxos
THYMESIA-SECURITIES/T-Notes
python
def spend_utxos(self, node_id, utxo_list, recipient='', fMultiple=False): ' spend utxos to provided list of addresses or 10 new generate ones.\n :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.\n utxo_list: (JSON list) returned from listunspent used as input\n recipient: (string, optional) destination address (new one if not provided)\n fMultiple: (boolean, optional, default=false) spend each utxo on a different tx\n :return: txHashes: (string list) list of hashes of completed txs\n ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] txHashes = [] if (recipient == ''): recipient = rpc_conn.getnewaddress() if fMultiple: for utxo in utxo_list: txHash = self.spend_utxo(node_id, utxo, recipient) if (txHash != ''): txHashes.append(txHash) else: inputs = [{'txid': x['txid'], 'vout': x['vout']} for x in utxo_list] out_amount = (sum([float(x['amount']) for x in utxo_list]) - DEFAULT_FEE) outputs = {} if (recipient == ''): recipient = rpc_conn.getnewaddress() outputs[recipient] = out_amount txHash = self.spend_inputs(node_id, inputs, outputs) if (txHash != ''): txHashes.append(txHash) return txHashes
def generate_pos(self, node_id, btime=None): ' stakes a block using generate on nodes[node_id]' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] ss = rpc_conn.getstakingstatus() assert ss['walletunlocked'] assert (ss['stakeablecoins'] > 0) assert (ss['stakingbalance'] > 0.0) if (btime is not None): next_btime = (btime + 60) fStaked = False failures = 0 while (not fStaked): try: rpc_conn.generate(1) fStaked = True except JSONRPCException as e: if ("Couldn't create new block" in str(e)): failures += 1 if (failures > 60): ss = rpc_conn.getstakingstatus() if (not (ss['walletunlocked'] and (ss['stakeablecoins'] > 0) and (ss['stakingbalance'] > 0.0))): raise AssertionError(('Node %d unable to stake!' % node_id)) if (btime is not None): btime += 1 set_node_times(self.nodes, btime) else: time.sleep(1) else: raise e if (btime is not None): btime = max((btime + 1), next_btime) set_node_times(self.nodes, btime) return btime else: return None
7,975,874,726,591,192,000
stakes a block using generate on nodes[node_id]
test/functional/test_framework/test_framework.py
generate_pos
THYMESIA-SECURITIES/T-Notes
python
def generate_pos(self, node_id, btime=None): ' ' assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] ss = rpc_conn.getstakingstatus() assert ss['walletunlocked'] assert (ss['stakeablecoins'] > 0) assert (ss['stakingbalance'] > 0.0) if (btime is not None): next_btime = (btime + 60) fStaked = False failures = 0 while (not fStaked): try: rpc_conn.generate(1) fStaked = True except JSONRPCException as e: if ("Couldn't create new block" in str(e)): failures += 1 if (failures > 60): ss = rpc_conn.getstakingstatus() if (not (ss['walletunlocked'] and (ss['stakeablecoins'] > 0) and (ss['stakingbalance'] > 0.0))): raise AssertionError(('Node %d unable to stake!' % node_id)) if (btime is not None): btime += 1 set_node_times(self.nodes, btime) else: time.sleep(1) else: raise e if (btime is not None): btime = max((btime + 1), next_btime) set_node_times(self.nodes, btime) return btime else: return None
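A hedged sketch of how generate_pos might be driven from a test, threading the returned mocktime through a small staking loop; the sync helper at the end is the framework's usual one and is an assumption here.
btime = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
set_node_times(self.nodes, btime)
for _ in range(5):
    btime = self.generate_pos(0, btime)   # stakes one block, returns the advanced block time
self.sync_all()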
def generate_pow(self, node_id, btime=None): ' stakes a block using generate on nodes[node_id]' assert_greater_than(len(self.nodes), node_id) self.nodes[node_id].generate(1) if (btime is not None): btime += 60 set_node_times(self.nodes, btime) return btime
8,197,249,247,907,530,000
mines a PoW block using generate on nodes[node_id]
test/functional/test_framework/test_framework.py
generate_pow
THYMESIA-SECURITIES/T-Notes
python
def generate_pow(self, node_id, btime=None): ' ' assert_greater_than(len(self.nodes), node_id) self.nodes[node_id].generate(1) if (btime is not None): btime += 60 set_node_times(self.nodes, btime) return btime
def clone_cache_from_node_1(cachedir, from_num=4): " Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES" def copy_and_overwrite(from_path, to_path): if os.path.exists(to_path): shutil.rmtree(to_path) shutil.copytree(from_path, to_path) assert (from_num < MAX_NODES) node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), 'regtest') for i in range(from_num, MAX_NODES): node_i_datadir = os.path.join(get_datadir_path(cachedir, i), 'regtest') for subdir in ['blocks', 'chainstate', 'sporks']: copy_and_overwrite(os.path.join(node_0_datadir, subdir), os.path.join(node_i_datadir, subdir)) initialize_datadir(cachedir, i)
1,864,112,916,403,433,500
Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES
test/functional/test_framework/test_framework.py
clone_cache_from_node_1
THYMESIA-SECURITIES/T-Notes
python
def clone_cache_from_node_1(cachedir, from_num=4): " " def copy_and_overwrite(from_path, to_path): if os.path.exists(to_path): shutil.rmtree(to_path) shutil.copytree(from_path, to_path) assert (from_num < MAX_NODES) node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), 'regtest') for i in range(from_num, MAX_NODES): node_i_datadir = os.path.join(get_datadir_path(cachedir, i), 'regtest') for subdir in ['blocks', 'chainstate', 'sporks']: copy_and_overwrite(os.path.join(node_0_datadir, subdir), os.path.join(node_i_datadir, subdir)) initialize_datadir(cachedir, i)
def evaluate(self, environment=None): 'Evaluate a marker.\n\n Return the boolean from evaluating the given marker against the\n environment. environment is an optional argument to override all or\n part of the determined environment.\n\n The environment is determined from the current Python process.\n ' current_environment = default_environment() if (environment is not None): current_environment.update(environment) return _evaluate_markers(self._markers, current_environment)
5,745,510,292,428,242,000
Evaluate a marker. Return the boolean from evaluating the given marker against the environment. environment is an optional argument to override all or part of the determined environment. The environment is determined from the current Python process.
env/env/lib/python3.6/site-packages/setuptools/_vendor/packaging/markers.py
evaluate
Aimee-pacy/NEWS
python
def evaluate(self, environment=None): 'Evaluate a marker.\n\n Return the boolean from evaluating the given marker against the\n environment. environment is an optional argument to override all or\n part of the determined environment.\n\n The environment is determined from the current Python process.\n ' current_environment = default_environment() if (environment is not None): current_environment.update(environment) return _evaluate_markers(self._markers, current_environment)
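A small usage sketch, assuming the public packaging distribution mirrors this vendored module:
from packaging.markers import Marker

marker = Marker('python_version >= "3.6" and sys_platform == "linux"')
print(marker.evaluate())                           # evaluated against the current interpreter
print(marker.evaluate({'sys_platform': 'win32'}))  # override part of the environment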
def __init__(self): '\n\t\tAttributes:\n\t\t\tdata (arr): data stored in the stack\n\t\t\tminimum (arr): minimum values of data stored\n\t\t' self.data = [] self.minimum = []
4,827,408,123,394,649,000
Attributes: data (arr): data stored in the stack minimum (arr): minimum values of data stored
libalgs-py/data_structures/min_stack.py
__init__
tdudz/libalgs-py
python
def __init__(self): '\n\t\tAttributes:\n\t\t\tdata (arr): data stored in the stack\n\t\t\tminimum (arr): minimum values of data stored\n\t\t' self.data = [] self.minimum = []
def empty(self): '\n\t\tReturns whether or not the stack is empty.\n\n\t\tTime Complexity: O(1)\n\t\t\n\t\tReturns:\n\t\t\tbool: whether or not the stack is empty\n\t\t' return (len(self.data) == 0)
-1,267,916,980,869,920,500
Returns whether or not the stack is empty. Time Complexity: O(1) Returns: bool: whether or not the stack is empty
libalgs-py/data_structures/min_stack.py
empty
tdudz/libalgs-py
python
def empty(self): '\n\t\tReturns whether or not the stack is empty.\n\n\t\tTime Complexity: O(1)\n\t\t\n\t\tReturns:\n\t\t\tbool: whether or not the stack is empty\n\t\t' return (len(self.data) == 0)
def push(self, x): '\n\t\tPushes an element onto the stack.\n\n\t\tTime Complexity: O(1)\n\n\t\tArgs:\n\t\t\tx: item to be added\n\t\t' self.data.append(x) if ((not self.minimum) or (x <= self.minimum[(- 1)])): self.minimum.append(x)
-2,443,692,277,918,272,000
Pushes an element onto the stack. Time Complexity: O(1) Args: x: item to be added
libalgs-py/data_structures/min_stack.py
push
tdudz/libalgs-py
python
def push(self, x): '\n\t\tPushes an element onto the stack.\n\n\t\tTime Complexity: O(1)\n\n\t\tArgs:\n\t\t\tx: item to be added\n\t\t' self.data.append(x) if ((not self.minimum) or (x <= self.minimum[(- 1)])): self.minimum.append(x)
def pop(self): '\n\t\tPops an element off the stack. \n\n\t\tTime Complexity: O(1)\n\n\t\tReturns:\n\t\t\tany: the last element on the stack\n\n\t\t' x = self.data.pop() if (x == self.minimum[(- 1)]): self.minimum.pop() return x
-6,070,208,603,326,677,000
Pops an element off the stack. Time Complexity: O(1) Returns: any: the last element on the stack
libalgs-py/data_structures/min_stack.py
pop
tdudz/libalgs-py
python
def pop(self): '\n\t\tPops an element off the stack. \n\n\t\tTime Complexity: O(1)\n\n\t\tReturns:\n\t\t\tany: the last element on the stack\n\n\t\t' x = self.data.pop() if (x == self.minimum[(- 1)]): self.minimum.pop() return x
def peek(self): "\n\t\tReturns the last item on the stack but doesn't remove it.\n\n\t\tTime Complexity: O(1)\n\n\t\t" return self.data[(- 1)]
-7,414,183,826,497,071,000
Returns the last item on the stack but doesn't remove it. Time Complexity: O(1)
libalgs-py/data_structures/min_stack.py
peek
tdudz/libalgs-py
python
def peek(self): "\n\t\tReturns the last item on the stack but doesn't remove it.\n\n\t\tTime Complexity: O(1)\n\n\t\t" return self.data[(- 1)]
def peek_min(self): "\n\t\tReturns the min on the stack but doesn't remove it.\n\n\t\tTime Complexity: O(1)\n\n\t\t" return self.minimum[(- 1)]
1,568,990,182,936,538,000
Returns the min on the stack but doesn't remove it. Time Complexity: O(1)
libalgs-py/data_structures/min_stack.py
peek_min
tdudz/libalgs-py
python
def peek_min(self): "\n\t\tReturns the min on the stack but doesn't remove it.\n\n\t\tTime Complexity: O(1)\n\n\t\t" return self.minimum[(- 1)]
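A short usage sketch of the methods above, assuming the enclosing class is named MinStack; peek_min stays O(1) and tracks duplicates of the minimum correctly.
s = MinStack()
for x in [5, 3, 7, 3, 2]:
    s.push(x)
assert s.peek_min() == 2
s.pop()          # removes 2
assert s.peek_min() == 3
s.pop()          # removes 3 (a duplicate of the tracked minimum)
assert s.peek_min() == 3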
def _send_request(self, http_request, **kwargs): "Runs the network request through the client's chained policies.\n\n :param http_request: The network request you want to make. Required.\n :type http_request: ~azure.core.pipeline.transport.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to True.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.pipeline.transport.HttpResponse\n " path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} http_request.url = self._client.format_url(http_request.url, **path_format_arguments) stream = kwargs.pop('stream', True) pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) return pipeline_response.http_response
9,055,639,993,187,902,000
Runs the network request through the client's chained policies. :param http_request: The network request you want to make. Required. :type http_request: ~azure.core.pipeline.transport.HttpRequest :keyword bool stream: Whether the response payload will be streamed. Defaults to True. :return: The response of your network call. Does not do error handling on your response. :rtype: ~azure.core.pipeline.transport.HttpResponse
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_04_01_preview/_key_vault_management_client.py
_send_request
AFengKK/azure-sdk-for-python
python
def _send_request(self, http_request, **kwargs): "Runs the network request through the client's chained policies.\n\n :param http_request: The network request you want to make. Required.\n :type http_request: ~azure.core.pipeline.transport.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to True.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.pipeline.transport.HttpResponse\n " path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} http_request.url = self._client.format_url(http_request.url, **path_format_arguments) stream = kwargs.pop('stream', True) pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) return pipeline_response.http_response
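A hedged sketch of issuing a raw request through this client's pipeline; the client construction and the relative URL are assumptions based on the file path and the usual azure-mgmt conventions.
from azure.core.pipeline.transport import HttpRequest
from azure.identity import DefaultAzureCredential
from azure.mgmt.keyvault.v2021_04_01_preview import KeyVaultManagementClient

client = KeyVaultManagementClient(DefaultAzureCredential(), "<subscription-id>")
request = HttpRequest(
    "GET",
    "/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/vaults?api-version=2021-04-01-preview",
)
# {subscriptionId} is filled in by _send_request via path_format_arguments
response = client._send_request(request, stream=False)
print(response.status_code)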
def filterGaussian(img, size=(5, 5), stdv=0): 'Summary of filterGaussian\n This will apply a noise reduction filter, we will use s 5x5 Gaussian filter to smooth\n the image to lower the sensitivity to noise. (The smaller the size the less visible the blur)\n\n To populate the Gaussian matrix we will use a kernel of normally distributed[stdv=1] numbers which will\n set each pixel value equal to the weighted average of its neighboor pixels\n\n The Gaussian distribution:\n Gd = (1/2pi*stdv^2)exp(-((i-(k+1)^2) + (j - (k+1)^2))/(2*stdv^2))\n\n i,j E [1,2k+1] for the kernel of size: (2k+1)x(2k+1) \n ' if (not isCV(img)): raise ValueError('Image not in np.array format') if (not isinstance(size, tuple)): raise ValueError('filterGaussian: Size for Gaussian filter not tuple') return cv.GaussianBlur(img, size, stdv)
1,231,359,395,927,140,900
Summary of filterGaussian This will apply a noise reduction filter; we will use a 5x5 Gaussian filter to smooth the image to lower the sensitivity to noise. (The smaller the size the less visible the blur) To populate the Gaussian matrix we will use a kernel of normally distributed [stdv=1] numbers which will set each pixel value equal to the weighted average of its neighbor pixels The Gaussian distribution: Gd = (1/(2*pi*stdv^2)) * exp(-(((i-(k+1))^2 + (j-(k+1))^2)/(2*stdv^2))) i,j E [1,2k+1] for the kernel of size: (2k+1)x(2k+1)
CarlaDriving/server/lane_detection/utils.py
filterGaussian
eamorgado/Car-Self-driving-Simulator
python
def filterGaussian(img, size=(5, 5), stdv=0): 'Summary of filterGaussian\n This will apply a noise reduction filter, we will use s 5x5 Gaussian filter to smooth\n the image to lower the sensitivity to noise. (The smaller the size the less visible the blur)\n\n To populate the Gaussian matrix we will use a kernel of normally distributed[stdv=1] numbers which will\n set each pixel value equal to the weighted average of its neighboor pixels\n\n The Gaussian distribution:\n Gd = (1/2pi*stdv^2)exp(-((i-(k+1)^2) + (j - (k+1)^2))/(2*stdv^2))\n\n i,j E [1,2k+1] for the kernel of size: (2k+1)x(2k+1) \n ' if (not isCV(img)): raise ValueError('Image not in np.array format') if (not isinstance(size, tuple)): raise ValueError('filterGaussian: Size for Gaussian filter not tuple') return cv.GaussianBlur(img, size, stdv)
def filterCanny(img, min_val=50, max_val=150, size=(5, 5), stdv=0): '\n The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection, \n which will reduce complexity of the image much further.\n\n The algorithm will detect sharp changes in luminosity and will define them as edges.\n\n The algorithm has the following stages:\n - Noise reduction\n - Intensity gradient - here it will apply a Sobel filter along the x and y axis to detect if edges are horizontal vertical or diagonal\n - Non-maximum suppression - this shortens the frequency bandwith of the signal to sharpen it\n - Hysteresis thresholding\n ' if (not isCV(img)): raise ValueError('Image not in np.array format') if (min_val >= max_val): raise ValueError('filterCanny: Value order incorrect') gray_scale = toGrayScale(img) gaussian = filterGaussian(gray_scale, size=size, stdv=stdv) return cv.Canny(gaussian, min_val, max_val)
7,903,466,607,846,601,000
The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection, which will reduce the complexity of the image much further. The algorithm will detect sharp changes in luminosity and will define them as edges. The algorithm has the following stages: - Noise reduction - Intensity gradient - here it will apply a Sobel filter along the x and y axis to detect whether edges are horizontal, vertical or diagonal - Non-maximum suppression - this shortens the frequency bandwidth of the signal to sharpen it - Hysteresis thresholding
CarlaDriving/server/lane_detection/utils.py
filterCanny
eamorgado/Car-Self-driving-Simulator
python
def filterCanny(img, min_val=50, max_val=150, size=(5, 5), stdv=0): '\n The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection, \n which will reduce complexity of the image much further.\n\n The algorithm will detect sharp changes in luminosity and will define them as edges.\n\n The algorithm has the following stages:\n - Noise reduction\n - Intensity gradient - here it will apply a Sobel filter along the x and y axis to detect if edges are horizontal vertical or diagonal\n - Non-maximum suppression - this shortens the frequency bandwith of the signal to sharpen it\n - Hysteresis thresholding\n ' if (not isCV(img)): raise ValueError('Image not in np.array format') if (min_val >= max_val): raise ValueError('filterCanny: Value order incorrect') gray_scale = toGrayScale(img) gaussian = filterGaussian(gray_scale, size=size, stdv=stdv) return cv.Canny(gaussian, min_val, max_val)
def houghFilter(frame, distance_resolution=2, angle_resolution=(np.pi / 180), min_n_intersections=50, min_line_size=30, max_line_gap=5): '\n Params:\n frame\n distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision\n angle_resolution: angle of accumulator in radians, larger ==> less precision\n min_n_intersections: minimum number of intersections\n min_line_size: minimum length of line in pixels\n max_line_gap: maximum distance in pixels between disconnected lines\n ' placeholder = np.array([]) hough = cv.HoughLinesP(frame, distance_resolution, angle_resolution, min_n_intersections, placeholder, min_line_size, max_line_gap) return hough
1,376,053,570,479,431,000
Params: frame distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision angle_resolution: angle of accumulator in radians, larger ==> less precision min_n_intersections: minimum number of intersections min_line_size: minimum length of line in pixels max_line_gap: maximum distance in pixels between disconnected lines
CarlaDriving/server/lane_detection/utils.py
houghFilter
eamorgado/Car-Self-driving-Simulator
python
def houghFilter(frame, distance_resolution=2, angle_resolution=(np.pi / 180), min_n_intersections=50, min_line_size=30, max_line_gap=5): '\n Params:\n frame\n distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision\n angle_resolution: angle of accumulator in radians, larger ==> less precision\n min_n_intersections: minimum number of intersections\n min_line_size: minimum length of line in pixels\n max_line_gap: maximum distance in pixels between disconnected lines\n ' placeholder = np.array([]) hough = cv.HoughLinesP(frame, distance_resolution, angle_resolution, min_n_intersections, placeholder, min_line_size, max_line_gap) return hough
def calculateLines(img, lines): '\n Combines line segments into one or two lanes\n Note: By looking at the slop of a line we can see if it is on the left side (m<0) or right (m>0)\n ' def calculateCoordinates(img, line_params): '\n Calculates the coordinates for a road lane\n ' (height, width, _) = img.shape (m, b) = line_params y1 = height y2 = int((y1 * (1 / 2))) x1 = max((- width), min((2 * width), int(((y1 - b) / m)))) x2 = max((- width), min((2 * width), int(((y2 - b) / m)))) return np.array([x1, y1, x2, y2]) lane_lines = [] if (lines is None): return np.array(lane_lines) (height, width, _) = img.shape (left_lines, right_lines) = ([], []) boundary = (1 / 3) left_region_boundary = (width * (1 - boundary)) right_region_boundary = (width * boundary) for line in lines: (x1, y1, x2, y2) = line.reshape(4) if (x1 == x2): continue line_params = np.polyfit((x1, x2), (y1, y2), 1) (slope, intercept) = (line_params[0], line_params[1]) if (slope < 0): if ((x1 < left_region_boundary) and (x2 < left_region_boundary)): left_lines.append((slope, intercept)) elif ((x1 > right_region_boundary) and (x2 > right_region_boundary)): right_lines.append((slope, intercept)) left_lines_avg = np.average(left_lines, axis=0) right_lines_avg = np.average(right_lines, axis=0) if (len(left_lines) > 0): left_line = calculateCoordinates(img, left_lines_avg) lane_lines.append(left_line) if (len(right_lines) > 0): right_line = calculateCoordinates(img, right_lines_avg) lane_lines.append(right_line) return np.array(lane_lines)
2,188,310,605,139,995,400
Combines line segments into one or two lanes Note: By looking at the slope of a line we can see if it is on the left side (m<0) or right (m>0)
CarlaDriving/server/lane_detection/utils.py
calculateLines
eamorgado/Car-Self-driving-Simulator
python
def calculateLines(img, lines): '\n Combines line segments into one or two lanes\n Note: By looking at the slop of a line we can see if it is on the left side (m<0) or right (m>0)\n ' def calculateCoordinates(img, line_params): '\n Calculates the coordinates for a road lane\n ' (height, width, _) = img.shape (m, b) = line_params y1 = height y2 = int((y1 * (1 / 2))) x1 = max((- width), min((2 * width), int(((y1 - b) / m)))) x2 = max((- width), min((2 * width), int(((y2 - b) / m)))) return np.array([x1, y1, x2, y2]) lane_lines = [] if (lines is None): return np.array(lane_lines) (height, width, _) = img.shape (left_lines, right_lines) = ([], []) boundary = (1 / 3) left_region_boundary = (width * (1 - boundary)) right_region_boundary = (width * boundary) for line in lines: (x1, y1, x2, y2) = line.reshape(4) if (x1 == x2): continue line_params = np.polyfit((x1, x2), (y1, y2), 1) (slope, intercept) = (line_params[0], line_params[1]) if (slope < 0): if ((x1 < left_region_boundary) and (x2 < left_region_boundary)): left_lines.append((slope, intercept)) elif ((x1 > right_region_boundary) and (x2 > right_region_boundary)): right_lines.append((slope, intercept)) left_lines_avg = np.average(left_lines, axis=0) right_lines_avg = np.average(right_lines, axis=0) if (len(left_lines) > 0): left_line = calculateCoordinates(img, left_lines_avg) lane_lines.append(left_line) if (len(right_lines) > 0): right_line = calculateCoordinates(img, right_lines_avg) lane_lines.append(right_line) return np.array(lane_lines)
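A hedged sketch chaining the helpers above on a single frame; the input image and the drawing step are illustrative and not from the source, and the functions are assumed to be importable from lane_detection.utils.
import cv2 as cv

frame = cv.imread('road.jpg')            # assumed BGR road image
edges = filterCanny(frame)               # grayscale + Gaussian blur + Canny edge map
segments = houghFilter(edges)            # probabilistic Hough line segments (may be None)
lanes = calculateLines(frame, segments)  # averaged left/right lane lines

for x1, y1, x2, y2 in lanes:
    cv.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 5)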
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1): '\n Using last steering angle to stabilize the steering angle\n This can be improved to use last N angles, etc\n if new angle is too different from current angle, only turn by max_angle_deviation degrees\n ' if (num_of_lane_lines == 1): max_angle_deviation = max_angle_deviation_one_lane else: max_angle_deviation = max_angle_deviation_two_lines angle_deviation = (new_steering_angle - curr_steering_angle) if (abs(angle_deviation) > max_angle_deviation): stabilized_steering_angle = int((curr_steering_angle + ((max_angle_deviation * angle_deviation) / abs(angle_deviation)))) else: stabilized_steering_angle = new_steering_angle return stabilized_steering_angle
-520,693,664,504,330,800
Using last steering angle to stabilize the steering angle This can be improved to use last N angles, etc if new angle is too different from current angle, only turn by max_angle_deviation degrees
CarlaDriving/server/lane_detection/utils.py
stabilizeSteeringAngle
eamorgado/Car-Self-driving-Simulator
python
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1): '\n Using last steering angle to stabilize the steering angle\n This can be improved to use last N angles, etc\n if new angle is too different from current angle, only turn by max_angle_deviation degrees\n ' if (num_of_lane_lines == 1): max_angle_deviation = max_angle_deviation_one_lane else: max_angle_deviation = max_angle_deviation_two_lines angle_deviation = (new_steering_angle - curr_steering_angle) if (abs(angle_deviation) > max_angle_deviation): stabilized_steering_angle = int((curr_steering_angle + ((max_angle_deviation * angle_deviation) / abs(angle_deviation)))) else: stabilized_steering_angle = new_steering_angle return stabilized_steering_angle
def calculateCoordinates(img, line_params): '\n Calculates the coordinates for a road lane\n ' (height, width, _) = img.shape (m, b) = line_params y1 = height y2 = int((y1 * (1 / 2))) x1 = max((- width), min((2 * width), int(((y1 - b) / m)))) x2 = max((- width), min((2 * width), int(((y2 - b) / m)))) return np.array([x1, y1, x2, y2])
-2,809,386,333,949,187,000
Calculates the coordinates for a road lane
CarlaDriving/server/lane_detection/utils.py
calculateCoordinates
eamorgado/Car-Self-driving-Simulator
python
def calculateCoordinates(img, line_params): '\n \n ' (height, width, _) = img.shape (m, b) = line_params y1 = height y2 = int((y1 * (1 / 2))) x1 = max((- width), min((2 * width), int(((y1 - b) / m)))) x2 = max((- width), min((2 * width), int(((y2 - b) / m)))) return np.array([x1, y1, x2, y2])
def __init__(self, worker_id): "Create a new Service\n\n :param worker_id: the identifier of this service instance\n :type worker_id: int\n\n The identifier of the worker can be used for workload repartition\n because it's consistent and always the same.\n\n For example, if the number of workers for this service is 3,\n one will got 0, the second got 1 and the last got 2.\n if worker_id 1 died, the new spawned process will got 1 again.\n " super(Service, self).__init__() self._initialize(worker_id)
3,167,055,717,728,948,000
Create a new Service :param worker_id: the identifier of this service instance :type worker_id: int The identifier of the worker can be used for workload repartition because it's consistent and always the same. For example, if the number of workers for this service is 3, one will get 0, the second will get 1 and the last will get 2. If worker 1 dies, the newly spawned process will get 1 again.
cotyledon/_service.py
__init__
1upon0/cotyledon
python
def __init__(self, worker_id): "Create a new Service\n\n :param worker_id: the identifier of this service instance\n :type worker_id: int\n\n The identifier of the worker can be used for workload repartition\n because it's consistent and always the same.\n\n For example, if the number of workers for this service is 3,\n one will got 0, the second got 1 and the last got 2.\n if worker_id 1 died, the new spawned process will got 1 again.\n " super(Service, self).__init__() self._initialize(worker_id)
def terminate(self): 'Gracefully shutdown the service\n\n This method will be executed when the Service has to shutdown cleanly.\n\n If not implemented the process will just end with status 0.\n\n To customize the exit code, the :py:class:`SystemExit` exception can be\n used.\n\n Any exceptions raised by this method will be logged and the worker will\n exit with status 1.\n '
-4,470,026,754,118,150,000
Gracefully shutdown the service This method will be executed when the Service has to shutdown cleanly. If not implemented the process will just end with status 0. To customize the exit code, the :py:class:`SystemExit` exception can be used. Any exceptions raised by this method will be logged and the worker will exit with status 1.
cotyledon/_service.py
terminate
1upon0/cotyledon
python
def terminate(self): 'Gracefully shutdown the service\n\n This method will be executed when the Service has to shutdown cleanly.\n\n If not implemented the process will just end with status 0.\n\n To customize the exit code, the :py:class:`SystemExit` exception can be\n used.\n\n Any exceptions raised by this method will be logged and the worker will\n exit with status 1.\n '
def reload(self): 'Reloading of the service\n\n This method will be executed when the Service receives a SIGHUP.\n\n If not implemented the process will just end with status 0 and\n :py:class:`ServiceRunner` will start a new fresh process for this\n service with the same worker_id.\n\n Any exceptions raised by this method will be logged and the worker will\n exit with status 1.\n ' os.kill(os.getpid(), signal.SIGTERM)
-2,382,388,470,792,310,300
Reloading of the service This method will be executed when the Service receives a SIGHUP. If not implemented the process will just end with status 0 and :py:class:`ServiceRunner` will start a new fresh process for this service with the same worker_id. Any exceptions raised by this method will be logged and the worker will exit with status 1.
cotyledon/_service.py
reload
1upon0/cotyledon
python
def reload(self): 'Reloading of the service\n\n This method will be executed when the Service receives a SIGHUP.\n\n If not implemented the process will just end with status 0 and\n :py:class:`ServiceRunner` will start a new fresh process for this\n service with the same worker_id.\n\n Any exceptions raised by this method will be logged and the worker will\n exit with status 1.\n ' os.kill(os.getpid(), signal.SIGTERM)
def run(self): 'Method representing the service activity\n\n If not implemented the process will just wait to receive an ending\n signal.\n\n This method is ran into the thread and can block or return as needed\n\n Any exceptions raised by this method will be logged and the worker will\n exit with status 1.\n '
-3,914,593,872,354,240,000
Method representing the service activity If not implemented the process will just wait to receive an ending signal. This method is run in its own thread and can block or return as needed Any exceptions raised by this method will be logged and the worker will exit with status 1.
cotyledon/_service.py
run
1upon0/cotyledon
python
def run(self): 'Method representing the service activity\n\n If not implemented the process will just wait to receive an ending\n signal.\n\n This method is ran into the thread and can block or return as needed\n\n Any exceptions raised by this method will be logged and the worker will\n exit with status 1.\n '
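A hedged sketch of a concrete Service subclass built on the hooks above; registering it with the process manager (ServiceRunner/ServiceManager depending on the cotyledon version) is left out here, and the class and attribute names are illustrative.
import time
import cotyledon

class HeartbeatService(cotyledon.Service):
    def __init__(self, worker_id):
        super(HeartbeatService, self).__init__(worker_id)
        self._worker_id = worker_id
        self._shutdown = False

    def run(self):
        # run() executes in its own thread; loop until terminate() flips the flag
        while not self._shutdown:
            print('worker %d alive' % self._worker_id)
            time.sleep(1)

    def terminate(self):
        # clean shutdown requested; stop the run() loop
        self._shutdown = True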
def GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results): 'Runs pretty-print command for specified file.' exit_code = input_api.subprocess.call([input_api.python_executable, 'pretty_print.py', rel_path, '--presubmit', '--non-interactive'], cwd=cwd) if (exit_code != 0): error_msg = ('%s is not formatted correctly; run git cl format to fix.' % rel_path) results.append(output_api.PresubmitError(error_msg))
9,207,421,196,495,104,000
Runs pretty-print command for specified file.
tools/metrics/histograms/PRESUBMIT.py
GetPrettyPrintErrors
Ron423c/chromium
python
def GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results): exit_code = input_api.subprocess.call([input_api.python_executable, 'pretty_print.py', rel_path, '--presubmit', '--non-interactive'], cwd=cwd) if (exit_code != 0): error_msg = ('%s is not formatted correctly; run git cl format to fix.' % rel_path) results.append(output_api.PresubmitError(error_msg))
def GetPrefixErrors(input_api, output_api, cwd, rel_path, results): 'Validates histogram prefixes in specified file.' exit_code = input_api.subprocess.call([input_api.python_executable, 'validate_prefix.py', rel_path], cwd=cwd) if (exit_code != 0): error_msg = ('%s contains histogram(s) with disallowed prefix, please run validate_prefix.py %s to fix.' % (rel_path, rel_path)) results.append(output_api.PresubmitError(error_msg))
3,177,257,491,787,596,300
Validates histogram prefixes in specified file.
tools/metrics/histograms/PRESUBMIT.py
GetPrefixErrors
Ron423c/chromium
python
def GetPrefixErrors(input_api, output_api, cwd, rel_path, results): exit_code = input_api.subprocess.call([input_api.python_executable, 'validate_prefix.py', rel_path], cwd=cwd) if (exit_code != 0): error_msg = ('%s contains histogram(s) with disallowed prefix, please run validate_prefix.py %s to fix.' % (rel_path, rel_path)) results.append(output_api.PresubmitError(error_msg))
def GetObsoleteXmlErrors(input_api, output_api, cwd, results): 'Validates all histograms in the file are obsolete.' exit_code = input_api.subprocess.call([input_api.python_executable, 'validate_obsolete_histograms.py'], cwd=cwd) if (exit_code != 0): error_msg = 'histograms_xml/obsolete_histograms.xml contains non-obsolete histograms, please run validate_obsolete_histograms.py to fix.' results.append(output_api.PresubmitError(error_msg))
-8,614,773,996,719,192,000
Validates all histograms in the file are obsolete.
tools/metrics/histograms/PRESUBMIT.py
GetObsoleteXmlErrors
Ron423c/chromium
python
def GetObsoleteXmlErrors(input_api, output_api, cwd, results): exit_code = input_api.subprocess.call([input_api.python_executable, 'validate_obsolete_histograms.py'], cwd=cwd) if (exit_code != 0): error_msg = 'histograms_xml/obsolete_histograms.xml contains non-obsolete histograms, please run validate_obsolete_histograms.py to fix.' results.append(output_api.PresubmitError(error_msg))
def GetValidateHistogramsError(input_api, output_api, cwd, results): 'Validates histograms format and index file.' exit_code = input_api.subprocess.call([input_api.python_executable, 'validate_format.py'], cwd=cwd) if (exit_code != 0): error_msg = ('Histograms are not well-formatted; please run %s/validate_format.py and fix the reported errors.' % cwd) results.append(output_api.PresubmitError(error_msg)) exit_code = input_api.subprocess.call([input_api.python_executable, 'validate_histograms_index.py'], cwd=cwd) if (exit_code != 0): error_msg = ('Histograms index file is not up-to-date. Please run %s/histogram_paths.py to update it' % cwd) results.append(output_api.PresubmitError(error_msg))
1,017,347,260,587,767,800
Validates histograms format and index file.
tools/metrics/histograms/PRESUBMIT.py
GetValidateHistogramsError
Ron423c/chromium
python
def GetValidateHistogramsError(input_api, output_api, cwd, results): exit_code = input_api.subprocess.call([input_api.python_executable, 'validate_format.py'], cwd=cwd) if (exit_code != 0): error_msg = ('Histograms are not well-formatted; please run %s/validate_format.py and fix the reported errors.' % cwd) results.append(output_api.PresubmitError(error_msg)) exit_code = input_api.subprocess.call([input_api.python_executable, 'validate_histograms_index.py'], cwd=cwd) if (exit_code != 0): error_msg = ('Histograms index file is not up-to-date. Please run %s/histogram_paths.py to update it' % cwd) results.append(output_api.PresubmitError(error_msg))