body (stringlengths 26 to 98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (stringlengths 1 to 16.8k) | path (stringlengths 5 to 230) | name (stringlengths 1 to 96) | repository_name (stringlengths 7 to 89) | lang (stringclasses 1) | body_without_docstring (stringlengths 20 to 98.2k) |
---|---|---|---|---|---|---|---|
def scan_status_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-scan-status command: Returns status for HelloWorld scans\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['scan_id']`` list of scan IDs or single scan ID\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains a scan status\n\n :rtype: ``CommandResults``\n "
scan_id_list = argToList(args.get('scan_id', []))
if (len(scan_id_list) == 0):
raise ValueError('scan_id(s) not specified')
scan_list: List[Dict[(str, Any)]] = []
for scan_id in scan_id_list:
scan = client.scan_status(scan_id=scan_id)
scan_list.append(scan)
readable_output = tableToMarkdown('Scan status', scan_list)
return CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Scan', outputs_key_field='scan_id', outputs=scan_list) | 1,625,731,828,764,689,200 | helloworld-scan-status command: Returns status for HelloWorld scans
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['scan_id']`` list of scan IDs or single scan ID
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains a scan status
:rtype: ``CommandResults`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | scan_status_command | DeanArbel/content | python | def scan_status_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-scan-status command: Returns status for HelloWorld scans\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['scan_id']`` list of scan IDs or single scan ID\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains a scan status\n\n :rtype: ``CommandResults``\n "
scan_id_list = argToList(args.get('scan_id', []))
if (len(scan_id_list) == 0):
raise ValueError('scan_id(s) not specified')
scan_list: List[Dict[(str, Any)]] = []
for scan_id in scan_id_list:
scan = client.scan_status(scan_id=scan_id)
scan_list.append(scan)
readable_output = tableToMarkdown('Scan status', scan_list)
return CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Scan', outputs_key_field='scan_id', outputs=scan_list) |
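The docstring above notes that `args['scan_id']` may be a single scan ID or a list; in the body this is normalized by `argToList` from the Demisto common server code, which is not included in this dump. Below is a hedged, self-contained sketch of that normalize-then-iterate pattern; `to_id_list` is a simplified hypothetical stand-in, not the real `argToList`, and the status fetcher is a toy lambda.

```python
from typing import Any, Dict, List, Union

def to_id_list(value: Union[str, List[str], None]) -> List[str]:
    """Hypothetical, simplified stand-in for demisto's argToList (not the real helper)."""
    if not value:
        return []
    if isinstance(value, list):
        return [str(v) for v in value]
    # treat a comma-separated string as multiple IDs
    return [part.strip() for part in str(value).split(',') if part.strip()]

def collect_statuses(args: Dict[str, Any], fetch_status) -> List[Dict[str, Any]]:
    scan_ids = to_id_list(args.get('scan_id', []))
    if not scan_ids:
        raise ValueError('scan_id(s) not specified')
    return [fetch_status(scan_id) for scan_id in scan_ids]

# toy usage with a fake status fetcher standing in for client.scan_status
print(collect_statuses({'scan_id': 'a1,b2'}, lambda s: {'scan_id': s, 'status': 'RUNNING'}))
```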
def scan_results_command(client: Client, args: Dict[(str, Any)]) -> Union[(Dict[(str, Any)], CommandResults, List[CommandResults])]:
"helloworld-scan-results command: Returns results for a HelloWorld scan\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['scan_id']`` scan ID to retrieve results\n ``args['format']`` format of the results. Options are 'file' or 'json'\n\n :return:\n A ``CommandResults`` compatible to return ``return_results()``,\n that contains a scan result when json format is selected, or\n A Dict of entries also compatible to ``return_results()`` that\n contains the output file when file format is selected.\n\n :rtype: ``Union[Dict[str, Any],CommandResults]``\n "
scan_id = args.get('scan_id', None)
if (not scan_id):
raise ValueError('scan_id not specified')
scan_format = args.get('format', 'file')
results = client.scan_results(scan_id=scan_id)
if (scan_format == 'file'):
return fileResult(filename=f'{scan_id}.json', data=json.dumps(results, indent=4), file_type=entryTypes['entryInfoFile'])
elif (scan_format == 'json'):
cves: List[Common.CVE] = []
command_results: List[CommandResults] = []
entities = results.get('entities', [])
for e in entities:
if (('vulns' in e.keys()) and isinstance(e['vulns'], list)):
cves.extend([Common.CVE(id=c, cvss=None, published=None, modified=None, description=None) for c in e['vulns']])
readable_output = tableToMarkdown(f'Scan {scan_id} results', entities)
command_results.append(CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Scan', outputs_key_field='scan_id', outputs=results))
cves = list(set(cves))
for cve in cves:
command_results.append(CommandResults(readable_output=f'CVE {cve}', indicator=cve))
return command_results
else:
raise ValueError('Incorrect format, must be "json" or "file"') | -1,730,858,595,813,137,200 | helloworld-scan-results command: Returns results for a HelloWorld scan
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['scan_id']`` scan ID to retrieve results
``args['format']`` format of the results. Options are 'file' or 'json'
:return:
A ``CommandResults`` compatible to return ``return_results()``,
that contains a scan result when json format is selected, or
A Dict of entries also compatible to ``return_results()`` that
contains the output file when file format is selected.
:rtype: ``Union[Dict[str, Any],CommandResults]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | scan_results_command | DeanArbel/content | python | def scan_results_command(client: Client, args: Dict[(str, Any)]) -> Union[(Dict[(str, Any)], CommandResults, List[CommandResults])]:
"helloworld-scan-results command: Returns results for a HelloWorld scan\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['scan_id']`` scan ID to retrieve results\n ``args['format']`` format of the results. Options are 'file' or 'json'\n\n :return:\n A ``CommandResults`` compatible to return ``return_results()``,\n that contains a scan result when json format is selected, or\n A Dict of entries also compatible to ``return_results()`` that\n contains the output file when file format is selected.\n\n :rtype: ``Union[Dict[str, Any],CommandResults]``\n "
scan_id = args.get('scan_id', None)
if (not scan_id):
raise ValueError('scan_id not specified')
scan_format = args.get('format', 'file')
results = client.scan_results(scan_id=scan_id)
if (scan_format == 'file'):
return fileResult(filename=f'{scan_id}.json', data=json.dumps(results, indent=4), file_type=entryTypes['entryInfoFile'])
elif (scan_format == 'json'):
cves: List[Common.CVE] = []
command_results: List[CommandResults] = []
entities = results.get('entities', [])
for e in entities:
if (('vulns' in e.keys()) and isinstance(e['vulns'], list)):
cves.extend([Common.CVE(id=c, cvss=None, published=None, modified=None, description=None) for c in e['vulns']])
readable_output = tableToMarkdown(f'Scan {scan_id} results', entities)
command_results.append(CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Scan', outputs_key_field='scan_id', outputs=results))
cves = list(set(cves))
for cve in cves:
command_results.append(CommandResults(readable_output=f'CVE {cve}', indicator=cve))
return command_results
else:
raise ValueError('Incorrect format, must be "json" or "file"') |
def main() -> None:
'main function, parses params and runs command functions\n\n :return:\n :rtype:\n '
api_key = demisto.params().get('apikey')
base_url = urljoin(demisto.params()['url'], '/api/v1')
verify_certificate = (not demisto.params().get('insecure', False))
first_fetch_time = arg_to_datetime(arg=demisto.params().get('first_fetch', '3 days'), arg_name='First fetch time', required=True)
first_fetch_timestamp = (int(first_fetch_time.timestamp()) if first_fetch_time else None)
assert isinstance(first_fetch_timestamp, int)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {'Authorization': f'Bearer {api_key}'}
client = Client(base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy)
if (demisto.command() == 'test-module'):
result = test_module(client, first_fetch_timestamp)
return_results(result)
elif (demisto.command() == 'fetch-incidents'):
alert_status = demisto.params().get('alert_status', None)
alert_type = demisto.params().get('alert_type', None)
min_severity = demisto.params().get('min_severity', None)
max_results = arg_to_number(arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False)
if ((not max_results) or (max_results > MAX_INCIDENTS_TO_FETCH)):
max_results = MAX_INCIDENTS_TO_FETCH
(next_run, incidents) = fetch_incidents(client=client, max_results=max_results, last_run=demisto.getLastRun(), first_fetch_time=first_fetch_timestamp, alert_status=alert_status, min_severity=min_severity, alert_type=alert_type)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif (demisto.command() == 'ip'):
default_threshold_ip = int(demisto.params().get('threshold_ip', '65'))
return_results(ip_reputation_command(client, demisto.args(), default_threshold_ip))
elif (demisto.command() == 'domain'):
default_threshold_domain = int(demisto.params().get('threshold_domain', '65'))
return_results(domain_reputation_command(client, demisto.args(), default_threshold_domain))
elif (demisto.command() == 'helloworld-say-hello'):
return_results(say_hello_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-search-alerts'):
return_results(search_alerts_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-get-alert'):
return_results(get_alert_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-update-alert-status'):
return_results(update_alert_status_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-scan-start'):
return_results(scan_start_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-scan-status'):
return_results(scan_status_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-scan-results'):
return_results(scan_results_command(client, demisto.args()))
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'''Failed to execute {demisto.command()} command.
Error:
{str(e)}''') | -8,174,009,125,033,763,000 | main function, parses params and runs command functions
:return:
:rtype: | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | main | DeanArbel/content | python | def main() -> None:
'main function, parses params and runs command functions\n\n :return:\n :rtype:\n '
api_key = demisto.params().get('apikey')
base_url = urljoin(demisto.params()['url'], '/api/v1')
verify_certificate = (not demisto.params().get('insecure', False))
first_fetch_time = arg_to_datetime(arg=demisto.params().get('first_fetch', '3 days'), arg_name='First fetch time', required=True)
first_fetch_timestamp = (int(first_fetch_time.timestamp()) if first_fetch_time else None)
assert isinstance(first_fetch_timestamp, int)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {'Authorization': f'Bearer {api_key}'}
client = Client(base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy)
if (demisto.command() == 'test-module'):
result = test_module(client, first_fetch_timestamp)
return_results(result)
elif (demisto.command() == 'fetch-incidents'):
alert_status = demisto.params().get('alert_status', None)
alert_type = demisto.params().get('alert_type', None)
min_severity = demisto.params().get('min_severity', None)
max_results = arg_to_number(arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False)
if ((not max_results) or (max_results > MAX_INCIDENTS_TO_FETCH)):
max_results = MAX_INCIDENTS_TO_FETCH
(next_run, incidents) = fetch_incidents(client=client, max_results=max_results, last_run=demisto.getLastRun(), first_fetch_time=first_fetch_timestamp, alert_status=alert_status, min_severity=min_severity, alert_type=alert_type)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif (demisto.command() == 'ip'):
default_threshold_ip = int(demisto.params().get('threshold_ip', '65'))
return_results(ip_reputation_command(client, demisto.args(), default_threshold_ip))
elif (demisto.command() == 'domain'):
default_threshold_domain = int(demisto.params().get('threshold_domain', '65'))
return_results(domain_reputation_command(client, demisto.args(), default_threshold_domain))
elif (demisto.command() == 'helloworld-say-hello'):
return_results(say_hello_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-search-alerts'):
return_results(search_alerts_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-get-alert'):
return_results(get_alert_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-update-alert-status'):
return_results(update_alert_status_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-scan-start'):
return_results(scan_start_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-scan-status'):
return_results(scan_status_command(client, demisto.args()))
elif (demisto.command() == 'helloworld-scan-results'):
return_results(scan_results_command(client, demisto.args()))
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'''Failed to execute {demisto.command()} command.
Error:
{str(e)}''')
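The `main()` row above routes the incoming command name to its handler through a long if/elif chain. Purely as an illustration of that routing idea (not the integration's actual code), the same dispatch can be sketched with a plain dictionary of callables; the command names here map to toy handlers.

```python
from typing import Any, Callable, Dict

def say_hello(args: Dict[str, Any]) -> str:
    return f"Hello {args.get('name', 'world')}"

def scan_status(args: Dict[str, Any]) -> str:
    return f"status for scan {args.get('scan_id')}"

# one lookup table instead of an if/elif chain
COMMANDS: Dict[str, Callable[[Dict[str, Any]], str]] = {
    'helloworld-say-hello': say_hello,
    'helloworld-scan-status': scan_status,
}

def dispatch(command: str, args: Dict[str, Any]) -> str:
    handler = COMMANDS.get(command)
    if handler is None:
        raise NotImplementedError(f'Command not implemented: {command}')
    return handler(args)

print(dispatch('helloworld-say-hello', {'name': 'Dave'}))   # -> Hello Dave
```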
def get_ip_reputation(self, ip: str) -> Dict[(str, Any)]:
"Gets the IP reputation using the '/ip' API endpoint\n\n :type ip: ``str``\n :param ip: IP address to get the reputation for\n\n :return: dict containing the IP reputation as returned from the API\n :rtype: ``Dict[str, Any]``\n "
return self._http_request(method='GET', url_suffix='/ip', params={'ip': ip}) | -5,505,118,003,103,052,000 | Gets the IP reputation using the '/ip' API endpoint
:type ip: ``str``
:param ip: IP address to get the reputation for
:return: dict containing the IP reputation as returned from the API
:rtype: ``Dict[str, Any]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | get_ip_reputation | DeanArbel/content | python | def get_ip_reputation(self, ip: str) -> Dict[(str, Any)]:
"Gets the IP reputation using the '/ip' API endpoint\n\n :type ip: ``str``\n :param ip: IP address to get the reputation for\n\n :return: dict containing the IP reputation as returned from the API\n :rtype: ``Dict[str, Any]``\n "
return self._http_request(method='GET', url_suffix='/ip', params={'ip': ip}) |
def get_domain_reputation(self, domain: str) -> Dict[(str, Any)]:
"Gets the Domain reputation using the '/domain' API endpoint\n\n :type domain: ``str``\n :param domain: domain name to get the reputation for\n\n :return: dict containing the domain reputation as returned from the API\n :rtype: ``Dict[str, Any]``\n "
return self._http_request(method='GET', url_suffix='/domain', params={'domain': domain}) | 4,621,716,766,601,556,000 | Gets the Domain reputation using the '/domain' API endpoint
:type domain: ``str``
:param domain: domain name to get the reputation for
:return: dict containing the domain reputation as returned from the API
:rtype: ``Dict[str, Any]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | get_domain_reputation | DeanArbel/content | python | def get_domain_reputation(self, domain: str) -> Dict[(str, Any)]:
"Gets the Domain reputation using the '/domain' API endpoint\n\n :type domain: ``str``\n :param domain: domain name to get the reputation for\n\n :return: dict containing the domain reputation as returned from the API\n :rtype: ``Dict[str, Any]``\n "
return self._http_request(method='GET', url_suffix='/domain', params={'domain': domain}) |
def search_alerts(self, alert_status: Optional[str], severity: Optional[str], alert_type: Optional[str], max_results: Optional[int], start_time: Optional[int]) -> List[Dict[(str, Any)]]:
'Searches for HelloWorld alerts using the \'/get_alerts\' API endpoint\n\n All the parameters are passed directly to the API as HTTP POST parameters in the request\n\n :type alert_status: ``Optional[str]``\n :param alert_status: status of the alert to search for. Options are: \'ACTIVE\' or \'CLOSED\'\n\n :type severity: ``Optional[str]``\n :param severity:\n severity of the alert to search for. Comma-separated values.\n Options are: "Low", "Medium", "High", "Critical"\n\n :type alert_type: ``Optional[str]``\n :param alert_type: type of alerts to search for. There is no list of predefined types\n\n :type max_results: ``Optional[int]``\n :param max_results: maximum number of results to return\n\n :type start_time: ``Optional[int]``\n :param start_time: start timestamp (epoch in seconds) for the alert search\n\n :return: list containing the found HelloWorld alerts as dicts\n :rtype: ``List[Dict[str, Any]]``\n '
request_params: Dict[(str, Any)] = {}
if alert_status:
request_params['alert_status'] = alert_status
if alert_type:
request_params['alert_type'] = alert_type
if severity:
request_params['severity'] = severity
if max_results:
request_params['max_results'] = max_results
if start_time:
request_params['start_time'] = start_time
return self._http_request(method='GET', url_suffix='/get_alerts', params=request_params) | 2,007,290,296,748,268,500 | Searches for HelloWorld alerts using the '/get_alerts' API endpoint
All the parameters are passed directly to the API as HTTP POST parameters in the request
:type alert_status: ``Optional[str]``
:param alert_status: status of the alert to search for. Options are: 'ACTIVE' or 'CLOSED'
:type severity: ``Optional[str]``
:param severity:
severity of the alert to search for. Comma-separated values.
Options are: "Low", "Medium", "High", "Critical"
:type alert_type: ``Optional[str]``
:param alert_type: type of alerts to search for. There is no list of predefined types
:type max_results: ``Optional[int]``
:param max_results: maximum number of results to return
:type start_time: ``Optional[int]``
:param start_time: start timestamp (epoch in seconds) for the alert search
:return: list containing the found HelloWorld alerts as dicts
:rtype: ``List[Dict[str, Any]]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | search_alerts | DeanArbel/content | python | def search_alerts(self, alert_status: Optional[str], severity: Optional[str], alert_type: Optional[str], max_results: Optional[int], start_time: Optional[int]) -> List[Dict[(str, Any)]]:
'Searches for HelloWorld alerts using the \'/get_alerts\' API endpoint\n\n All the parameters are passed directly to the API as HTTP POST parameters in the request\n\n :type alert_status: ``Optional[str]``\n :param alert_status: status of the alert to search for. Options are: \'ACTIVE\' or \'CLOSED\'\n\n :type severity: ``Optional[str]``\n :param severity:\n severity of the alert to search for. Comma-separated values.\n Options are: "Low", "Medium", "High", "Critical"\n\n :type alert_type: ``Optional[str]``\n :param alert_type: type of alerts to search for. There is no list of predefined types\n\n :type max_results: ``Optional[int]``\n :param max_results: maximum number of results to return\n\n :type start_time: ``Optional[int]``\n :param start_time: start timestamp (epoch in seconds) for the alert search\n\n :return: list containing the found HelloWorld alerts as dicts\n :rtype: ``List[Dict[str, Any]]``\n '
request_params: Dict[(str, Any)] = {}
if alert_status:
request_params['alert_status'] = alert_status
if alert_type:
request_params['alert_type'] = alert_type
if severity:
request_params['severity'] = severity
if max_results:
request_params['max_results'] = max_results
if start_time:
request_params['start_time'] = start_time
return self._http_request(method='GET', url_suffix='/get_alerts', params=request_params) |
def get_alert(self, alert_id: str) -> Dict[(str, Any)]:
'Gets a specific HelloWorld alert by id\n\n :type alert_id: ``str``\n :param alert_id: id of the alert to return\n\n :return: dict containing the alert as returned from the API\n :rtype: ``Dict[str, Any]``\n '
return self._http_request(method='GET', url_suffix='/get_alert_details', params={'alert_id': alert_id}) | -3,893,194,839,806,734,300 | Gets a specific HelloWorld alert by id
:type alert_id: ``str``
:param alert_id: id of the alert to return
:return: dict containing the alert as returned from the API
:rtype: ``Dict[str, Any]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | get_alert | DeanArbel/content | python | def get_alert(self, alert_id: str) -> Dict[(str, Any)]:
'Gets a specific HelloWorld alert by id\n\n :type alert_id: ``str``\n :param alert_id: id of the alert to return\n\n :return: dict containing the alert as returned from the API\n :rtype: ``Dict[str, Any]``\n '
return self._http_request(method='GET', url_suffix='/get_alert_details', params={'alert_id': alert_id}) |
def update_alert_status(self, alert_id: str, alert_status: str) -> Dict[(str, Any)]:
"Changes the status of a specific HelloWorld alert\n\n :type alert_id: ``str``\n :param alert_id: id of the alert to return\n\n :type alert_status: ``str``\n :param alert_status: new alert status. Options are: 'ACTIVE' or 'CLOSED'\n\n :return: dict containing the alert as returned from the API\n :rtype: ``Dict[str, Any]``\n "
return self._http_request(method='GET', url_suffix='/change_alert_status', params={'alert_id': alert_id, 'alert_status': alert_status}) | 4,261,590,240,170,449,000 | Changes the status of a specific HelloWorld alert
:type alert_id: ``str``
:param alert_id: id of the alert to return
:type alert_status: ``str``
:param alert_status: new alert status. Options are: 'ACTIVE' or 'CLOSED'
:return: dict containing the alert as returned from the API
:rtype: ``Dict[str, Any]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | update_alert_status | DeanArbel/content | python | def update_alert_status(self, alert_id: str, alert_status: str) -> Dict[(str, Any)]:
"Changes the status of a specific HelloWorld alert\n\n :type alert_id: ``str``\n :param alert_id: id of the alert to return\n\n :type alert_status: ``str``\n :param alert_status: new alert status. Options are: 'ACTIVE' or 'CLOSED'\n\n :return: dict containing the alert as returned from the API\n :rtype: ``Dict[str, Any]``\n "
return self._http_request(method='GET', url_suffix='/change_alert_status', params={'alert_id': alert_id, 'alert_status': alert_status}) |
def scan_start(self, hostname: str) -> Dict[(str, Any)]:
'Starts a HelloWorld scan on a specific hostname\n\n :type hostname: ``str``\n :param hostname: hostname of the machine to scan\n\n :return: dict containing the scan status as returned from the API\n :rtype: ``Dict[str, Any]``\n '
return self._http_request(method='GET', url_suffix='/start_scan', params={'hostname': hostname}) | -6,631,833,082,852,968,000 | Starts a HelloWorld scan on a specific hostname
:type hostname: ``str``
:param hostname: hostname of the machine to scan
:return: dict containing the scan status as returned from the API
:rtype: ``Dict[str, Any]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | scan_start | DeanArbel/content | python | def scan_start(self, hostname: str) -> Dict[(str, Any)]:
'Starts a HelloWorld scan on a specific hostname\n\n :type hostname: ``str``\n :param hostname: hostname of the machine to scan\n\n :return: dict containing the scan status as returned from the API\n :rtype: ``Dict[str, Any]``\n '
return self._http_request(method='GET', url_suffix='/start_scan', params={'hostname': hostname}) |
def scan_status(self, scan_id: str) -> Dict[(str, Any)]:
'Gets the status of a HelloWorld scan\n\n :type scan_id: ``str``\n :param scan_id: ID of the scan to retrieve status for\n\n :return: dict containing the scan status as returned from the API\n :rtype: ``Dict[str, Any]``\n '
return self._http_request(method='GET', url_suffix='/check_scan', params={'scan_id': scan_id}) | 4,874,480,294,823,485,000 | Gets the status of a HelloWorld scan
:type scan_id: ``str``
:param scan_id: ID of the scan to retrieve status for
:return: dict containing the scan status as returned from the API
:rtype: ``Dict[str, Any]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | scan_status | DeanArbel/content | python | def scan_status(self, scan_id: str) -> Dict[(str, Any)]:
'Gets the status of a HelloWorld scan\n\n :type scan_id: ``str``\n :param scan_id: ID of the scan to retrieve status for\n\n :return: dict containing the scan status as returned from the API\n :rtype: ``Dict[str, Any]``\n '
return self._http_request(method='GET', url_suffix='/check_scan', params={'scan_id': scan_id}) |
def scan_results(self, scan_id: str) -> Dict[(str, Any)]:
'Gets the results of a HelloWorld scan\n\n :type scan_id: ``str``\n :param scan_id: ID of the scan to retrieve results for\n\n :return: dict containing the scan results as returned from the API\n :rtype: ``Dict[str, Any]``\n '
return self._http_request(method='GET', url_suffix='/get_scan_results', params={'scan_id': scan_id}) | -3,338,946,734,264,717,300 | Gets the results of a HelloWorld scan
:type scan_id: ``str``
:param scan_id: ID of the scan to retrieve results for
:return: dict containing the scan results as returned from the API
:rtype: ``Dict[str, Any]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | scan_results | DeanArbel/content | python | def scan_results(self, scan_id: str) -> Dict[(str, Any)]:
'Gets the results of a HelloWorld scan\n\n :type scan_id: ``str``\n :param scan_id: ID of the scan to retrieve results for\n\n :return: dict containing the scan results as returned from the API\n :rtype: ``Dict[str, Any]``\n '
return self._http_request(method='GET', url_suffix='/get_scan_results', params={'scan_id': scan_id}) |
def say_hello(self, name: str) -> str:
"Returns 'Hello {name}'\n\n :type name: ``str``\n :param name: name to append to the 'Hello' string\n\n :return: string containing 'Hello {name}'\n :rtype: ``str``\n "
return f'Hello {name}' | -5,721,078,814,974,353,000 | Returns 'Hello {name}'
:type name: ``str``
:param name: name to append to the 'Hello' string
:return: string containing 'Hello {name}'
:rtype: ``str`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | say_hello | DeanArbel/content | python | def say_hello(self, name: str) -> str:
"Returns 'Hello {name}'\n\n :type name: ``str``\n :param name: name to append to the 'Hello' string\n\n :return: string containing 'Hello {name}'\n :rtype: ``str``\n "
return f'Hello {name}' |
def calculate_psnr(img1, img2):
'\n data range [0, 1]\n '
img1 = img1.clamp(0, 1)
img2 = img2.clamp(0, 1)
mse = torch.mean(((img1 - img2) ** 2), [1, 2, 3])
PIXEL_MAX = 1
return (20 * torch.mean(torch.log10((PIXEL_MAX / torch.sqrt(mse))))) | 847,792,582,663,107,200 | data range [0, 1] | utils/metrics.py | calculate_psnr | Wang-jiahao/SimDeblur | python | def calculate_psnr(img1, img2):
'\n \n '
img1 = img1.clamp(0, 1)
img2 = img2.clamp(0, 1)
mse = torch.mean(((img1 - img2) ** 2), [1, 2, 3])
PIXEL_MAX = 1
return (20 * torch.mean(torch.log10((PIXEL_MAX / torch.sqrt(mse))))) |
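For context, here is a self-contained run of the PSNR computation from the row above; the function is restated (lightly reformatted) so the snippet executes on its own, and it assumes PyTorch is installed and inputs already scaled to [0, 1].

```python
import torch

def calculate_psnr(img1, img2):
    """PSNR for batched images in [0, 1], averaged over the batch."""
    img1 = img1.clamp(0, 1)
    img2 = img2.clamp(0, 1)
    mse = torch.mean((img1 - img2) ** 2, [1, 2, 3])   # per-image MSE over C, H, W
    PIXEL_MAX = 1
    return 20 * torch.mean(torch.log10(PIXEL_MAX / torch.sqrt(mse)))

clean = torch.rand(4, 3, 16, 16)                          # batch of 4 RGB 16x16 images
noisy = (clean + 0.05 * torch.randn_like(clean)).clamp(0, 1)
print(float(calculate_psnr(clean, noisy)))                # roughly 26 dB for this noise level
```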
def printParaNum(model):
'\n function: print the number of total parameters and trainable parameters\n '
total_params = sum((p.numel() for p in model.parameters()))
total_trainable_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
print(('Total parameters: %d' % total_params))
print(('Trainable parameters: %d' % total_trainable_params)) | 2,902,576,970,362,084,000 | function: print the number of total parameters and trainable parameters | src/train_amp.py | printParaNum | suiyizhao/Pytorch-speedup | python | def printParaNum(model):
'\n \n '
total_params = sum((p.numel() for p in model.parameters()))
total_trainable_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
print(('Total parameters: %d' % total_params))
print(('Trainable parameters: %d' % total_trainable_params)) |
def set_random_seed(seed, deterministic=False):
'\n function: Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n '
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False | -1,521,102,580,318,788,400 | function: Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False. | src/train_amp.py | set_random_seed | suiyizhao/Pytorch-speedup | python | def set_random_seed(seed, deterministic=False):
'\n function: Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n '
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False |
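A quick check of what the seeding above buys: reseeding with the same value makes the Python and NumPy generators reproduce the same draws. The torch, CUDA, and cuDNN lines are omitted in this sketch so it runs without a GPU or PyTorch installed.

```python
import random
import numpy as np

def set_random_seed(seed: int) -> None:
    """Seed only the stdlib and NumPy generators (torch/cuDNN parts omitted)."""
    random.seed(seed)
    np.random.seed(seed)

set_random_seed(42)
first = (random.random(), np.random.rand(3).tolist())

set_random_seed(42)
second = (random.random(), np.random.rand(3).tolist())

assert first == second          # identical draws after reseeding with the same value
print(first)
```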
@ops.RegisterGradient('Batch')
def _BatchGrad(op, *out_grads):
'Gradient for batch op.'
gradients = []
for i in range(len(op.inputs)):
gradients.append(gen_batch_ops.unbatch(out_grads[i], op.outputs[(- 2)], op.outputs[(- 1)], timeout_micros=op.get_attr('grad_timeout_micros'), shared_name='batch_gradient_{}_{}'.format(op.name, i)))
return gradients | 1,933,694,469,260,478,700 | Gradient for batch op. | tensorflow/contrib/batching/python/ops/batch_ops.py | _BatchGrad | ekyuho/tensorflow | python | @ops.RegisterGradient('Batch')
def _BatchGrad(op, *out_grads):
gradients = []
for i in range(len(op.inputs)):
gradients.append(gen_batch_ops.unbatch(out_grads[i], op.outputs[(- 2)], op.outputs[(- 1)], timeout_micros=op.get_attr('grad_timeout_micros'), shared_name='batch_gradient_{}_{}'.format(op.name, i)))
return gradients |
def batch_function(num_batch_threads, max_batch_size, batch_timeout_micros, allowed_batch_sizes=None, grad_timeout_micros=((60 * 1000) * 1000), unbatch_timeout_micros=((60 * 1000) * 1000)):
'Batches the computation done by the decorated function.\n\n So, for example, in the following code\n\n ```python\n @batch_function(1, 2, 3)\n def layer(a):\n return tf.matmul(a, a)\n\n b = layer(w)\n ```\n\n if more than one session.run call is simultaneously trying to compute `b`\n the values of `w` will be gathered, non-deterministically concatenated\n along the first axis, and only one thread will run the computation. See the\n documentation of the `Batch` op for more details.\n\n Assumes that all arguments of the decorated function are Tensors which will\n be batched along their first dimension.\n\n SparseTensor is not supported. The return value of the decorated function\n must be a Tensor or a list/tuple of Tensors.\n\n Args:\n num_batch_threads: Number of scheduling threads for processing batches\n of work. Determines the number of batches processed in parallel.\n max_batch_size: Batch sizes will never be bigger than this.\n batch_timeout_micros: Maximum number of microseconds to wait before\n outputting an incomplete batch.\n allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,\n does nothing. Otherwise, supplies a list of batch sizes, causing the op\n to pad batches up to one of those sizes. The entries must increase\n monotonically, and the final entry must equal max_batch_size.\n grad_timeout_micros: The timeout to use for the gradient. See the\n documentation of the unbatch op for more details. Defaults to 60s.\n unbatch_timeout_micros: The timeout to use for unbatching. See the\n documentation of the unbatch op for more details. Defaults to 60s.\n\n Returns:\n The decorated function will return the unbatched computation output Tensors.\n '
def decorator(f):
def decorated(*args):
with ops.name_scope('batch') as name:
for a in args:
if (not isinstance(a, ops.Tensor)):
raise ValueError(('All arguments to functions decorated with `batch_function` are supposed to be Tensors; found %s' % repr(a)))
(batched_tensors, batch_index, id_t) = gen_batch_ops.batch(args, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, shared_name=name)
outputs = f(*batched_tensors)
if isinstance(outputs, ops.Tensor):
outputs_list = [outputs]
else:
outputs_list = outputs
with ops.name_scope('unbatch') as unbatch_name:
unbatched = [gen_batch_ops.unbatch(t, batch_index, id_t, timeout_micros=unbatch_timeout_micros, shared_name=unbatch_name) for t in outputs_list]
if isinstance(outputs, ops.Tensor):
return unbatched[0]
return unbatched
return decorated
return decorator | -9,184,000,206,858,053,000 | Batches the computation done by the decorated function.
So, for example, in the following code
```python
@batch_function(1, 2, 3)
def layer(a):
return tf.matmul(a, a)
b = layer(w)
```
if more than one session.run call is simultaneously trying to compute `b`
the values of `w` will be gathered, non-deterministically concatenated
along the first axis, and only one thread will run the computation. See the
documentation of the `Batch` op for more details.
Assumes that all arguments of the decorated function are Tensors which will
be batched along their first dimension.
SparseTensor is not supported. The return value of the decorated function
must be a Tensor or a list/tuple of Tensors.
Args:
num_batch_threads: Number of scheduling threads for processing batches
of work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
grad_timeout_micros: The timeout to use for the gradient. See the
documentation of the unbatch op for more details. Defaults to 60s.
unbatch_timeout_micros: The timeout to use for unbatching. See the
documentation of the unbatch op for more details. Defaults to 60s.
Returns:
The decorated function will return the unbatched computation output Tensors. | tensorflow/contrib/batching/python/ops/batch_ops.py | batch_function | ekyuho/tensorflow | python | def batch_function(num_batch_threads, max_batch_size, batch_timeout_micros, allowed_batch_sizes=None, grad_timeout_micros=((60 * 1000) * 1000), unbatch_timeout_micros=((60 * 1000) * 1000)):
'Batches the computation done by the decorated function.\n\n So, for example, in the following code\n\n ```python\n @batch_function(1, 2, 3)\n def layer(a):\n return tf.matmul(a, a)\n\n b = layer(w)\n ```\n\n if more than one session.run call is simultaneously trying to compute `b`\n the values of `w` will be gathered, non-deterministically concatenated\n along the first axis, and only one thread will run the computation. See the\n documentation of the `Batch` op for more details.\n\n Assumes that all arguments of the decorated function are Tensors which will\n be batched along their first dimension.\n\n SparseTensor is not supported. The return value of the decorated function\n must be a Tensor or a list/tuple of Tensors.\n\n Args:\n num_batch_threads: Number of scheduling threads for processing batches\n of work. Determines the number of batches processed in parallel.\n max_batch_size: Batch sizes will never be bigger than this.\n batch_timeout_micros: Maximum number of microseconds to wait before\n outputting an incomplete batch.\n allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,\n does nothing. Otherwise, supplies a list of batch sizes, causing the op\n to pad batches up to one of those sizes. The entries must increase\n monotonically, and the final entry must equal max_batch_size.\n grad_timeout_micros: The timeout to use for the gradient. See the\n documentation of the unbatch op for more details. Defaults to 60s.\n unbatch_timeout_micros: The timeout to use for unbatching. See the\n documentation of the unbatch op for more details. Defaults to 60s.\n\n Returns:\n The decorated function will return the unbatched computation output Tensors.\n '
def decorator(f):
def decorated(*args):
with ops.name_scope('batch') as name:
for a in args:
if (not isinstance(a, ops.Tensor)):
raise ValueError(('All arguments to functions decorated with `batch_function` are supposed to be Tensors; found %s' % repr(a)))
(batched_tensors, batch_index, id_t) = gen_batch_ops.batch(args, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, shared_name=name)
outputs = f(*batched_tensors)
if isinstance(outputs, ops.Tensor):
outputs_list = [outputs]
else:
outputs_list = outputs
with ops.name_scope('unbatch') as unbatch_name:
unbatched = [gen_batch_ops.unbatch(t, batch_index, id_t, timeout_micros=unbatch_timeout_micros, shared_name=unbatch_name) for t in outputs_list]
if isinstance(outputs, ops.Tensor):
return unbatched[0]
return unbatched
return decorated
return decorator |
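`batch_function` above is a parameterized decorator: the outer function captures the batching options, `decorator` receives the user function, and `decorated` replaces the call site. A stripped-down sketch of that three-layer structure in plain Python (no TensorFlow, toy behavior only):

```python
import functools

def repeat(times: int):
    """Parameterized decorator: the outer layer captures configuration."""
    def decorator(f):                       # second layer receives the wrapped function
        @functools.wraps(f)
        def decorated(*args, **kwargs):     # third layer is what actually gets called
            result = None
            for _ in range(times):
                result = f(*args, **kwargs)
            return result
        return decorated
    return decorator

@repeat(times=3)
def greet(name: str) -> str:
    print(f"hello {name}")
    return name

greet("batch")   # prints three times, returns "batch"
```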
async def execute_wkhtmltopdf(uri: str) -> bytes:
'Run wkhtmltopdf on the command-line and return the output.'
cmd = ['wkhtmltopdf', '--log-level', 'none', uri, '-']
return check_output(cmd) | -8,748,739,963,590,872,000 | Run wkhtmltopdf on the command-line and return the output. | src/app.py | execute_wkhtmltopdf | mtik00/wkhtmltopdf-service | python | async def execute_wkhtmltopdf(uri: str) -> bytes:
cmd = ['wkhtmltopdf', '--log-level', 'none', uri, '-']
return check_output(cmd) |
async def convert_body(request: Request):
"\n It's just _way_ easier to deal with files rather than STDIN.\n\n Take the body of the request, write it to a temporary file, then use\n wkhtmltopdf to convert it.\n "
data = (await request.body())
if (not data):
return Response('ERROR: No body', status_code=400)
with TemporaryDirectory() as tmpdirname:
outfile = os.path.join(tmpdirname, 'out.html')
with open(outfile, 'w') as fh:
fh.write(data.decode('utf-8'))
bytes = (await execute_wkhtmltopdf(outfile))
return Response(bytes, media_type='application/pdf') | 7,125,513,509,955,753,000 | It's just _way_ easier to deal with files rather than STDIN.
Take the body of the request, write it to a temporary file, then use
wkhtmltopdf to convert it. | src/app.py | convert_body | mtik00/wkhtmltopdf-service | python | async def convert_body(request: Request):
"\n It's just _way_ easier to deal with files rather than STDIN.\n\n Take the body of the request, write it to a temporary file, then use\n wkhtmltopdf to convert it.\n "
data = (await request.body())
if (not data):
return Response('ERROR: No body', status_code=400)
with TemporaryDirectory() as tmpdirname:
outfile = os.path.join(tmpdirname, 'out.html')
with open(outfile, 'w') as fh:
fh.write(data.decode('utf-8'))
bytes = (await execute_wkhtmltopdf(outfile))
return Response(bytes, media_type='application/pdf') |
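The two rows above implement a common pattern: persist the request body to a temporary file, then shell out to `wkhtmltopdf` and capture the PDF bytes from stdout. A minimal synchronous sketch of the same pattern, assuming the `wkhtmltopdf` binary is on `PATH` (the HTML string is just an example):

```python
import os
from subprocess import check_output
from tempfile import TemporaryDirectory

def html_to_pdf(html: str) -> bytes:
    """Write HTML to a temp file and convert it with wkhtmltopdf."""
    with TemporaryDirectory() as tmpdirname:
        outfile = os.path.join(tmpdirname, 'out.html')
        with open(outfile, 'w') as fh:
            fh.write(html)
        # '-' sends the generated PDF to stdout, which check_output captures
        return check_output(['wkhtmltopdf', '--log-level', 'none', outfile, '-'])

pdf_bytes = html_to_pdf('<h1>Hello PDF</h1>')
print(len(pdf_bytes), 'bytes')
```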
def get_mbreplacer_dir():
'\n Get the mbreplacer dir\n :return str: mbreplacer root dir\n '
return os.getcwd() | 1,429,617,218,071,645,200 | Get the mbreplacer dir
:return str: mbreplacer root dir | mbreplacer.py | get_mbreplacer_dir | ackhoury/mbreplacer | python | def get_mbreplacer_dir():
'\n Get the mbreplacer dir\n :return str: mbreplacer root dir\n '
return os.getcwd() |
@app.route('/')
def home():
'Step 1: User Authorization.\n\n Redirect the user/resource owner to the OAuth provider (i.e. Github)\n using an URL with a few key OAuth parameters.\n '
return render_template('index.html') | -7,705,316,012,026,364,000 | Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters. | td/oauth.py | home | Aspire1Inspire2/td-ameritrade-python-api | python | @app.route('/')
def home():
'Step 1: User Authorization.\n\n Redirect the user/resource owner to the OAuth provider (i.e. Github)\n using an URL with a few key OAuth parameters.\n '
return render_template('index.html') |
@app.route('/login')
def demo():
'Step 1: User Authorization.\n\n Redirect the user/resource owner to the OAuth provider (i.e. Github)\n using an URL with a few key OAuth parameters.\n '
auth_tuple = app.config['auth_client'].authorization_url()
session['oauth_state'] = auth_tuple[1]
return redirect(auth_tuple[0]) | -5,519,529,732,331,011,000 | Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters. | td/oauth.py | demo | Aspire1Inspire2/td-ameritrade-python-api | python | @app.route('/login')
def demo():
'Step 1: User Authorization.\n\n Redirect the user/resource owner to the OAuth provider (i.e. Github)\n using an URL with a few key OAuth parameters.\n '
auth_tuple = app.config['auth_client'].authorization_url()
session['oauth_state'] = auth_tuple[1]
return redirect(auth_tuple[0]) |
@app.route('/login/callback', methods=['GET'])
def callback():
' Step 3: Retrieving an access token.\n\n The user has been redirected back from the provider to your registered\n callback URL. With this redirection comes an authorization code included\n in the redirect URL. We will use that to obtain an access token.\n '
token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
session['oauth_token'] = token_dict
if app.config['call_close']:
return redirect(url_for('shutdown'))
return jsonify(token_dict) | -1,592,033,070,512,526,000 | Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token. | td/oauth.py | callback | Aspire1Inspire2/td-ameritrade-python-api | python | @app.route('/login/callback', methods=['GET'])
def callback():
' Step 3: Retrieving an access token.\n\n The user has been redirected back from the provider to your registered\n callback URL. With this redirection comes an authorization code included\n in the redirect URL. We will use that to obtain an access token.\n '
token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
session['oauth_token'] = token_dict
if app.config['call_close']:
return redirect(url_for('shutdown'))
return jsonify(token_dict) |
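The three Flask routes above depend on a client object stored in `app.config['auth_client']` whose implementation is not part of this dump. As a generic, hedged illustration of the same authorization-code flow (step 1 builds the authorization URL and remembers `state`, step 3 exchanges the redirect URL for a token), here is a sketch using `requests_oauthlib`; the client ID, secret, and endpoint URLs are placeholders, not the real provider values.

```python
from requests_oauthlib import OAuth2Session

CLIENT_ID = 'my-client-id'                                # placeholder
CLIENT_SECRET = 'my-client-secret'                        # placeholder
REDIRECT_URI = 'http://localhost:5000/login/callback'
AUTH_URL = 'https://provider.example/oauth/authorize'     # placeholder
TOKEN_URL = 'https://provider.example/oauth/token'        # placeholder

def build_login_redirect():
    """Step 1: produce the provider URL to redirect the user to, plus the CSRF state."""
    oauth = OAuth2Session(CLIENT_ID, redirect_uri=REDIRECT_URI)
    authorization_url, state = oauth.authorization_url(AUTH_URL)
    return authorization_url, state   # store `state` in the session, redirect to the URL

def exchange_code_for_token(callback_url: str, state: str) -> dict:
    """Step 3: trade the code embedded in the callback URL for an access token."""
    oauth = OAuth2Session(CLIENT_ID, redirect_uri=REDIRECT_URI, state=state)
    return oauth.fetch_token(TOKEN_URL, client_secret=CLIENT_SECRET,
                             authorization_response=callback_url)
```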
def _test_repr_or_str(self, fn, expect_id):
"Test Queue's repr or str.\n\n fn is repr or str. expect_id is True if we expect the Queue's id to\n appear in fn(Queue()).\n "
def gen():
when = (yield)
self.assertAlmostEqual(0.1, when)
when = (yield 0.1)
self.assertAlmostEqual(0.2, when)
(yield 0.1)
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
self.assertTrue(fn(q).startswith('<Queue'), fn(q))
id_is_present = (hex(id(q)) in fn(q))
self.assertEqual(expect_id, id_is_present)
@asyncio.coroutine
def add_getter():
q = asyncio.Queue(loop=loop)
asyncio.Task(q.get(), loop=loop)
(yield from asyncio.sleep(0.1, loop=loop))
self.assertTrue(('_getters[1]' in fn(q)))
q.put_nowait(0)
loop.run_until_complete(add_getter())
@asyncio.coroutine
def add_putter():
q = asyncio.Queue(maxsize=1, loop=loop)
q.put_nowait(1)
asyncio.Task(q.put(2), loop=loop)
(yield from asyncio.sleep(0.1, loop=loop))
self.assertTrue(('_putters[1]' in fn(q)))
q.get_nowait()
loop.run_until_complete(add_putter())
q = asyncio.Queue(loop=loop)
q.put_nowait(1)
self.assertTrue(('_queue=[1]' in fn(q))) | -2,233,485,092,732,088,300 | Test Queue's repr or str.
fn is repr or str. expect_id is True if we expect the Queue's id to
appear in fn(Queue()). | tests/python/test_queues.py | _test_repr_or_str | ProvoK/trio-asyncio | python | def _test_repr_or_str(self, fn, expect_id):
"Test Queue's repr or str.\n\n fn is repr or str. expect_id is True if we expect the Queue's id to\n appear in fn(Queue()).\n "
def gen():
when = (yield)
self.assertAlmostEqual(0.1, when)
when = (yield 0.1)
self.assertAlmostEqual(0.2, when)
(yield 0.1)
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
self.assertTrue(fn(q).startswith('<Queue'), fn(q))
id_is_present = (hex(id(q)) in fn(q))
self.assertEqual(expect_id, id_is_present)
@asyncio.coroutine
def add_getter():
q = asyncio.Queue(loop=loop)
asyncio.Task(q.get(), loop=loop)
(yield from asyncio.sleep(0.1, loop=loop))
self.assertTrue(('_getters[1]' in fn(q)))
q.put_nowait(0)
loop.run_until_complete(add_getter())
@asyncio.coroutine
def add_putter():
q = asyncio.Queue(maxsize=1, loop=loop)
q.put_nowait(1)
asyncio.Task(q.put(2), loop=loop)
(yield from asyncio.sleep(0.1, loop=loop))
self.assertTrue(('_putters[1]' in fn(q)))
q.get_nowait()
loop.run_until_complete(add_putter())
q = asyncio.Queue(loop=loop)
q.put_nowait(1)
self.assertTrue(('_queue=[1]' in fn(q))) |
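The test above checks, among other things, that a non-empty `asyncio.Queue` shows its contents in `repr()`. A tiny standalone check of those two assertions (relies on CPython's current repr format; no test framework needed):

```python
import asyncio

async def main() -> None:
    q = asyncio.Queue()
    q.put_nowait(1)
    text = repr(q)
    assert text.startswith('<Queue'), text     # repr/str both begin with '<Queue'
    assert '_queue=[1]' in text, text          # queued items show up in the repr
    print(text)

asyncio.run(main())
```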
def _submit_filtering_jobs(self, uuid):
'\n Here we create the task and put it on the job queue.\n '
two_weeks_ago = (datetime.date.today() - datetime.timedelta(14))
params = {'from': int(two_weeks_ago.strftime('%s')), 'to': int(time.time()), 'unit': 'seconds'}
location_api_resp = requests.get(f'http://location-api:5000/geohashRegionsForUser/{uuid}', params=params)
if (location_api_resp.status_code != 200):
logger.warning(location_api_resp)
api.abort(500, 'There was a problem when requesting data from the Location API')
visited_regions_geohash_prefixes = location_api_resp.json()
logger.info(f'Visited Regions for diagonzed patient: {str(visited_regions_geohash_prefixes)}')
location_api_resp_users = requests.get('http://location-api:5000/users')
if (location_api_resp_users.status_code != 200):
logger.warning(location_api_resp_users)
api.abort(500, 'There was a problem when requesting data from the Location API')
all_influx_users = list((set(location_api_resp_users.json()) - {str(uuid)}))
logger.info(f'All Influx users without diagnozed patient: {str(all_influx_users)}')
n_workers = 3
task_size = (len(all_influx_users) // n_workers)
all_influx_users_partitioned = SingleUser._chunks(all_influx_users, task_size)
redis_instance = redis.Redis(host=os.getenv('REDIS_HOST', 'queue'), port=os.getenv('REDIS_PORT', 6379), db=os.getenv('REDIS_DB_ID', 0))
redis_namespace = os.getenv('REDIS_NAMESPACE', 'worker')
redis_collection = os.getenv('REDIS_COLLECTION', 'jobs')
logger.info(f'Connected with Redis ({redis_namespace}:{redis_collection})')
for (idx, users_batch) in enumerate(all_influx_users_partitioned):
job = {'type': 'scan_users_locations', 'args': {'user_id_range': users_batch, 'diagnozed_uuid': uuid, 'diagnozed_visited_regions': visited_regions_geohash_prefixes}}
redis_instance.rpush(f'{redis_namespace}:{redis_collection}', json.dumps(job))
logger.info(f'''Successfully pushed job #{idx} to the Job Queue:
{json.dumps(job)}''')
logger.info('Finished pushing jobs to the Queue.') | -3,618,480,378,836,912,000 | Here we create the task and put it on the job queue. | users-api/routes.py | _submit_filtering_jobs | pwegrzyn/pandemic-monitor | python | def _submit_filtering_jobs(self, uuid):
'\n \n '
two_weeks_ago = (datetime.date.today() - datetime.timedelta(14))
params = {'from': int(two_weeks_ago.strftime('%s')), 'to': int(time.time()), 'unit': 'seconds'}
location_api_resp = requests.get(f'http://location-api:5000/geohashRegionsForUser/{uuid}', params=params)
if (location_api_resp.status_code != 200):
logger.warning(location_api_resp)
api.abort(500, 'There was a problem when requesting data from the Location API')
visited_regions_geohash_prefixes = location_api_resp.json()
logger.info(f'Visited Regions for diagonzed patient: {str(visited_regions_geohash_prefixes)}')
location_api_resp_users = requests.get('http://location-api:5000/users')
if (location_api_resp_users.status_code != 200):
logger.warning(location_api_resp_users)
api.abort(500, 'There was a problem when requesting data from the Location API')
all_influx_users = list((set(location_api_resp_users.json()) - {str(uuid)}))
logger.info(f'All Influx users without diagnozed patient: {str(all_influx_users)}')
n_workers = 3
task_size = (len(all_influx_users) // n_workers)
all_influx_users_partitioned = SingleUser._chunks(all_influx_users, task_size)
redis_instance = redis.Redis(host=os.getenv('REDIS_HOST', 'queue'), port=os.getenv('REDIS_PORT', 6379), db=os.getenv('REDIS_DB_ID', 0))
redis_namespace = os.getenv('REDIS_NAMESPACE', 'worker')
redis_collection = os.getenv('REDIS_COLLECTION', 'jobs')
logger.info(f'Connected with Redis ({redis_namespace}:{redis_collection})')
for (idx, users_batch) in enumerate(all_influx_users_partitioned):
job = {'type': 'scan_users_locations', 'args': {'user_id_range': users_batch, 'diagnozed_uuid': uuid, 'diagnozed_visited_regions': visited_regions_geohash_prefixes}}
redis_instance.rpush(f'{redis_namespace}:{redis_collection}', json.dumps(job))
logger.info(f'''Successfully pushed job #{idx} to the Job Queue:
{json.dumps(job)}''')
logger.info('Finished pushing jobs to the Queue.') |
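The job-submission code above partitions the user list with a `SingleUser._chunks` helper that is not shown in this dump and pushes each batch onto a Redis list as JSON. A hedged sketch of that queueing step, with a guessed-at chunking helper (the real `_chunks` may differ) and toy payloads; it assumes a Redis server is reachable on localhost:

```python
import json
import redis

def chunks(items, size):
    """Guess at the partitioning helper: yield consecutive slices of `size` items."""
    size = max(size, 1)
    for i in range(0, len(items), size):
        yield items[i:i + size]

r = redis.Redis(host='localhost', port=6379, db=0)
queue_key = 'worker:jobs'                       # mirrors the namespace:collection defaults above

users = [f'user-{i}' for i in range(7)]
for batch in chunks(users, 3):
    job = {'type': 'scan_users_locations', 'args': {'user_id_range': batch}}
    r.rpush(queue_key, json.dumps(job))         # producer side: append the serialized job

# a worker would pop and decode jobs like this:
raw = r.lpop(queue_key)
print(json.loads(raw))
```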
def __init__(self, path: str):
"\n Create an instance of GitRepoVersionInfo\n :param path: The path to search for git information. It searches for '.git' in this folder or any parent\n folder.\n "
self._is_repo = False
try:
self._repo = git.Repo(path, search_parent_directories=True)
self._is_repo = True
except git.exc.InvalidGitRepositoryError:
self._repo = None | -4,167,830,116,496,115,700 | Create an instance of GitRepoVersionInfo
:param path: The path to search for git information. It searches for '.git' in this folder or any parent
folder. | step_exec_lib/utils/git.py | __init__ | giantswarm/step-exec-lib | python | def __init__(self, path: str):
"\n Create an instance of GitRepoVersionInfo\n :param path: The path to search for git information. It searches for '.git' in this folder or any parent\n folder.\n "
self._is_repo = False
try:
self._repo = git.Repo(path, search_parent_directories=True)
self._is_repo = True
except git.exc.InvalidGitRepositoryError:
self._repo = None |
@property
def is_git_repo(self) -> bool:
'\n Checks if the path given in constructor is a sub-path of a valid git repo.\n :return: Boolean true, if repo was found.\n '
return self._is_repo | 5,407,171,041,855,514,000 | Checks if the path given in constructor is a sub-path of a valid git repo.
:return: Boolean true, if repo was found. | step_exec_lib/utils/git.py | is_git_repo | giantswarm/step-exec-lib | python | @property
def is_git_repo(self) -> bool:
'\n Checks if the path given in constructor is a sub-path of a valid git repo.\n :return: Boolean true, if repo was found.\n '
return self._is_repo |
def get_git_version(self, strip_v_in_version: bool=True) -> str:
'\n Gets application version in the format [last-tag]-[last-commit-sha].\n :param strip_v_in_version: If the version tag starts with \'v\' (like \'v1.2.3),\n this chooses if the \'v\' should be stripped, so the resulting tag is \'1.2.3\'.\n If there\'s a "-", "." or "_" separator after "v", it is removed as well.\n :return: The version string\n '
if (not self._is_repo):
raise git.exc.InvalidGitRepositoryError()
tags = sorted(self._repo.tags, key=(lambda t: t.commit.committed_date))
latest_tag = (None if (len(tags) == 0) else tags[(- 1)])
ver = ('0.0.0' if (latest_tag is None) else latest_tag.name)
if (strip_v_in_version and ver.startswith('v')):
txt_ver = ver.lstrip('v')
txt_ver = txt_ver.lstrip('-_.')
else:
txt_ver = ver
sha = self._repo.head.commit.hexsha
if ((latest_tag is not None) and (sha == latest_tag.commit.hexsha)):
return txt_ver
return f'{txt_ver}-{sha}' | -2,077,656,113,087,697,700 | Gets application version in the format [last-tag]-[last-commit-sha].
:param strip_v_in_version: If the version tag starts with 'v' (like 'v1.2.3),
this chooses if the 'v' should be stripped, so the resulting tag is '1.2.3'.
If there's a "-", "." or "_" separator after "v", it is removed as well.
:return: The version string | step_exec_lib/utils/git.py | get_git_version | giantswarm/step-exec-lib | python | def get_git_version(self, strip_v_in_version: bool=True) -> str:
'\n Gets application version in the format [last-tag]-[last-commit-sha].\n :param strip_v_in_version: If the version tag starts with \'v\' (like \'v1.2.3),\n this chooses if the \'v\' should be stripped, so the resulting tag is \'1.2.3\'.\n If there\'s a "-", "." or "_" separator after "v", it is removed as well.\n :return: The version string\n '
if (not self._is_repo):
raise git.exc.InvalidGitRepositoryError()
tags = sorted(self._repo.tags, key=(lambda t: t.commit.committed_date))
latest_tag = (None if (len(tags) == 0) else tags[(- 1)])
ver = ('0.0.0' if (latest_tag is None) else latest_tag.name)
if (strip_v_in_version and ver.startswith('v')):
txt_ver = ver.lstrip('v')
txt_ver = txt_ver.lstrip('-_.')
else:
txt_ver = ver
sha = self._repo.head.commit.hexsha
if ((latest_tag is not None) and (sha == latest_tag.commit.hexsha)):
return txt_ver
return f'{txt_ver}-{sha}' |
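The version logic above boils down to: latest tag by commit date, plus the head SHA when the head is not the tagged commit. A compressed sketch of the same GitPython calls (run it from inside any git checkout; it is not the class itself):

```python
import git

repo = git.Repo('.', search_parent_directories=True)

tags = sorted(repo.tags, key=lambda t: t.commit.committed_date)
latest_tag = tags[-1] if tags else None

raw = '0.0.0' if latest_tag is None else latest_tag.name
version = raw.lstrip('v').lstrip('-_.') if raw.startswith('v') else raw
sha = repo.head.commit.hexsha

if latest_tag is None or sha != latest_tag.commit.hexsha:
    version = f'{version}-{sha}'    # head is not the tagged commit: append the commit hash

print(version)
```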
def contract_by_lifespan(agent_stats, lifespans):
'Pull agents close to their mean according to how short-lived they were. For punishing abundance of premature death\n when rewarding diversity.'
weights = sigmoid_lifespan(lifespans)
n_agents = lifespans.shape[0]
mean_agent = agent_stats.mean(axis=0)
mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
agent_deltas = (mean_agents - agent_stats)
agent_skills = (agent_stats + (weights * agent_deltas.T).T)
return agent_skills | 8,577,192,660,219,871,000 | Pull agents close to their mean according to how short-lived they were. For punishing abundance of premature death
when rewarding diversity. | evolution/diversity.py | contract_by_lifespan | narendasan/neural-mmo | python | def contract_by_lifespan(agent_stats, lifespans):
'Pull agents close to their mean according to how short-lived they were. For punishing abundance of premature death\n when rewarding diversity.'
weights = sigmoid_lifespan(lifespans)
n_agents = lifespans.shape[0]
mean_agent = agent_stats.mean(axis=0)
mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
agent_deltas = (mean_agents - agent_stats)
agent_skills = (agent_stats + (weights * agent_deltas.T).T)
return agent_skills |
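The contraction above pulls each agent toward the population mean by a lifespan-derived weight (`sigmoid_lifespan`, defined elsewhere in the module and not included here). A small NumPy illustration with a made-up weighting function, just to show the broadcasting: a weight near 1 moves an agent almost all the way to the mean, a weight near 0 leaves it in place.

```python
import numpy as np

def toy_lifespan_weight(lifespans, horizon=100.0):
    """Made-up stand-in for sigmoid_lifespan: short lives -> weights near 1."""
    return 1.0 / (1.0 + np.exp((lifespans - horizon / 2) / (horizon / 10)))

agent_stats = np.array([[10.0, 0.0],
                        [0.0, 10.0],
                        [5.0, 5.0]])          # 3 agents x 2 skills
lifespans = np.array([5.0, 95.0, 50.0])       # first agent died almost immediately

weights = toy_lifespan_weight(lifespans)
mean_agent = agent_stats.mean(axis=0)
deltas = mean_agent - agent_stats             # broadcast the mean against every agent
contracted = agent_stats + (weights * deltas.T).T

print(contracted)   # the short-lived agent lands near the mean, the long-lived one barely moves
```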
def expand_by_lifespan(agent_stats, lifespans):
'Push agents further from their mean according to how short-lived they were. For punishing abundance of premature\n death when rewarding homogeneity.'
weights = sigmoid_lifespan(lifespans)
n_agents = lifespans.shape[0]
mean_agent = agent_stats.mean(axis=0)
mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
agent_deltas = (mean_agents - agent_stats)
agent_deltas = ((agent_deltas / np.linalg.norm(agent_deltas)) * 100)
agent_skills = (agent_stats - (weights * agent_deltas.T).T)
return agent_skills | 556,244,921,716,009,500 | Push agents further from their mean according to how short-lived they were. For punishing abundance of premature
death when rewarding homogeneity. | evolution/diversity.py | expand_by_lifespan | narendasan/neural-mmo | python | def expand_by_lifespan(agent_stats, lifespans):
'Push agents further from their mean according to how short-lived they were. For punishing abundance of premature\n death when rewarding homogeneity.'
weights = sigmoid_lifespan(lifespans)
n_agents = lifespans.shape[0]
mean_agent = agent_stats.mean(axis=0)
mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
agent_deltas = (mean_agents - agent_stats)
agent_deltas = ((agent_deltas / np.linalg.norm(agent_deltas)) * 100)
agent_skills = (agent_stats - (weights * agent_deltas.T).T)
return agent_skills |
def sum_experience(agent_stats, skill_headers=None, verbose=False, pop=None):
'Simply take the sum of XP over skills and agents.'
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
a_skills = np.vstack(agent_skills)
a_lifespans = np.hstack(lifespans)
(n_agents, n_skills) = a_skills.shape
mean_xp = (a_skills.sum() / (n_agents * n_skills))
if verbose:
print('skills')
print(a_skills.T)
print('lifespans')
print(a_lifespans)
print('mean xp:', mean_xp)
print()
return mean_xp | 1,694,325,565,475,479,300 | Simply take the sum of XP over skills and agents. | evolution/diversity.py | sum_experience | narendasan/neural-mmo | python | def sum_experience(agent_stats, skill_headers=None, verbose=False, pop=None):
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
a_skills = np.vstack(agent_skills)
a_lifespans = np.hstack(lifespans)
(n_agents, n_skills) = a_skills.shape
mean_xp = (a_skills.sum() / (n_agents * n_skills))
if verbose:
print('skills')
print(a_skills.T)
print('lifespans')
print(a_lifespans)
print('mean xp:', mean_xp)
print()
return mean_xp |
def calc_convex_hull(agent_stats, skill_headers=None, verbose=False, infos={}, pop=None, punish_youth=True):
'Calculate the diversity of a population of agents in skill-space by computing the volume inside the convex hull of\n the agents when treated as points in this space.'
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
agent_skills = np.vstack(agent_skills)
n_skills = agent_skills.shape[1]
lifespans = np.hstack(lifespans)
if verbose:
print('skills:')
print(agent_skills.transpose())
print('lifespans:')
print(lifespans)
print(len(agent_stats['lifespans']), 'populations')
if punish_youth:
agent_skills = contract_by_lifespan(agent_skills, lifespans)
if (n_skills == 1):
score = (agent_skills.max() - agent_skills.mean())
else:
try:
hull = ConvexHull(agent_skills, qhull_options='QJ')
infos['hull'] = hull
score = hull.volume
score = (score ** (1 / n_skills))
except Exception as e:
print(e)
score = 0
if verbose:
print('score:', score)
return score | 1,588,951,508,444,711,700 | Calculate the diversity of a population of agents in skill-space by computing the volume inside the convex hull of
the agents when treated as points in this space. | evolution/diversity.py | calc_convex_hull | narendasan/neural-mmo | python | def calc_convex_hull(agent_stats, skill_headers=None, verbose=False, infos={}, pop=None, punish_youth=True):
'Calculate the diversity of a population of agents in skill-space by computing the volume inside the convex hull of\n the agents when treated as points in this space.'
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
agent_skills = np.vstack(agent_skills)
n_skills = agent_skills.shape[1]
lifespans = np.hstack(lifespans)
if verbose:
print('skills:')
print(agent_skills.transpose())
print('lifespans:')
print(lifespans)
print(len(agent_stats['lifespans']), 'populations')
if punish_youth:
agent_skills = contract_by_lifespan(agent_skills, lifespans)
if (n_skills == 1):
score = (agent_skills.max() - agent_skills.mean())
else:
try:
hull = ConvexHull(agent_skills, qhull_options='QJ')
infos['hull'] = hull
score = hull.volume
score = (score ** (1 / n_skills))
except Exception as e:
print(e)
score = 0
if verbose:
print('score:', score)
return score |
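
As a concrete check of the hull-based diversity score in calc_convex_hull, the toy sketch below (not repo code) computes the same volume ** (1 / n_skills) quantity for four agents sitting at the corners of a square in a 2-skill space; the 'QJ' option joggles the input so degenerate point sets still produce a hull:

import numpy as np
from scipy.spatial import ConvexHull

agent_skills = np.array([
    [0.0, 0.0],
    [10.0, 0.0],
    [0.0, 10.0],
    [10.0, 10.0],
])  # 4 agents x 2 skills
n_skills = agent_skills.shape[1]

hull = ConvexHull(agent_skills, qhull_options='QJ')  # joggled input, as in calc_convex_hull
score = hull.volume ** (1 / n_skills)  # in 2-D, ConvexHull.volume is the enclosed area
print(score)  # ~10.0: the side length of the square spanned by the agents

Taking the n_skills-th root keeps the score on a per-dimension (side-length-like) scale rather than growing with the power of the number of skills.
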
def calc_homogeneity_l2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=True):
'Use L2 distance to punish agents for having high mean pairwise distance. Optimal state is all agents at the same\n point in skill-space, with maximal lifespans.'
if ('skills' not in agent_stats):
raise Exception('We should be including dead agents in this calculation, so we should get at least some skill stats back here')
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
assert (len(agent_skills) == len(lifespans))
if punish_youth:
agent_skills = expand_by_lifespan(agent_skills, lifespans)
n_agents = agent_skills.shape[0]
a = agent_skills
b = a.reshape(n_agents, 1, a.shape[1])
distances = np.sqrt(np.einsum('ijk, ijk->ij', (a - b), (a - b)))
score = (np.sum(distances) / (n_agents ** 2))
if verbose:
print('agent skills:\n{}'.format(a.transpose()))
print('lifespans:\n{}'.format(lifespans))
print('score:\n{}\n'.format(score))
return (- score) | 366,864,616,967,479,600 | Use L2 distance to punish agents for having high mean pairwise distance. Optimal state is all agents at the same
point in skill-space, with maximal lifespans. | evolution/diversity.py | calc_homogeneity_l2 | narendasan/neural-mmo | python | def calc_homogeneity_l2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=True):
'Use L2 distance to punish agents for having high mean pairwise distance. Optimal state is all agents at the same\n point in skill-space, with maximal lifespans.'
if ('skills' not in agent_stats):
raise Exception('We should be including dead agents in this calculation, so we should get at least some skill stats back here')
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
assert (len(agent_skills) == len(lifespans))
if punish_youth:
agent_skills = expand_by_lifespan(agent_skills, lifespans)
n_agents = agent_skills.shape[0]
a = agent_skills
b = a.reshape(n_agents, 1, a.shape[1])
distances = np.sqrt(np.einsum('ijk, ijk->ij', (a - b), (a - b)))
score = (np.sum(distances) / (n_agents ** 2))
if verbose:
print('agent skills:\n{}'.format(a.transpose()))
print('lifespans:\n{}'.format(lifespans))
print('score:\n{}\n'.format(score))
return (- score) |
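
The einsum broadcast in calc_homogeneity_l2 is a vectorised all-pairs L2 distance. A quick sketch on toy data (not repo code) that checks it against scipy's cdist:

import numpy as np
from scipy.spatial.distance import cdist

a = np.random.rand(5, 3)                 # 5 agents x 3 skills
b = a.reshape(5, 1, a.shape[1])          # adds a broadcast axis: (5, 1, 3)
diff = a - b                             # (5, 5, 3): every pairwise difference
distances = np.sqrt(np.einsum('ijk,ijk->ij', diff, diff))  # contract the skill axis

assert np.allclose(distances, cdist(a, a))   # identical to the explicit pairwise matrix
score = distances.sum() / (5 ** 2)           # mean over all ordered pairs (self-pairs included)
print(score)

Since the diagonal (self-distances) is zero and each unordered pair is counted twice, dividing by n_agents ** 2 preserves the ordering of a true mean pairwise distance; the sign flip at the end turns low spread into high reward.
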
def test(env, actor_model, is_discrete):
'\n\t\tTests the model.\n\t\tParameters:\n\t\t\tenv - the environment to test the policy on\n\t\t\tactor_model - the actor model to load in\n\t\tReturn:\n\t\t\tNone\n\t'
print(f'Testing {actor_model}', flush=True)
if (actor_model == ''):
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0]
policy = FeedForwardActorNN(obs_dim, act_dim, is_discrete)
policy.load_state_dict(torch.load(actor_model))
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete) | 4,032,199,025,681,221,600 | Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
Return:
None | ppoPolicyTraining.py | test | britig/S2RL-Policies | python | def test(env, actor_model, is_discrete):
'\n\t\tTests the model.\n\t\tParameters:\n\t\t\tenv - the environment to test the policy on\n\t\t\tactor_model - the actor model to load in\n\t\tReturn:\n\t\t\tNone\n\t'
print(f'Testing {actor_model}', flush=True)
if (actor_model == ''):
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0]
policy = FeedForwardActorNN(obs_dim, act_dim, is_discrete)
policy.load_state_dict(torch.load(actor_model))
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete) |
def __init__(self, env, **hyperparameters):
'\n\t\t\tInitializes the PPO model, including hyperparameters.\n\n\t\t\tParameters:\n\t\t\t\tpolicy_class - the policy class to use for our actor/critic networks.\n\t\t\t\tenv - the environment to train on.\n\t\t\t\thyperparameters - all extra arguments passed into PPO that should be hyperparameters.\n\n\t\t\tReturns:\n\t\t\t\tNone\n\t\t'
assert (type(env.observation_space) == gym.spaces.Box)
self._init_hyperparameters(hyperparameters)
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0]
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim, self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2, False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
self.logger = {'t_so_far': 0, 'i_so_far': 0, 'batch_lens': [], 'batch_rews': [], 'batch_infractions': [], 'actor_losses': [], 'actor_network': 0} | 1,361,639,296,199,345,000 | Initializes the PPO model, including hyperparameters.
Parameters:
policy_class - the policy class to use for our actor/critic networks.
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None | ppoPolicyTraining.py | __init__ | britig/S2RL-Policies | python | def __init__(self, env, **hyperparameters):
'\n\t\t\tInitializes the PPO model, including hyperparameters.\n\n\t\t\tParameters:\n\t\t\t\tpolicy_class - the policy class to use for our actor/critic networks.\n\t\t\t\tenv - the environment to train on.\n\t\t\t\thyperparameters - all extra arguments passed into PPO that should be hyperparameters.\n\n\t\t\tReturns:\n\t\t\t\tNone\n\t\t'
assert (type(env.observation_space) == gym.spaces.Box)
self._init_hyperparameters(hyperparameters)
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0]
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim, self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2, False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
self.logger = {'t_so_far': 0, 'i_so_far': 0, 'batch_lens': [], 'batch_rews': [], 'batch_infractions': [], 'actor_losses': [], 'actor_network': 0} |
def learn(self, env_name, failure_observations, subpolicy):
'\n\t\t\tTrain the actor and critic networks. Here is where the main PPO algorithm resides.\n\n\t\t\tParameters:\n\t\t\t\ttotal_timesteps - the total number of timesteps to train for\n\n\t\t\tReturn:\n\t\t\t\tNone\n\t\t'
print(f'Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ', end='')
print(f'{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations')
t_so_far = 0
i_so_far = 0
while (i_so_far < self.training_step):
(batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens) = self.rollout(subpolicy, failure_observations)
t_so_far += np.sum(batch_lens)
i_so_far += 1
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
(V, _) = self.evaluate(batch_obs, batch_acts)
A_k = (batch_rtgs - V.detach())
A_k = ((A_k - A_k.mean()) / (A_k.std() + 1e-10))
for _ in range(self.n_updates_per_iteration):
(V, curr_log_probs) = self.evaluate(batch_obs, batch_acts)
ratios = torch.exp((curr_log_probs - batch_log_probs))
surr1 = (ratios * A_k)
surr2 = (torch.clamp(ratios, (1 - self.clip), (1 + self.clip)) * A_k)
actor_loss = (- torch.min(surr1, surr2)).mean()
critic_loss = nn.MSELoss()(V, batch_rtgs)
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
self._log_summary()
if ((i_so_far % self.save_freq) == 0):
if subpolicy:
torch.save(self.actor.state_dict(), (('./ppo_actor_subpolicy' + env_name) + '.pth'))
torch.save(self.critic.state_dict(), (('./ppo_critic_subpolicy' + env_name) + '.pth'))
else:
torch.save(self.actor.state_dict(), (('./ppo_actor' + env_name) + '.pth'))
torch.save(self.critic.state_dict(), (('./ppo_critic' + env_name) + '.pth')) | 270,654,134,278,599,580 | Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
total_timesteps - the total number of timesteps to train for
Return:
None | ppoPolicyTraining.py | learn | britig/S2RL-Policies | python | def learn(self, env_name, failure_observations, subpolicy):
'\n\t\t\tTrain the actor and critic networks. Here is where the main PPO algorithm resides.\n\n\t\t\tParameters:\n\t\t\t\ttotal_timesteps - the total number of timesteps to train for\n\n\t\t\tReturn:\n\t\t\t\tNone\n\t\t'
print(f'Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ', end='')
print(f'{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations')
t_so_far = 0
i_so_far = 0
while (i_so_far < self.training_step):
(batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens) = self.rollout(subpolicy, failure_observations)
t_so_far += np.sum(batch_lens)
i_so_far += 1
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
(V, _) = self.evaluate(batch_obs, batch_acts)
A_k = (batch_rtgs - V.detach())
A_k = ((A_k - A_k.mean()) / (A_k.std() + 1e-10))
for _ in range(self.n_updates_per_iteration):
(V, curr_log_probs) = self.evaluate(batch_obs, batch_acts)
ratios = torch.exp((curr_log_probs - batch_log_probs))
surr1 = (ratios * A_k)
surr2 = (torch.clamp(ratios, (1 - self.clip), (1 + self.clip)) * A_k)
actor_loss = (- torch.min(surr1, surr2)).mean()
critic_loss = nn.MSELoss()(V, batch_rtgs)
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
self._log_summary()
if ((i_so_far % self.save_freq) == 0):
if subpolicy:
torch.save(self.actor.state_dict(), (('./ppo_actor_subpolicy' + env_name) + '.pth'))
torch.save(self.critic.state_dict(), (('./ppo_critic_subpolicy' + env_name) + '.pth'))
else:
torch.save(self.actor.state_dict(), (('./ppo_actor' + env_name) + '.pth'))
torch.save(self.critic.state_dict(), (('./ppo_critic' + env_name) + '.pth')) |
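
The surr1 / surr2 / actor_loss lines in learn() implement the standard PPO clipped surrogate objective. Written with the quantities used above (r_t the probability ratio `ratios`, \hat{A}_t the normalised advantage A_k, \epsilon = self.clip), the actor is trained to maximise approximately

L^{CLIP}(\theta) = \mathbb{E}_t\left[\min\left(r_t(\theta)\,\hat{A}_t,\ \operatorname{clip}\big(r_t(\theta),\,1-\epsilon,\,1+\epsilon\big)\,\hat{A}_t\right)\right], \qquad r_t(\theta) = \exp\big(\log \pi_\theta(a_t \mid s_t) - \log \pi_{\theta_{\mathrm{old}}}(a_t \mid s_t)\big),

which is exactly the -torch.min(surr1, surr2).mean() term, while the critic is fit separately with an MSE loss against the rewards-to-go.
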
def rollout(self, subpolicy, failure_observations):
"\n\t\t\tThis is where we collect the batch of data\n\t\t\tfrom simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch\n\t\t\tof data each time we iterate the actor/critic networks.\n\n\t\t\tParameters:\n\t\t\t\tNone\n\n\t\t\tReturn:\n\t\t\t\tbatch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)\n\t\t\t\tbatch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)\n\t\t\t\tbatch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)\n\t\t\t\tbatch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)\n\t\t\t\tbatch_lens - the lengths of each episode this batch. Shape: (number of episodes)\n\t\t"
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
ep_rews = []
t = 0
while (t < self.timesteps_per_batch):
act_list = []
ep_rews = []
obs = self.env.reset()
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
(delta, target_id, crosstrack_error) = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
if self.render:
self.env.render()
t += 1
batch_obs.append(obs)
if self.discrete:
(action, log_prob) = self.get_action_discrete(obs)
else:
(action, log_prob) = self.get_action(obs)
if (abs(round(float(action[0]), 1)) < abs(round(float(a_predicted_clf), 1))):
count_infractions_acc = (count_infractions_acc + 1)
if (abs(round(float(action[1]), 1)) < (abs(round(float(delta), 1)) - 0.2)):
count_infractions_steer = (count_infractions_steer + 1)
(obs, rew, done, info) = self.env.step(action)
count_infractions = (count_infractions_acc + count_infractions_steer)
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
if done:
break
batch_lens.append((ep_t + 1))
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view((- 1))
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float)
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews)
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return (batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens) | 1,873,087,376,621,526,000 | This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
None
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes) | ppoPolicyTraining.py | rollout | britig/S2RL-Policies | python | def rollout(self, subpolicy, failure_observations):
"\n\t\t\tThis is where we collect the batch of data\n\t\t\tfrom simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch\n\t\t\tof data each time we iterate the actor/critic networks.\n\n\t\t\tParameters:\n\t\t\t\tNone\n\n\t\t\tReturn:\n\t\t\t\tbatch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)\n\t\t\t\tbatch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)\n\t\t\t\tbatch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)\n\t\t\t\tbatch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)\n\t\t\t\tbatch_lens - the lengths of each episode this batch. Shape: (number of episodes)\n\t\t"
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
ep_rews = []
t = 0
while (t < self.timesteps_per_batch):
act_list = []
ep_rews = []
obs = self.env.reset()
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
(delta, target_id, crosstrack_error) = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
if self.render:
self.env.render()
t += 1
batch_obs.append(obs)
if self.discrete:
(action, log_prob) = self.get_action_discrete(obs)
else:
(action, log_prob) = self.get_action(obs)
if (abs(round(float(action[0]), 1)) < abs(round(float(a_predicted_clf), 1))):
count_infractions_acc = (count_infractions_acc + 1)
if (abs(round(float(action[1]), 1)) < (abs(round(float(delta), 1)) - 0.2)):
count_infractions_steer = (count_infractions_steer + 1)
(obs, rew, done, info) = self.env.step(action)
count_infractions = (count_infractions_acc + count_infractions_steer)
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
if done:
break
batch_lens.append((ep_t + 1))
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view((- 1))
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float)
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews)
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return (batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens) |
def compute_rtgs(self, batch_rews):
'\n\t\t\tCompute the Reward-To-Go of each timestep in a batch given the rewards.\n\n\t\t\tParameters:\n\t\t\t\tbatch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)\n\n\t\t\tReturn:\n\t\t\t\tbatch_rtgs - the rewards to go, Shape: (number of timesteps in batch)\n\t\t'
batch_rtgs = []
for ep_rews in reversed(batch_rews):
discounted_reward = 0
for rew in reversed(ep_rews):
discounted_reward = (rew + (discounted_reward * self.gamma))
batch_rtgs.insert(0, discounted_reward)
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs | 4,242,929,496,582,007,000 | Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch) | ppoPolicyTraining.py | compute_rtgs | britig/S2RL-Policies | python | def compute_rtgs(self, batch_rews):
'\n\t\t\tCompute the Reward-To-Go of each timestep in a batch given the rewards.\n\n\t\t\tParameters:\n\t\t\t\tbatch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)\n\n\t\t\tReturn:\n\t\t\t\tbatch_rtgs - the rewards to go, Shape: (number of timesteps in batch)\n\t\t'
batch_rtgs = []
for ep_rews in reversed(batch_rews):
discounted_reward = 0
for rew in reversed(ep_rews):
discounted_reward = (rew + (discounted_reward * self.gamma))
batch_rtgs.insert(0, discounted_reward)
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs |
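
compute_rtgs walks each episode backwards, accumulating discounted returns. A small worked example (toy rewards; gamma = 0.95 from the default hyperparameters) makes the recursion concrete:

gamma = 0.95
ep_rews = [1.0, 2.0, 3.0]

rtgs = []
discounted = 0.0
for rew in reversed(ep_rews):          # walk the episode backwards
    discounted = rew + gamma * discounted
    rtgs.insert(0, discounted)         # prepend so rtgs stays in time order

# rtgs[2] = 3.0
# rtgs[1] = 2.0 + 0.95 * 3.0  = 4.85
# rtgs[0] = 1.0 + 0.95 * 4.85 = 5.6075
print(rtgs)   # [5.6075, 4.85, 3.0]
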
def get_action(self, obs):
'\n\t\t\tQueries an action from the actor network, should be called from rollout.\n\n\t\t\tParameters:\n\t\t\t\tobs - the observation at the current timestep\n\n\t\t\tReturn:\n\t\t\t\taction - the action to take, as a numpy array\n\t\t\t\tlog_prob - the log probability of the selected action in the distribution\n\t\t'
mean = self.actor(obs)
dist = MultivariateNormal(mean, self.cov_mat)
action = dist.sample()
log_prob = dist.log_prob(action)
return (action.detach().numpy(), log_prob.detach()) | 7,726,324,014,643,275,000 | Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution | ppoPolicyTraining.py | get_action | britig/S2RL-Policies | python | def get_action(self, obs):
'\n\t\t\tQueries an action from the actor network, should be called from rollout.\n\n\t\t\tParameters:\n\t\t\t\tobs - the observation at the current timestep\n\n\t\t\tReturn:\n\t\t\t\taction - the action to take, as a numpy array\n\t\t\t\tlog_prob - the log probability of the selected action in the distribution\n\t\t'
mean = self.actor(obs)
dist = MultivariateNormal(mean, self.cov_mat)
action = dist.sample()
log_prob = dist.log_prob(action)
return (action.detach().numpy(), log_prob.detach()) |
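
get_action samples from a fixed-width diagonal Gaussian centred on the actor's output. A stand-alone sketch of that exploration scheme (the mean vector below is a stand-in for self.actor(obs); the 0.05 variance matches the cov_var set in __init__):

import torch
from torch.distributions import MultivariateNormal

act_dim = 2
mean = torch.tensor([0.3, -0.1])                    # stand-in for self.actor(obs)
cov_mat = torch.diag(torch.full((act_dim,), 0.05))  # fixed diagonal covariance, as in __init__

dist = MultivariateNormal(mean, cov_mat)
action = dist.sample()
log_prob = dist.log_prob(action)   # scalar joint log-density, stored for the PPO ratio later
print(action, log_prob)

In the code shown here the covariance is constant rather than learned, so exploration noise does not anneal; that is a design choice of this implementation, not a requirement of PPO.
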
def evaluate(self, batch_obs, batch_acts):
'\n\t\t\tEstimate the values of each observation, and the log probs of\n\t\t\teach action in the most recent batch with the most recent\n\t\t\titeration of the actor network. Should be called from learn.\n\n\t\t\tParameters:\n\t\t\t\tbatch_obs - the observations from the most recently collected batch as a tensor.\n\t\t\t\t\t\t\tShape: (number of timesteps in batch, dimension of observation)\n\t\t\t\tbatch_acts - the actions from the most recently collected batch as a tensor.\n\t\t\t\t\t\t\tShape: (number of timesteps in batch, dimension of action)\n\n\t\t\tReturn:\n\t\t\t\tV - the predicted values of batch_obs\n\t\t\t\tlog_probs - the log probabilities of the actions taken in batch_acts given batch_obs\n\t\t'
V = self.critic(batch_obs).squeeze()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
log_probs = dist.log_prob(batch_acts)
return (V, log_probs) | -3,305,831,494,162,423,000 | Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs | ppoPolicyTraining.py | evaluate | britig/S2RL-Policies | python | def evaluate(self, batch_obs, batch_acts):
'\n\t\t\tEstimate the values of each observation, and the log probs of\n\t\t\teach action in the most recent batch with the most recent\n\t\t\titeration of the actor network. Should be called from learn.\n\n\t\t\tParameters:\n\t\t\t\tbatch_obs - the observations from the most recently collected batch as a tensor.\n\t\t\t\t\t\t\tShape: (number of timesteps in batch, dimension of observation)\n\t\t\t\tbatch_acts - the actions from the most recently collected batch as a tensor.\n\t\t\t\t\t\t\tShape: (number of timesteps in batch, dimension of action)\n\n\t\t\tReturn:\n\t\t\t\tV - the predicted values of batch_obs\n\t\t\t\tlog_probs - the log probabilities of the actions taken in batch_acts given batch_obs\n\t\t'
V = self.critic(batch_obs).squeeze()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
log_probs = dist.log_prob(batch_acts)
return (V, log_probs) |
def _init_hyperparameters(self, hyperparameters):
'\n\t\t\tInitialize default and custom values for hyperparameters\n\n\t\t\tParameters:\n\t\t\t\thyperparameters - the extra arguments included when creating the PPO model, should only include\n\t\t\t\t\t\t\t\t\thyperparameters defined below with custom values.\n\n\t\t\tReturn:\n\t\t\t\tNone\n\t\t'
self.timesteps_per_batch = 4800
self.max_timesteps_per_episode = 1600
self.n_updates_per_iteration = 5
self.lr = 0.005
self.gamma = 0.95
self.clip = 0.2
self.render = False
self.save_freq = 10
self.seed = None
self.discrete = False
self.training_step = 200
for (param, val) in hyperparameters.items():
exec(((('self.' + param) + ' = ') + str(val)))
if (self.seed != None):
assert (type(self.seed) == int)
torch.manual_seed(self.seed)
print(f'Successfully set seed to {self.seed}') | 319,362,538,887,235,700 | Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None | ppoPolicyTraining.py | _init_hyperparameters | britig/S2RL-Policies | python | def _init_hyperparameters(self, hyperparameters):
'\n\t\t\tInitialize default and custom values for hyperparameters\n\n\t\t\tParameters:\n\t\t\t\thyperparameters - the extra arguments included when creating the PPO model, should only include\n\t\t\t\t\t\t\t\t\thyperparameters defined below with custom values.\n\n\t\t\tReturn:\n\t\t\t\tNone\n\t\t'
self.timesteps_per_batch = 4800
self.max_timesteps_per_episode = 1600
self.n_updates_per_iteration = 5
self.lr = 0.005
self.gamma = 0.95
self.clip = 0.2
self.render = False
self.save_freq = 10
self.seed = None
self.discrete = False
self.training_step = 200
for (param, val) in hyperparameters.items():
exec(((('self.' + param) + ' = ') + str(val)))
if (self.seed != None):
assert (type(self.seed) == int)
torch.manual_seed(self.seed)
print(f'Successfully set seed to {self.seed}') |
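
_init_hyperparameters copies keyword overrides onto the object with exec('self.' + param + ' = ' + str(val)), which evaluates arbitrary text and breaks for string-valued settings (str(val) drops the quotes). A common alternative, sketched below rather than taken from the repo, is setattr:

def init_hyperparameters(obj, defaults, overrides):
    # Sketch: same effect as the exec() loop above, but without evaluating text
    # and safe for string-valued hyperparameters.
    for name, value in defaults.items():
        setattr(obj, name, value)
    for name, value in overrides.items():
        if name not in defaults:
            raise ValueError(f'Unknown hyperparameter: {name}')
        setattr(obj, name, value)

class _Cfg:   # throwaway stand-in object for the demo
    pass

cfg = _Cfg()
init_hyperparameters(cfg, {'lr': 0.005, 'gamma': 0.95, 'clip': 0.2}, {'lr': 3e-4})
print(cfg.lr, cfg.gamma)   # 0.0003 0.95
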
def _log_summary(self):
"\n\t\t\tPrint to stdout what we've logged so far in the most recent batch.\n\n\t\t\tParameters:\n\t\t\t\tNone\n\n\t\t\tReturn:\n\t\t\t\tNone\n\t\t"
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar('Average Episodic Return', int(float(avg_ep_rews)), t_so_far)
writer.add_scalar('Average actor Loss', int(float(avg_actor_loss)), t_so_far)
writer.add_scalar('Average Infractions', int(float(avg_ep_infractions)), t_so_far)
for (name, param) in actor_model.named_parameters():
if ('weight' in name):
writer.add_histogram(name, param.detach().numpy(), t_so_far)
print(flush=True)
print(f'-------------------- Iteration #{i_so_far} --------------------', flush=True)
print(f'Average Episodic Length: {avg_ep_lens}', flush=True)
print(f'Average Episodic Return: {avg_ep_rews}', flush=True)
print(f'Average Episodic Infractions : {avg_ep_infractions}', flush=True)
print(f'Average Loss: {avg_actor_loss}', flush=True)
print(f'Timesteps So Far: {t_so_far}', flush=True)
print(f'------------------------------------------------------', flush=True)
print(flush=True)
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = [] | 5,219,838,179,941,541,000 | Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None | ppoPolicyTraining.py | _log_summary | britig/S2RL-Policies | python | def _log_summary(self):
"\n\t\t\tPrint to stdout what we've logged so far in the most recent batch.\n\n\t\t\tParameters:\n\t\t\t\tNone\n\n\t\t\tReturn:\n\t\t\t\tNone\n\t\t"
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar('Average Episodic Return', int(float(avg_ep_rews)), t_so_far)
writer.add_scalar('Average actor Loss', int(float(avg_actor_loss)), t_so_far)
writer.add_scalar('Average Infractions', int(float(avg_ep_infractions)), t_so_far)
for (name, param) in actor_model.named_parameters():
if ('weight' in name):
writer.add_histogram(name, param.detach().numpy(), t_so_far)
print(flush=True)
print(f'-------------------- Iteration #{i_so_far} --------------------', flush=True)
print(f'Average Episodic Length: {avg_ep_lens}', flush=True)
print(f'Average Episodic Return: {avg_ep_rews}', flush=True)
print(f'Average Episodic Infractions : {avg_ep_infractions}', flush=True)
print(f'Average Loss: {avg_actor_loss}', flush=True)
print(f'Timesteps So Far: {t_so_far}', flush=True)
print(f'------------------------------------------------------', flush=True)
print(flush=True)
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = [] |
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'test'))
def AmazonReviewPolarity(root: str, split: Union[(Tuple[str], str)]):
"AmazonReviewPolarity Dataset\n\n For additional details refer to https://arxiv.org/abs/1509.01626\n\n Number of lines per split:\n - train: 3600000\n - test: 400000\n\n Args:\n root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')\n split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `test`)\n\n :returns: DataPipe that yields tuple of label (1 to 2) and text containing the review title and text\n :rtype: (int, str)\n "
if (not is_module_available('torchdata')):
raise ModuleNotFoundError('Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`')
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(filepath_fn=(lambda x: os.path.join(root, _PATH)), hash_dict={os.path.join(root, _PATH): MD5}, hash_type='md5')
cache_compressed_dp = GDriveReader(cache_compressed_dp).end_caching(mode='wb', same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=(lambda x: os.path.join(root, _EXTRACTED_FILES[split])))
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode='b').read_from_tar().filter((lambda x: (_EXTRACTED_FILES[split] in x[0])))
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode='wb', same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, encoding='utf-8')
return data_dp.parse_csv().map(fn=(lambda t: (int(t[0]), ' '.join(t[1:])))) | 1,040,654,380,379,947,600 | AmazonReviewPolarity Dataset
For additional details refer to https://arxiv.org/abs/1509.01626
Number of lines per split:
- train: 3600000
- test: 400000
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `test`)
:returns: DataPipe that yields tuple of label (1 to 2) and text containing the review title and text
:rtype: (int, str) | torchtext/datasets/amazonreviewpolarity.py | AmazonReviewPolarity | abhinavarora/text | python | @_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'test'))
def AmazonReviewPolarity(root: str, split: Union[(Tuple[str], str)]):
"AmazonReviewPolarity Dataset\n\n For additional details refer to https://arxiv.org/abs/1509.01626\n\n Number of lines per split:\n - train: 3600000\n - test: 400000\n\n Args:\n root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')\n split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `test`)\n\n :returns: DataPipe that yields tuple of label (1 to 2) and text containing the review title and text\n :rtype: (int, str)\n "
if (not is_module_available('torchdata')):
raise ModuleNotFoundError('Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`')
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(filepath_fn=(lambda x: os.path.join(root, _PATH)), hash_dict={os.path.join(root, _PATH): MD5}, hash_type='md5')
cache_compressed_dp = GDriveReader(cache_compressed_dp).end_caching(mode='wb', same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=(lambda x: os.path.join(root, _EXTRACTED_FILES[split])))
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode='b').read_from_tar().filter((lambda x: (_EXTRACTED_FILES[split] in x[0])))
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode='wb', same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, encoding='utf-8')
return data_dp.parse_csv().map(fn=(lambda t: (int(t[0]), ' '.join(t[1:])))) |
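
A usage sketch for the datapipe above (assuming torchtext is installed with the torchdata backend and the archive can be downloaded or is already cached under root):

from torchtext.datasets import AmazonReviewPolarity

train_dp = AmazonReviewPolarity(split='train')   # root defaults to ~/.torchtext/cache
for label, text in train_dp:                     # lazily yields (label, text) pairs
    print(label, text[:80])                      # label is 1 or 2, as documented above
    break

Because both stages are wrapped in on_disk_cache, the download and extraction run only on first use; later calls read the cached CSV directly.
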
def get_relative_errors(test_data_id):
'\n Compute and save the relative errors of every point found on every network in a testing set.\n Relative error is defined in (Katz and Reggia 2017).\n test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).\n '
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for alg in ['traverse', 'baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print(('%s, alg %s, N %d,samp %d' % (test_data_id, alg, N, samp)))
npz = np.load(('results/%s_%s_N_%d_s_%d.npz' % (alg, test_data_id, N, samp)))
W = npz['W']
fxV = npz['fxV']
(fxV, converged) = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = (np.tanh(W.dot(fxV)) - fxV)
re = np.fabs((f / margin))
(re_fx, re_un) = (re[:, converged].max(axis=0), re[:, (~ converged)].max(axis=0))
re_fx = re_fx[(re_fx > 0)]
(f_fx, f_un) = (np.fabs(f[:, converged]).max(axis=0), np.fabs(f[:, (~ converged)]).max(axis=0))
f_fx = f_fx[(f_fx > 0)]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file(('results/%s_re_%s_N_%d_s_%d.npz' % (alg, test_data_id, N, samp)), **re_npz) | -6,441,740,488,261,575,000 | Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension). | roundoff.py | get_relative_errors | garrettkatz/rnn-fxpts | python | def get_relative_errors(test_data_id):
'\n Compute and save the relative errors of every point found on every network in a testing set.\n Relative error is defined in (Katz and Reggia 2017).\n test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).\n '
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for alg in ['traverse', 'baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print(('%s, alg %s, N %d,samp %d' % (test_data_id, alg, N, samp)))
npz = np.load(('results/%s_%s_N_%d_s_%d.npz' % (alg, test_data_id, N, samp)))
W = npz['W']
fxV = npz['fxV']
(fxV, converged) = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = (np.tanh(W.dot(fxV)) - fxV)
re = np.fabs((f / margin))
(re_fx, re_un) = (re[:, converged].max(axis=0), re[:, (~ converged)].max(axis=0))
re_fx = re_fx[(re_fx > 0)]
(f_fx, f_un) = (np.fabs(f[:, converged]).max(axis=0), np.fabs(f[:, (~ converged)]).max(axis=0))
f_fx = f_fx[(f_fx > 0)]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file(('results/%s_re_%s_N_%d_s_%d.npz' % (alg, test_data_id, N, samp)), **re_npz) |
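
The relative error computed in get_relative_errors is the forward residual f = tanh(W v) - v expressed in units of an estimated forward-error margin. A toy sketch of that ratio is below; the real denominator comes from rfx.estimate_forward_error(W, fxV), which is not shown in this excerpt, so a crude machine-epsilon bound stands in for it here:

import numpy as np

N = 3
rng = np.random.RandomState(0)
W = rng.randn(N, N) / np.sqrt(N)
fxV = rng.randn(N, 5) * 0.1          # candidate fixed points, one per column

f = np.tanh(W.dot(fxV)) - fxV        # forward residual at each candidate
# crude stand-in for rfx.estimate_forward_error(W, fxV):
margin = np.finfo(float).eps * (np.fabs(W).dot(np.fabs(fxV)) + np.fabs(fxV) + 1.0)
re = np.fabs(f / margin)             # residual measured in units of the error margin
print(re.max(axis=0))                # one relative error per candidate point

Roughly, a point is treated as a genuine fixed point when this ratio is small, i.e. when the residual is explainable by round-off alone.
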
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
'\n Plot relative errors from points found by fiber traversal.\n test_data_ids and Ns should be length-2 lists.\n Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].\n Similarly the second column draws from Ns[1], test_data_ids[1].\n Each network sample within samp_range is shown on a separate row.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/traverse_re_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
(m_fx, m_un) = (npz['re_fx'], npz['re_un'])
ax = plt.subplot(len(samp_range), len(Ns), sp)
sp += 1
if (m_un.shape[0] > 0):
plt.hist(np.log2(m_un), bins=30, log=log, facecolor='k')
plt.hist(np.log2(m_fx), bins=10, log=log, facecolor='w')
lo = (10 * (int((np.log2(m_fx).min() / 10)) - 1))
if (m_un.shape[0] > 0):
hi = (10 * (int((np.log2(m_un).max() / 10)) + 1))
else:
hi = 0
plt.xticks(range((- 10), 1, 2), ([''] + [('$2^{%d}$' % yl) for yl in range((- 8), 1, 2)]))
if (N == Ns[0]):
plt.ylabel('# of points')
if (samp == samp_range[0]):
ax.set_title(('N = %d' % N))
if (samp == samp_range[(- 1)]):
plt.xlabel('Fiber Relative Error')
plt.show() | -1,005,911,344,185,639,000 | Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row. | roundoff.py | show_traverse_re_fig | garrettkatz/rnn-fxpts | python | def show_traverse_re_fig(test_data_ids, Ns, samp_range):
'\n Plot relative errors from points found by fiber traversal.\n test_data_ids and Ns should be length-2 lists.\n Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].\n Similarly the second column draws from Ns[1], test_data_ids[1].\n Each network sample within samp_range is shown on a separate row.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/traverse_re_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
(m_fx, m_un) = (npz['re_fx'], npz['re_un'])
ax = plt.subplot(len(samp_range), len(Ns), sp)
sp += 1
if (m_un.shape[0] > 0):
plt.hist(np.log2(m_un), bins=30, log=log, facecolor='k')
plt.hist(np.log2(m_fx), bins=10, log=log, facecolor='w')
lo = (10 * (int((np.log2(m_fx).min() / 10)) - 1))
if (m_un.shape[0] > 0):
hi = (10 * (int((np.log2(m_un).max() / 10)) + 1))
else:
hi = 0
plt.xticks(range((- 10), 1, 2), ([''] + [('$2^{%d}$' % yl) for yl in range((- 8), 1, 2)]))
if (N == Ns[0]):
plt.ylabel('# of points')
if (samp == samp_range[0]):
ax.set_title(('N = %d' % N))
if (samp == samp_range[(- 1)]):
plt.xlabel('Fiber Relative Error')
plt.show() |
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
'\n Analyze edge cases of relative errors on a single network\n Uses the samp^{th} sample network of size N in test data test_data_id.\n Relative errors in the range (0, 2^{cap}) are considered edge cases.\n Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.\n T and B are as defined in (Katz and Reggia 2017).\n '
npz = fe.load_npz_file(('results/baseline_re_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
res = fe.load_pkl_file(('results/TvB_%s_N_%d_s_%d.pkl' % (test_data_id, N, samp)))
re_un = npz['re_un']
percent = ((100.0 * (re_un < (2 ** cap)).sum()) / np.array((res['T-B'] - res['B-T'])))
print(('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d' % (N, samp, res['B-T'], res['T-B'], (re_un < (2 ** cap)).sum(), percent, cap)))
return percent | -598,339,714,970,083,200 | Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017). | roundoff.py | baseline_re_single_analysis | garrettkatz/rnn-fxpts | python | def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
'\n Analyze edge cases of relative errors on a single network\n Uses the samp^{th} sample network of size N in test data test_data_id.\n Relative errors in the range (0, 2^{cap}) are considered edge cases.\n Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.\n T and B are as defined in (Katz and Reggia 2017).\n '
npz = fe.load_npz_file(('results/baseline_re_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
res = fe.load_pkl_file(('results/TvB_%s_N_%d_s_%d.pkl' % (test_data_id, N, samp)))
re_un = npz['re_un']
percent = ((100.0 * (re_un < (2 ** cap)).sum()) / np.array((res['T-B'] - res['B-T'])))
print(('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d' % (N, samp, res['B-T'], res['T-B'], (re_un < (2 ** cap)).sum(), percent, cap)))
return percent |
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
'\n Runs baseline_re_single_analysis on all networks in test_data_id of size N.\n cap is as in baseline_re_single_analysis.\n returns numpy.array percents, where\n percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.\n '
percents = []
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for (N, S) in zip(network_sizes, num_samples):
if (N not in Ns):
continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id, N, samp, cap=cap))
percents = np.array(percents)
print(('mean %%: %f%%' % percents.mean()))
return percents | -2,609,222,182,299,046,000 | Runs baseline_re_single_analysis on all networks in test_data_id of size N.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network. | roundoff.py | baseline_re_batch_analysis | garrettkatz/rnn-fxpts | python | def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
'\n Runs baseline_re_single_analysis on all networks in test_data_id of size N.\n cap is as in baseline_re_single_analysis.\n returns numpy.array percents, where\n percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.\n '
percents = []
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for (N, S) in zip(network_sizes, num_samples):
if (N not in Ns):
continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id, N, samp, cap=cap))
percents = np.array(percents)
print(('mean %%: %f%%' % percents.mean()))
return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
'\n Plot relative errors from points found by the baseline solver.\n test_data_ids and Ns should be length-2 lists.\n Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].\n Similarly the second column draws from Ns[1], test_data_ids[1].\n Each network sample within samp_range is shown on a separate row.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/baseline_re_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
(m_fx, m_un) = (npz['re_fx'], npz['re_un'])
ax = plt.subplot(len(samp_range), len(Ns), sp)
sp += 1
if (m_un.shape[0] > 0):
plt.hist(np.log2(m_un), bins=30, log=log, facecolor='k')
plt.hist(np.log2(m_fx), bins=10, log=log, facecolor='w')
(lo, hi) = ((- 20), 50)
plt.xticks(range(lo, (hi + 1), 10), ([''] + [('$2^{%d}$' % yl) for yl in range((lo + 10), (hi + 1), 10)]))
if (N == Ns[0]):
plt.ylabel('# of points')
if (samp == samp_range[0]):
ax.set_title(('N = %d' % N))
if (samp == samp_range[(- 1)]):
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show() | 4,852,017,577,796,613,000 | Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row. | roundoff.py | show_baseline_re_fig | garrettkatz/rnn-fxpts | python | def show_baseline_re_fig(test_data_ids, Ns, samp_range):
'\n Plot relative errors from points found by the baseline solver.\n test_data_ids and Ns should be length-2 lists.\n Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].\n Similarly the second column draws from Ns[1], test_data_ids[1].\n Each network sample within samp_range is shown on a separate row.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/baseline_re_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
(m_fx, m_un) = (npz['re_fx'], npz['re_un'])
ax = plt.subplot(len(samp_range), len(Ns), sp)
sp += 1
if (m_un.shape[0] > 0):
plt.hist(np.log2(m_un), bins=30, log=log, facecolor='k')
plt.hist(np.log2(m_fx), bins=10, log=log, facecolor='w')
(lo, hi) = ((- 20), 50)
plt.xticks(range(lo, (hi + 1), 10), ([''] + [('$2^{%d}$' % yl) for yl in range((lo + 10), (hi + 1), 10)]))
if (N == Ns[0]):
plt.ylabel('# of points')
if (samp == samp_range[0]):
ax.set_title(('N = %d' % N))
if (samp == samp_range[(- 1)]):
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show() |
def get_baseline_rd(test_data_id, N, samp, cap, logfilename=os.devnull):
'\n Compute and save relative distances between pairs of points found by the baseline solver.\n Relative distance is defined in (Katz and Reggia 2017).\n Computes for the samp^{th} sample network of size N in test_data_id.\n test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).\n Only pairs within a random subset of points of size cap are inspected.\n logfilename is a file name at which progress updates are written.\n '
logfile = open(logfilename, 'w')
logfile.write(('Running baseline rd (%s,%d,%d)...\n' % (test_data_id, N, samp)))
npz = fe.load_npz_file(('results/baseline_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if ((cap is not None) and (fxV.shape[1] > cap)):
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:, perm[:cap]]
(in_RR, out_RR) = ([], [])
for j in range(fxV_unique.shape[1]):
logfile.write(('duping %d of %d...\n' % (j, fxV_unique.shape[1])))
(dups, RR, R) = rfx.identical_fixed_points(W, fxV, fxV_unique[:, [j]])
in_RR.append(RR[dups])
out_RR.append(RR[(~ dups)])
(in_RR, out_RR) = (np.concatenate(in_RR), np.concatenate(out_RR))
(npz['in_RR'], npz['out_RR']) = (in_RR, out_RR)
fe.save_npz_file(('results/baseline_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)), **npz)
logfile.write('Done.\n')
logfile.close()
print(('Done %s %d %d' % (test_data_id, N, samp))) | 2,217,433,724,058,219,500 | Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written. | roundoff.py | get_baseline_rd | garrettkatz/rnn-fxpts | python | def get_baseline_rd(test_data_id, N, samp, cap, logfilename=os.devnull):
'\n Compute and save relative distances between pairs of points found by the baseline solver.\n Relative distance is defined in (Katz and Reggia 2017).\n Computes for the samp^{th} sample network of size N in test_data_id.\n test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).\n Only pairs within a random subset of points of size cap are inspected.\n logfilename is a file name at which progress updates are written.\n '
logfile = open(logfilename, 'w')
logfile.write(('Running baseline rd (%s,%d,%d)...\n' % (test_data_id, N, samp)))
npz = fe.load_npz_file(('results/baseline_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if ((cap is not None) and (fxV.shape[1] > cap)):
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:, perm[:cap]]
(in_RR, out_RR) = ([], [])
for j in range(fxV_unique.shape[1]):
logfile.write(('duping %d of %d...\n' % (j, fxV_unique.shape[1])))
(dups, RR, R) = rfx.identical_fixed_points(W, fxV, fxV_unique[:, [j]])
in_RR.append(RR[dups])
out_RR.append(RR[(~ dups)])
(in_RR, out_RR) = (np.concatenate(in_RR), np.concatenate(out_RR))
(npz['in_RR'], npz['out_RR']) = (in_RR, out_RR)
fe.save_npz_file(('results/baseline_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)), **npz)
logfile.write('Done.\n')
logfile.close()
print(('Done %s %d %d' % (test_data_id, N, samp))) |
def pool_get_baseline_rd(args):
'\n Wrapper function passed to multiprocessing.Pool\n '
get_baseline_rd(*args) | -1,554,045,137,290,081,300 | Wrapper function passed to multiprocessing.Pool | roundoff.py | pool_get_baseline_rd | garrettkatz/rnn-fxpts | python | def pool_get_baseline_rd(args):
'\n \n '
get_baseline_rd(*args) |
def run_baseline_rd(test_data_id, Ns, num_procs):
'\n Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.\n Multiprocessing is used to run on multiple networks in parallel.\n num_procs is the number of processors to use.\n '
cpu_count = mp.cpu_count()
print(('%d cpus, using %d' % (cpu_count, num_procs)))
pool_args = []
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for (N, S) in zip(network_sizes, num_samples):
if (N not in Ns):
continue
cap = 20000
for s in range(S):
logfilename = ('logs/baseline_rd_%s_N_%d_s_%d.log' % (test_data_id, N, s))
pool_args.append((test_data_id, N, s, cap, logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if (num_procs < 1):
for args in pool_args:
test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print(('total time: %f' % (time.time() - start_time))) | 8,696,587,791,661,715,000 | Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use. | roundoff.py | run_baseline_rd | garrettkatz/rnn-fxpts | python | def run_baseline_rd(test_data_id, Ns, num_procs):
'\n Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.\n Multiprocessing is used to run on multiple networks in parallel.\n num_procs is the number of processors to use.\n '
cpu_count = mp.cpu_count()
print(('%d cpus, using %d' % (cpu_count, num_procs)))
pool_args = []
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for (N, S) in zip(network_sizes, num_samples):
if (N not in Ns):
continue
cap = 20000
for s in range(S):
logfilename = ('logs/baseline_rd_%s_N_%d_s_%d.log' % (test_data_id, N, s))
pool_args.append((test_data_id, N, s, cap, logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if (num_procs < 1):
for args in pool_args:
test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print(('total time: %f' % (time.time() - start_time))) |
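
run_baseline_rd and run_traverse_rd share the same fan-out pattern: build a list of argument tuples, then either loop over them serially (num_procs < 1) or map a single-argument wrapper over a multiprocessing.Pool. A stripped-down sketch of that pattern with a dummy worker in place of get_baseline_rd:

import multiprocessing as mp
import time

def pool_worker(args):                    # single tuple argument, like pool_get_baseline_rd
    test_data_id, N, samp = args
    time.sleep(0.01)                      # stand-in for the real per-network work
    return (test_data_id, N, samp)

if __name__ == '__main__':
    pool_args = [('demo', N, s) for N in (4, 8) for s in range(3)]
    num_procs = 2
    if num_procs < 1:
        results = [pool_worker(a) for a in pool_args]   # serial fallback
    else:
        pool = mp.Pool(processes=num_procs)
        results = pool.map(pool_worker, pool_args)
        pool.close()
        pool.join()
    print(results)

The wrapper exists because Pool.map passes exactly one argument per task, so the per-network parameters are packed into a tuple and unpacked inside the worker.
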
def get_traverse_rd(test_data_id, N, samp, cap, logfilename=os.devnull):
'\n Compute and save relative distances between pairs of points found by the baseline solver.\n Relative distance is defined in (Katz and Reggia 2017).\n Computes for the samp^{th} sample network of size N in test_data_id.\n test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).\n Only pairs within a random subset of points of size cap are inspected.\n logfilename is a file name at which progress updates are written.\n '
logfile = open(logfilename, 'w')
logfile.write(('Running traverse rd (%s,%d,%d)...\n' % (test_data_id, N, samp)))
npz = fe.load_npz_file(('results/traverse_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if ((cap is not None) and (fxV.shape[1] > cap)):
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:, perm[:cap]]
(in_RR, out_RR) = ([], [])
for j in range(fxV_unique.shape[1]):
logfile.write(('duping %d of %d...\n' % (j, fxV_unique.shape[1])))
(dups, RR, R) = rfx.identical_fixed_points(W, fxV, fxV_unique[:, [j]])
in_RR.append(RR[dups])
out_RR.append(RR[(~ dups)])
(in_RR, out_RR) = (np.concatenate(in_RR), np.concatenate(out_RR))
(npz['in_RR'], npz['out_RR']) = (in_RR, out_RR)
fe.save_npz_file(('results/traverse_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)), **npz)
logfile.write('Done.\n')
logfile.close()
print(('Done %s %d %d' % (test_data_id, N, samp))) | -2,238,623,223,670,777,900 | Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written. | roundoff.py | get_traverse_rd | garrettkatz/rnn-fxpts | python | def get_traverse_rd(test_data_id, N, samp, cap, logfilename=os.devnull):
'\n Compute and save relative distances between pairs of points found by the baseline solver.\n Relative distance is defined in (Katz and Reggia 2017).\n Computes for the samp^{th} sample network of size N in test_data_id.\n test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).\n Only pairs within a random subset of points of size cap are inspected.\n logfilename is a file name at which progress updates are written.\n '
logfile = open(logfilename, 'w')
logfile.write(('Running traverse rd (%s,%d,%d)...\n' % (test_data_id, N, samp)))
npz = fe.load_npz_file(('results/traverse_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if ((cap is not None) and (fxV.shape[1] > cap)):
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:, perm[:cap]]
(in_RR, out_RR) = ([], [])
for j in range(fxV_unique.shape[1]):
logfile.write(('duping %d of %d...\n' % (j, fxV_unique.shape[1])))
(dups, RR, R) = rfx.identical_fixed_points(W, fxV, fxV_unique[:, [j]])
in_RR.append(RR[dups])
out_RR.append(RR[(~ dups)])
(in_RR, out_RR) = (np.concatenate(in_RR), np.concatenate(out_RR))
(npz['in_RR'], npz['out_RR']) = (in_RR, out_RR)
fe.save_npz_file(('results/traverse_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)), **npz)
logfile.write('Done.\n')
logfile.close()
print(('Done %s %d %d' % (test_data_id, N, samp))) |
def pool_get_traverse_rd(args):
'\n Wrapper function passed to multiprocessing.Pool\n '
get_traverse_rd(*args) | -951,652,383,376,324,400 | Wrapper function passed to multiprocessing.Pool | roundoff.py | pool_get_traverse_rd | garrettkatz/rnn-fxpts | python | def pool_get_traverse_rd(args):
'\n \n '
get_traverse_rd(*args) |
def run_traverse_rd(test_data_id, Ns, num_procs):
'\n Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.\n Multiprocessing is used to run on multiple networks in parallel.\n num_procs is the number of processors to use.\n '
cpu_count = mp.cpu_count()
print(('%d cpus, using %d' % (cpu_count, num_procs)))
pool_args = []
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for (N, S) in zip(network_sizes, num_samples):
if (N not in Ns):
continue
cap = 20000
for s in range(S):
logfilename = ('logs/traverse_rd_%s_N_%d_s_%d.log' % (test_data_id, N, s))
pool_args.append((test_data_id, N, s, cap, logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if (num_procs < 1):
for args in pool_args:
test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print(('total time: %f' % (time.time() - start_time))) | -4,067,689,719,634,688,500 | Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use. | roundoff.py | run_traverse_rd | garrettkatz/rnn-fxpts | python | def run_traverse_rd(test_data_id, Ns, num_procs):
'\n Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.\n Multiprocessing is used to run on multiple networks in parallel.\n num_procs is the number of processors to use.\n '
cpu_count = mp.cpu_count()
print(('%d cpus, using %d' % (cpu_count, num_procs)))
pool_args = []
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for (N, S) in zip(network_sizes, num_samples):
if (N not in Ns):
continue
cap = 20000
for s in range(S):
logfilename = ('logs/traverse_rd_%s_N_%d_s_%d.log' % (test_data_id, N, s))
pool_args.append((test_data_id, N, s, cap, logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if (num_procs < 1):
for args in pool_args:
test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print(('total time: %f' % (time.time() - start_time))) |
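As a quick orientation, a hypothetical way to launch one of these pool-based drivers from an interactive session (the test data id and network sizes below are placeholders, not values from the repository):

# assumes 'mytest.npz' was produced by fxpt_experiments.generate_test_data,
# and that the logs/ and results/ directories already exist
run_traverse_rd('mytest', Ns=[2, 3, 4], num_procs=4)   # distribute networks across 4 worker processes
run_traverse_rd('mytest', Ns=[2], num_procs=0)         # num_procs < 1 falls back to a serial loop (handy for debugging)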
def get_simple_rd(test_data_id, N, samp, cap, logfilename=os.devnull):
'\n Use simple unique test: if max absolute coordinate-wise difference < 2**-32\n Compute and save distances between pairs of points found by both solvers.\n Computes for the samp^{th} sample network of size N in test_data_id.\n test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).\n Only pairs within a random subset of points of size cap are inspected.\n Saves pair-wise distance distribution in histogram with one bucket per integer power of 2\n logfilename is a file name at which progress updates are written.\n '
logfile = open(logfilename, 'w')
rfx.hardwrite(logfile, ('Running simple rd (%s,%d,%d)...\n' % (test_data_id, N, samp)))
buckets = {}
bins = np.arange((- 1025), 3)
for method_key in ['traverse', 'baseline']:
npz = fe.load_npz_file(('results/%s_%s_N_%d_s_%d.npz' % (method_key, test_data_id, N, samp)))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros((len(bins) - 1))
if ((cap is not None) and (fxV.shape[1] > cap)):
rfx.hardwrite(logfile, 'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:, perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile, ('disting %d of %d...\n' % (j, fxV.shape[1])))
dists = np.fabs((fxV - fxV[:, [j]])).max(axis=0)
dists[(dists == 0)] = (2.0 ** bins[0])
logdists = np.log2(dists)
logdists[(logdists < bins[0])] = bins[0]
logdists[(logdists > bins[(- 1)])] = bins[(- 1)]
(hist, _) = np.histogram(logdists, bins=bins)
buckets[method_key] += hist
npz = {'bins': bins, 'traverse_buckets': buckets['traverse'], 'baseline_buckets': buckets['baseline']}
fe.save_npz_file(('results/simple_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)), **npz)
rfx.hardwrite(logfile, 'Done.\n')
logfile.close()
print(('Done %s %d %d' % (test_data_id, N, samp))) | -3,566,493,107,082,433,000 | Use simple unique test: if max absolute coordinate-wise difference < 2**-32
Compute and save distances between pairs of points found by both solvers.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves pair-wise distance distribution in histogram with one bucket per integer power of 2
logfilename is a file name at which progress updates are written. | roundoff.py | get_simple_rd | garrettkatz/rnn-fxpts | python | def get_simple_rd(test_data_id, N, samp, cap, logfilename=os.devnull):
'\n Use simple unique test: if max absolute coordinate-wise difference < 2**-32\n Compute and save distances between pairs of points found by both solvers.\n Computes for the samp^{th} sample network of size N in test_data_id.\n test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).\n Only pairs within a random subset of points of size cap are inspected.\n Saves pair-wise distance distribution in histogram with one bucket per integer power of 2\n logfilename is a file name at which progress updates are written.\n '
logfile = open(logfilename, 'w')
rfx.hardwrite(logfile, ('Running simple rd (%s,%d,%d)...\n' % (test_data_id, N, samp)))
buckets = {}
bins = np.arange((- 1025), 3)
for method_key in ['traverse', 'baseline']:
npz = fe.load_npz_file(('results/%s_%s_N_%d_s_%d.npz' % (method_key, test_data_id, N, samp)))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros((len(bins) - 1))
if ((cap is not None) and (fxV.shape[1] > cap)):
rfx.hardwrite(logfile, 'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:, perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile, ('disting %d of %d...\n' % (j, fxV.shape[1])))
dists = np.fabs((fxV - fxV[:, [j]])).max(axis=0)
dists[(dists == 0)] = (2.0 ** bins[0])
logdists = np.log2(dists)
logdists[(logdists < bins[0])] = bins[0]
logdists[(logdists > bins[(- 1)])] = bins[(- 1)]
(hist, _) = np.histogram(logdists, bins=bins)
buckets[method_key] += hist
npz = {'bins': bins, 'traverse_buckets': buckets['traverse'], 'baseline_buckets': buckets['baseline']}
fe.save_npz_file(('results/simple_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)), **npz)
rfx.hardwrite(logfile, 'Done.\n')
logfile.close()
print(('Done %s %d %d' % (test_data_id, N, samp))) |
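A standalone sketch of the power-of-two bucketing that get_simple_rd applies to pairwise distances (toy values, not repository data):

import numpy as np
bins = np.arange(-1025, 3)                      # one bucket per integer power of 2
dists = np.array([0.0, 1e-9, 0.25, 3.0])        # toy max-abs coordinate differences
dists[dists == 0] = 2.0 ** bins[0]              # exact duplicates land in the smallest bucket
logdists = np.clip(np.log2(dists), bins[0], bins[-1])
hist, _ = np.histogram(logdists, bins=bins)     # counts per bucket, as accumulated into buckets[method_key]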
def pool_get_simple_rd(args):
'\n Wrapper function passed to multiprocessing.Pool\n '
get_simple_rd(*args) | -4,100,977,010,512,307,000 | Wrapper function passed to multiprocessing.Pool | roundoff.py | pool_get_simple_rd | garrettkatz/rnn-fxpts | python | def pool_get_simple_rd(args):
'\n \n '
get_simple_rd(*args) |
def run_simple_rd(test_data_id, Ns, num_procs):
'\n Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.\n Multiprocessing is used to run on multiple networks in parallel.\n num_procs is the number of processors to use.\n '
cpu_count = mp.cpu_count()
print(('%d cpus, using %d' % (cpu_count, num_procs)))
pool_args = []
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for (N, S) in zip(network_sizes, num_samples):
if (N not in Ns):
continue
cap = 1000
for s in range(S):
logfilename = ('logs/simple_rd_%s_N_%d_s_%d.log' % (test_data_id, N, s))
pool_args.append((test_data_id, N, s, cap, logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if (num_procs < 1):
for args in pool_args:
test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print(('total time: %f' % (time.time() - start_time))) | 7,719,977,757,564,353,000 | Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use. | roundoff.py | run_simple_rd | garrettkatz/rnn-fxpts | python | def run_simple_rd(test_data_id, Ns, num_procs):
'\n Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.\n Multiprocessing is used to run on multiple networks in parallel.\n num_procs is the number of processors to use.\n '
cpu_count = mp.cpu_count()
print(('%d cpus, using %d' % (cpu_count, num_procs)))
pool_args = []
(network_sizes, num_samples, _) = fe.load_test_data(('%s.npz' % test_data_id))
for (N, S) in zip(network_sizes, num_samples):
if (N not in Ns):
continue
cap = 1000
for s in range(S):
logfilename = ('logs/simple_rd_%s_N_%d_s_%d.log' % (test_data_id, N, s))
pool_args.append((test_data_id, N, s, cap, logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if (num_procs < 1):
for args in pool_args:
test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print(('total time: %f' % (time.time() - start_time))) |
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
'\n Plot relative distances from points found by fiber traversal.\n test_ids, Ns, and samp_range should be as in show_traverse_re_fig.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/traverse_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
(in_rr, out_rr) = (npz['in_RR'], npz['out_RR'])
if (in_rr > 0).any():
in_rr[(in_rr == 0)] = in_rr[(in_rr > 0)].min()
else:
in_rr[(in_rr == 0)] = (2 ** (- 30))
ax = plt.subplot(len(samp_range), len(Ns), sp)
sp += 1
if (out_rr.shape[0] > 0):
plt.hist(np.log2(out_rr), bins=30, log=log, facecolor='k')
plt.hist(np.log2(in_rr), bins=10, log=log, facecolor='w')
if (N == Ns[0]):
plt.ylabel('# of pairs')
if (samp == samp_range[0]):
ax.set_title(('N = %d' % N))
if (samp == samp_range[(- 1)]):
plt.xlabel('Fiber Relative Distance')
plt.xlim([(- 30), 50])
plt.xticks(range((- 30), 51, 10), ([''] + [('$2^{%d}$' % xl) for xl in range((- 20), 51, 10)]))
plt.show() | 783,642,926,598,277,500 | Plot relative distances from points found by fiber traversal.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig. | roundoff.py | show_traverse_rd_fig | garrettkatz/rnn-fxpts | python | def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
'\n Plot relative distances from points found by fiber traversal.\n test_ids, Ns, and samp_range should be as in show_traverse_re_fig.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/traverse_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
(in_rr, out_rr) = (npz['in_RR'], npz['out_RR'])
if (in_rr > 0).any():
in_rr[(in_rr == 0)] = in_rr[(in_rr > 0)].min()
else:
in_rr[(in_rr == 0)] = (2 ** (- 30))
ax = plt.subplot(len(samp_range), len(Ns), sp)
sp += 1
if (out_rr.shape[0] > 0):
plt.hist(np.log2(out_rr), bins=30, log=log, facecolor='k')
plt.hist(np.log2(in_rr), bins=10, log=log, facecolor='w')
if (N == Ns[0]):
plt.ylabel('# of pairs')
if (samp == samp_range[0]):
ax.set_title(('N = %d' % N))
if (samp == samp_range[(- 1)]):
plt.xlabel('Fiber Relative Distance')
plt.xlim([(- 30), 50])
plt.xticks(range((- 30), 51, 10), ([''] + [('$2^{%d}$' % xl) for xl in range((- 20), 51, 10)]))
plt.show() |
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
'\n Plot relative distances from points found by the baseline solver.\n test_ids, Ns, and samp_range should be as in show_baseline_re_fig.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/baseline_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
(in_rr, out_rr) = (npz['in_RR'], npz['out_RR'])
if (in_rr > 0).any():
in_rr[(in_rr == 0)] = in_rr[(in_rr > 0)].min()
else:
in_rr[(in_rr == 0)] = (2 ** (- 30))
ax = plt.subplot(len(samp_range), len(Ns), sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all():
out_rr[:] = (4 * in_rr.max())
else:
out_rr[np.isinf(out_rr)] = (4 * out_rr[(~ np.isinf(out_rr))].max())
print('out_rr:')
print(out_rr.shape)
print((out_rr == 0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if (out_rr.shape[0] > 0):
plt.hist(np.log2(out_rr), bins=30, log=log, facecolor='k')
plt.hist(np.log2(in_rr), bins=10, log=log, facecolor='w')
if (N == Ns[0]):
plt.ylabel('# of pairs')
if (samp == samp_range[0]):
ax.set_title(('N = %d' % N))
if (samp == samp_range[(- 1)]):
plt.xlabel('Baseline Relative Distance')
plt.xlim([(- 30), 50])
plt.xticks(range((- 30), 51, 10), ([''] + [('$2^{%d}$' % xl) for xl in range((- 20), 51, 10)]))
plt.show() | 8,544,105,541,260,878,000 | Plot relative distances from points found by the baseline solver.
test_ids, Ns, and samp_range should be as in show_baseline_re_fig. | roundoff.py | show_baseline_rd_fig | garrettkatz/rnn-fxpts | python | def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
'\n Plot relative distances from points found by the baseline solver.\n test_ids, Ns, and samp_range should be as in show_baseline_re_fig.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/baseline_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
(in_rr, out_rr) = (npz['in_RR'], npz['out_RR'])
if (in_rr > 0).any():
in_rr[(in_rr == 0)] = in_rr[(in_rr > 0)].min()
else:
in_rr[(in_rr == 0)] = (2 ** (- 30))
ax = plt.subplot(len(samp_range), len(Ns), sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all():
out_rr[:] = (4 * in_rr.max())
else:
out_rr[np.isinf(out_rr)] = (4 * out_rr[(~ np.isinf(out_rr))].max())
print('out_rr:')
print(out_rr.shape)
print((out_rr == 0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if (out_rr.shape[0] > 0):
plt.hist(np.log2(out_rr), bins=30, log=log, facecolor='k')
plt.hist(np.log2(in_rr), bins=10, log=log, facecolor='w')
if (N == Ns[0]):
plt.ylabel('# of pairs')
if (samp == samp_range[0]):
ax.set_title(('N = %d' % N))
if (samp == samp_range[(- 1)]):
plt.xlabel('Baseline Relative Distance')
plt.xlim([(- 30), 50])
plt.xticks(range((- 30), 51, 10), ([''] + [('$2^{%d}$' % xl) for xl in range((- 20), 51, 10)]))
plt.show() |
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
'\n Plot relative distances from points found by fiber traversal or baseline.\n test_ids, Ns, and samp_range should be as in show_traverse_re_fig.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/simple_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
if (buckets is None):
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8, 2.4))
if log:
buckets[(buckets > 0)] = np.log2(buckets[(buckets > 0)])
plt.bar(left=bins[:(- 1)], height=buckets, width=(bins[1:] - bins[:(- 1)]), facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$')
xmin_idx = int(((bins[:(- 1)] > (- 1000)) & (buckets > 0)).argmax())
xstep = int(np.ceil(((bins[(- 1)] - bins[xmin_idx]) / 10)))
plt.xticks(bins[xmin_idx::xstep], [('$2^{%d}$' % xl) for xl in bins[xmin_idx::xstep]])
plt.xlim([(bins[xmin_idx] - xstep), (bins[(- 1)] + xstep)])
if log:
ymax = (np.ceil(buckets.max()) + 1)
ystep = np.ceil((ymax / 5))
plt.yticks(np.arange(0, (ymax + ystep), ystep), [('$2^{%d}$' % yl) for yl in np.arange(0, (ymax + ystep), ystep)])
plt.ylim([0, (ymax + 1)])
plt.tight_layout()
plt.show() | -3,905,793,942,477,665,300 | Plot relative distances from points found by fiber traversal or baseline.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig. | roundoff.py | show_simple_rd_all_fig | garrettkatz/rnn-fxpts | python | def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
'\n Plot relative distances from points found by fiber traversal or baseline.\n test_ids, Ns, and samp_range should be as in show_traverse_re_fig.\n '
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id, N) in zip(test_data_ids, Ns):
print(('samp %d, N %d' % (samp, N)))
npz = np.load(('results/simple_rd_%s_N_%d_s_%d.npz' % (test_data_id, N, samp)))
if (buckets is None):
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8, 2.4))
if log:
buckets[(buckets > 0)] = np.log2(buckets[(buckets > 0)])
plt.bar(left=bins[:(- 1)], height=buckets, width=(bins[1:] - bins[:(- 1)]), facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$')
xmin_idx = int(((bins[:(- 1)] > (- 1000)) & (buckets > 0)).argmax())
xstep = int(np.ceil(((bins[(- 1)] - bins[xmin_idx]) / 10)))
plt.xticks(bins[xmin_idx::xstep], [('$2^{%d}$' % xl) for xl in bins[xmin_idx::xstep]])
plt.xlim([(bins[xmin_idx] - xstep), (bins[(- 1)] + xstep)])
if log:
ymax = (np.ceil(buckets.max()) + 1)
ystep = np.ceil((ymax / 5))
plt.yticks(np.arange(0, (ymax + ystep), ystep), [('$2^{%d}$' % yl) for yl in np.arange(0, (ymax + ystep), ystep)])
plt.ylim([0, (ymax + 1)])
plt.tight_layout()
plt.show() |
def take_damage(self, dmg, source):
' after taking damage, if the priestess is not dead, it heals itself'
hp_before_attack = self.hp
super().take_damage(dmg, source)
if (self._is_alive and (hp_before_attack > self.hp) and (source != 'pit')):
heal_message = self.heal_itself()
self.model.announce(f'{self.name}: {heal_message}') | 1,302,554,236,194,353,700 | after taking damage, if the priestess is not dead, it heals itself | priestess.py | take_damage | nvanbaak/dungeon-adventure-2 | python | def take_damage(self, dmg, source):
' '
hp_before_attack = self.hp
super().take_damage(dmg, source)
if (self._is_alive and (hp_before_attack > self.hp) and (source != 'pit')):
heal_message = self.heal_itself()
self.model.announce(f'{self.name}: {heal_message}') |
def resolve_workout(self, info, **kwargs):
'Query resolver for the workout field: filters exercises by body part, difficulty level, name substring, and equipment.'
all_exercises = Exercise.objects.all()
if kwargs.get('body_part'):
all_exercises = all_exercises.select_related('body_part').filter(body_part__name=kwargs.get('body_part').lower())
if kwargs.get('level'):
all_exercises = all_exercises.select_related('level').filter(level__difficulty=kwargs.get('level').lower())
if kwargs.get('exercise_name'):
all_exercises = all_exercises.filter(name__icontains=kwargs.get('exercise_name').lower())
if kwargs.get('equipment'):
all_exercises = all_exercises.select_related('equipment').filter(equipment__name=kwargs.get('equipment').lower())
return all_exercises | -8,813,676,179,882,051,000 | Query resolver for the workout field: filters exercises by body part, difficulty level, name substring, and equipment. | quarantineworkout/workout/schema.py | resolve_workout | adeoke/django-quarantine-workout-graphql | python | def resolve_workout(self, info, **kwargs):
all_exercises = Exercise.objects.all()
if kwargs.get('body_part'):
all_exercises = all_exercises.select_related('body_part').filter(body_part__name=kwargs.get('body_part').lower())
if kwargs.get('level'):
all_exercises = all_exercises.select_related('level').filter(level__difficulty=kwargs.get('level').lower())
if kwargs.get('exercise_name'):
all_exercises = all_exercises.filter(name__icontains=kwargs.get('exercise_name').lower())
if kwargs.get('equipment'):
all_exercises = all_exercises.select_related('equipment').filter(equipment__name=kwargs.get('equipment').lower())
return all_exercises |
def __init__(self, zip_code, house_number, house_addition=''):
'\n To fetch the garbage calendar, you need to set a zip_code and house_number.\n '
self.zip_code = zip_code.replace(' ', '')
self.house_number = house_number.strip()
self.house_addition = house_addition.strip() | 3,134,568,172,365,344,000 | To fetch the garbage calendar, you need to set a zip_code and house_number. | rova/rova.py | __init__ | synoniem/rova | python | def __init__(self, zip_code, house_number, house_addition=''):
'\n \n '
self.zip_code = zip_code.replace(' ', '')
self.house_number = house_number.strip()
self.house_addition = house_addition.strip() |
def is_rova_area(self):
'\n Check if ROVA collects garbage at this address\n '
url = 'https://www.rova.nl/api/waste-calendar/upcoming'
response = requests.get(url, params={'postalcode': self.zip_code, 'houseNumber': self.house_number, 'addition': self.house_addition, 'take': '1'})
response.raise_for_status()
rova_response = response.text.strip()
if (rova_response != '[]'):
rova_response = 'OK'
return (rova_response == 'OK') | -2,616,346,550,750,675,500 | Check if ROVA collects garbage at this address | rova/rova.py | is_rova_area | synoniem/rova | python | def is_rova_area(self):
'\n \n '
url = 'https://www.rova.nl/api/waste-calendar/upcoming'
response = requests.get(url, params={'postalcode': self.zip_code, 'houseNumber': self.house_number, 'addition': self.house_addition, 'take': '1'})
response.raise_for_status()
rova_response = response.text.strip()
if (rova_response != '[]'):
rova_response = 'OK'
return (rova_response == 'OK') |
def get_calendar_items(self, take=5):
'\n Get next pickup date for each garbage types\n '
url = 'https://www.rova.nl/api/waste-calendar/upcoming'
response = requests.get(url, params={'postalcode': self.zip_code, 'houseNumber': self.house_number, 'addition': self.house_addition, 'take': take})
response.raise_for_status()
rova_response = response.json()
items = []
types = []
for item in rova_response:
date = datetime.strptime(item['date'], '%Y-%m-%dT%H:%M:%SZ')
date = date.strftime('%Y-%m-%dT%H:%M:%S')
garbage_type = item['garbageTypeCode'].upper()
items.append({'GarbageTypeCode': garbage_type, 'Date': date})
types.append(garbage_type)
return items | -7,547,873,869,175,699,000 | Get next pickup date for each garbage types | rova/rova.py | get_calendar_items | synoniem/rova | python | def get_calendar_items(self, take=5):
'\n \n '
url = 'https://www.rova.nl/api/waste-calendar/upcoming'
response = requests.get(url, params={'postalcode': self.zip_code, 'houseNumber': self.house_number, 'addition': self.house_addition, 'take': take})
response.raise_for_status()
rova_response = response.json()
items = []
types = []
for item in rova_response:
date = datetime.strptime(item['date'], '%Y-%m-%dT%H:%M:%SZ')
date = date.strftime('%Y-%m-%dT%H:%M:%S')
garbage_type = item['garbageTypeCode'].upper()
items.append({'GarbageTypeCode': garbage_type, 'Date': date})
types.append(garbage_type)
return items |
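A minimal usage sketch for this client, assuming the class in rova/rova.py is named Rova and using a placeholder address:

from rova.rova import Rova   # import path and class name assumed from the file layout

client = Rova('1234AB', '10')                    # placeholder zip code and house number
if client.is_rova_area():
    for pickup in client.get_calendar_items(take=3):
        print(pickup['GarbageTypeCode'], pickup['Date'])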
def __init__(self, state_size, action_size, seed):
'Initialize an Agent object.\n \n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n seed (int): random seed\n '
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
self.t_step = 0 | 2,056,519,366,746,090,000 | Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed | dqn/exercise/dqn_agent.py | __init__ | 0xtristan/deep-reinforcement-learning | python | def __init__(self, state_size, action_size, seed):
'Initialize an Agent object.\n \n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n seed (int): random seed\n '
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
self.t_step = 0 |
def act(self, state, eps=0.0):
'Returns actions for given state as per current policy.\n \n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n '
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
if (random.random() > eps):
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size)) | 3,284,820,839,670,036,500 | Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection | dqn/exercise/dqn_agent.py | act | 0xtristan/deep-reinforcement-learning | python | def act(self, state, eps=0.0):
'Returns actions for given state as per current policy.\n \n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n '
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
if (random.random() > eps):
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size)) |
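A self-contained numeric sketch of the epsilon-greedy rule used in act() (toy action values, not produced by the network):

import random
import numpy as np

action_values = np.array([0.1, 0.7, 0.3, 0.2])   # pretend Q-values for 4 actions
eps = 0.1
if random.random() > eps:
    action = int(np.argmax(action_values))        # exploit: greedy action (index 1 here)
else:
    action = int(random.choice(np.arange(4)))     # explore: uniform random action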
def learn(self, experiences, gamma):
"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples \n gamma (float): discount factor\n "
(states, actions, rewards, next_states, dones) = experiences
'*** YOUR CODE HERE ***'
Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)  # bootstrap from the target network, which is soft-updated at the end of learn()
Q_targets = (rewards + ((gamma * Q_targets_next) * (1 - dones)))
Q_expected = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(Q_expected, Q_targets)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU) | 8,166,505,585,780,385,000 | Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor | dqn/exercise/dqn_agent.py | learn | 0xtristan/deep-reinforcement-learning | python | def learn(self, experiences, gamma):
"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples \n gamma (float): discount factor\n "
(states, actions, rewards, next_states, dones) = experiences
'*** YOUR CODE HERE ***'
Q_targets_next = self.qnetwork_local(next_states).detach().max(1)[0].unsqueeze(1)
Q_targets = (rewards + ((gamma * Q_targets_next) * (1 - dones)))
Q_expected = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(Q_expected, Q_targets)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU) |
def soft_update(self, local_model, target_model, tau):
'Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter \n '
for (target_param, local_param) in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(((tau * local_param.data) + ((1.0 - tau) * target_param.data))) | 3,655,770,241,422,866,000 | Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter | dqn/exercise/dqn_agent.py | soft_update | 0xtristan/deep-reinforcement-learning | python | def soft_update(self, local_model, target_model, tau):
'Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter \n '
for (target_param, local_param) in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(((tau * local_param.data) + ((1.0 - tau) * target_param.data))) |
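A scalar illustration of the soft-update rule θ_target = τ*θ_local + (1 - τ)*θ_target applied above (made-up numbers):

tau = 1e-3
theta_local, theta_target = 0.50, 0.20
theta_target = tau * theta_local + (1.0 - tau) * theta_target
# 0.001 * 0.50 + 0.999 * 0.20 = 0.2003 -- the target network drifts slowly toward the local network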
def __init__(self, action_size, buffer_size, batch_size, seed):
'Initialize a ReplayBuffer object.\n\n Params\n ======\n action_size (int): dimension of each action\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n seed (int): random seed\n '
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple('Experience', field_names=['state', 'action', 'reward', 'next_state', 'done'])
self.seed = random.seed(seed) | -1,162,416,917,650,856,000 | Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed | dqn/exercise/dqn_agent.py | __init__ | 0xtristan/deep-reinforcement-learning | python | def __init__(self, action_size, buffer_size, batch_size, seed):
'Initialize a ReplayBuffer object.\n\n Params\n ======\n action_size (int): dimension of each action\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n seed (int): random seed\n '
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple('Experience', field_names=['state', 'action', 'reward', 'next_state', 'done'])
self.seed = random.seed(seed) |
def add(self, state, action, reward, next_state, done):
'Add a new experience to memory.'
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e) | -8,881,662,531,665,694,000 | Add a new experience to memory. | dqn/exercise/dqn_agent.py | add | 0xtristan/deep-reinforcement-learning | python | def add(self, state, action, reward, next_state, done):
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e) |
def sample(self):
'Randomly sample a batch of experiences from memory.'
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if (e is not None)])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if (e is not None)])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if (e is not None)])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if (e is not None)])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if (e is not None)]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones) | 7,523,822,767,090,451,000 | Randomly sample a batch of experiences from memory. | dqn/exercise/dqn_agent.py | sample | 0xtristan/deep-reinforcement-learning | python | def sample(self):
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if (e is not None)])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if (e is not None)])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if (e is not None)])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if (e is not None)])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if (e is not None)]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones) |
def __len__(self):
'Return the current size of internal memory.'
return len(self.memory) | -960,517,394,760,847,000 | Return the current size of internal memory. | dqn/exercise/dqn_agent.py | __len__ | 0xtristan/deep-reinforcement-learning | python | def __len__(self):
return len(self.memory) |
@classmethod
def _connect(cls):
'Connects to vertica.\n \n :return: a connection to vertica.\n '
return connect(**cls._conn_info) | 783,110,972,030,852,000 | Connects to vertica.
:return: a connection to vertica. | vertica_python/tests/base.py | _connect | etsy/vertica-python | python | @classmethod
def _connect(cls):
'Connects to vertica.\n \n :return: a connection to vertica.\n '
return connect(**cls._conn_info) |
def _query_and_fetchall(self, query):
'Creates a new connection, executes a query and fetches all the results.\n \n :param query: query to execute\n :return: all fetched results as returned by cursor.fetchall()\n '
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
results = cur.fetchall()
return results | 6,632,299,217,410,403,000 | Creates a new connection, executes a query and fetches all the results.
:param query: query to execute
:return: all fetched results as returned by cursor.fetchall() | vertica_python/tests/base.py | _query_and_fetchall | etsy/vertica-python | python | def _query_and_fetchall(self, query):
'Creates a new connection, executes a query and fetches all the results.\n \n :param query: query to execute\n :return: all fetched results as returned by cursor.fetchall()\n '
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
results = cur.fetchall()
return results |
def _query_and_fetchone(self, query):
'Creates a new connection, executes a query and fetches one result.\n \n :param query: query to execute\n :return: the first result fetched by cursor.fetchone()\n '
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
result = cur.fetchone()
return result | -3,428,955,883,212,195,000 | Creates a new connection, executes a query and fetches one result.
:param query: query to execute
:return: the first result fetched by cursor.fetchone() | vertica_python/tests/base.py | _query_and_fetchone | etsy/vertica-python | python | def _query_and_fetchone(self, query):
'Creates a new connection, executes a query and fetches one result.\n \n :param query: query to execute\n :return: the first result fetched by cursor.fetchone()\n '
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
result = cur.fetchone()
return result |
def get_interaction_table(self, user_id, item_id, y):
'Get interaction_table that is used for fetching user-item interaction label in LS regularization.\n\n Args:\n user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]\n item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]\n y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]\n\n Returns:\n tuple:\n - interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}\n - offset(int): The offset that is used for calculating the key(index) in interaction_table\n '
offset = len(str(self.n_entities))
offset = (10 ** offset)
keys = ((user_id * offset) + item_id)
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return (interaction_table, offset) | -8,299,757,703,930,673,000 | Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table | recbole/model/knowledge_aware_recommender/kgnnls.py | get_interaction_table | xingkongxiaxia/RecBole | python | def get_interaction_table(self, user_id, item_id, y):
'Get interaction_table that is used for fetching user-item interaction label in LS regularization.\n\n Args:\n user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]\n item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]\n y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]\n\n Returns:\n tuple:\n - interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}\n - offset(int): The offset that is used for calculating the key(index) in interaction_table\n '
offset = len(str(self.n_entities))
offset = (10 ** offset)
keys = ((user_id * offset) + item_id)
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return (interaction_table, offset) |
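A worked example of the key encoding described in the docstring (all numbers invented):

n_entities = 91457                       # hypothetical entity count
offset = 10 ** len(str(n_entities))      # 10 ** 5 = 100000
user_id, item_id, y = 42, 137, 1.0
key = user_id * offset + item_id         # 42 * 100000 + 137 = 4200137
interaction_table = {key: y}             # later lookups recover y for (user 42, item 137)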
def sample_neg_interaction(self, pos_interaction_table, offset):
'Sample neg_interaction to construct train data.\n\n Args:\n pos_interaction_table(dict): the interaction_table that only contains pos_interaction.\n offset(int): The offset that is used for calculating the key(index) in interaction_table\n\n Returns:\n interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}\n '
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while (neg_num < pos_num):
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = ((user_id * offset) + item_id)
if (keys not in pos_interaction_table):
neg_interaction_table[keys] = 0.0
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table | 3,427,626,011,596,649,500 | Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id} | recbole/model/knowledge_aware_recommender/kgnnls.py | sample_neg_interaction | xingkongxiaxia/RecBole | python | def sample_neg_interaction(self, pos_interaction_table, offset):
'Sample neg_interaction to construct train data.\n\n Args:\n pos_interaction_table(dict): the interaction_table that only contains pos_interaction.\n offset(int): The offset that is used for calculating the key(index) in interaction_table\n\n Returns:\n interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}\n '
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while (neg_num < pos_num):
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = ((user_id * offset) + item_id)
if (keys not in pos_interaction_table):
neg_interaction_table[keys] = 0.0
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table |
def construct_adj(self, kg_graph):
'Get neighbors and corresponding relations for each entity in the KG.\n\n Args:\n kg_graph(scipy.sparse.coo_matrix): an undirected graph\n\n Returns:\n tuple:\n - adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,\n shape: [n_entities, neighbor_sample_size]\n - adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,\n shape: [n_entities, neighbor_sample_size]\n '
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if (head not in kg_dict):
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if (tail not in kg_dict):
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
entity_num = kg_graph.shape[0]
adj_entity = np.zeros([entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros([entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if (entity not in kg_dict.keys()):
adj_entity[entity] = np.array(([entity] * self.neighbor_sample_size))
adj_relation[entity] = np.array(([0] * self.neighbor_sample_size))
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if (n_neighbors >= self.neighbor_sample_size):
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size, replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size, replace=True)
adj_entity[entity] = np.array([neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array([neighbors[i][1] for i in sampled_indices])
return (torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)) | 2,217,805,210,382,188,500 | Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size] | recbole/model/knowledge_aware_recommender/kgnnls.py | construct_adj | xingkongxiaxia/RecBole | python | def construct_adj(self, kg_graph):
'Get neighbors and corresponding relations for each entity in the KG.\n\n Args:\n kg_graph(scipy.sparse.coo_matrix): an undirected graph\n\n Returns:\n tuple:\n - adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,\n shape: [n_entities, neighbor_sample_size]\n - adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,\n shape: [n_entities, neighbor_sample_size]\n '
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if (head not in kg_dict):
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if (tail not in kg_dict):
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
entity_num = kg_graph.shape[0]
adj_entity = np.zeros([entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros([entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if (entity not in kg_dict.keys()):
adj_entity[entity] = np.array(([entity] * self.neighbor_sample_size))
adj_relation[entity] = np.array(([0] * self.neighbor_sample_size))
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if (n_neighbors >= self.neighbor_sample_size):
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size, replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size, replace=True)
adj_entity[entity] = np.array([neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array([neighbors[i][1] for i in sampled_indices])
return (torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)) |
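A toy illustration of the fixed-size neighbor sampling performed per entity in construct_adj (entity and relation ids are invented):

import numpy as np

neighbor_sample_size = 4
neighbors = [(7, 1), (9, 2)]                    # (tail entity, relation) pairs for one head entity
n = len(neighbors)
replace = n < neighbor_sample_size               # too few neighbors -> sample with replacement
idx = np.random.choice(np.arange(n), size=neighbor_sample_size, replace=replace)
adj_entity_row = np.array([neighbors[i][0] for i in idx])     # e.g. [9, 7, 7, 9]
adj_relation_row = np.array([neighbors[i][1] for i in idx])   # e.g. [2, 1, 1, 2]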
def get_neighbors(self, items):
"Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.\n\n Args:\n items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]\n\n Returns:\n tuple:\n - entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size, 1],\n [batch_size, n_neighbor],\n [batch_size, n_neighbor^2],\n ...,\n [batch_size, n_neighbor^n_iter]}\n - relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for\n entities. Relations have the same shape as entities.\n "
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(self.adj_entity, 0, index), (self.batch_size, (- 1)))
neighbor_relations = torch.reshape(torch.index_select(self.adj_relation, 0, index), (self.batch_size, (- 1)))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return (entities, relations) | 6,309,155,545,897,962,000 | Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities. | recbole/model/knowledge_aware_recommender/kgnnls.py | get_neighbors | xingkongxiaxia/RecBole | python | def get_neighbors(self, items):
"Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.\n\n Args:\n items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]\n\n Returns:\n tuple:\n - entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size, 1],\n [batch_size, n_neighbor],\n [batch_size, n_neighbor^2],\n ...,\n [batch_size, n_neighbor^n_iter]}\n - relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for\n entities. Relations have the same shape as entities.\n "
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(self.adj_entity, 0, index), (self.batch_size, (- 1)))
neighbor_relations = torch.reshape(torch.index_select(self.adj_relation, 0, index), (self.batch_size, (- 1)))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return (entities, relations) |
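A shape-only sketch of how the receptive field grows over the n_iter hops collected by get_neighbors (batch size and sample size are placeholders):

batch_size, neighbor_sample_size, n_iter = 8, 4, 2
entity_shapes = [(batch_size, 1)]
for i in range(n_iter):
    entity_shapes.append((batch_size, neighbor_sample_size ** (i + 1)))
# entity_shapes == [(8, 1), (8, 4), (8, 16)]; relations[i] has the same shape as entities[i + 1]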
def aggregate(self, user_embeddings, entities, relations):
'For each item, aggregate the entity representation and its neighborhood representation into a single vector.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]\n entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size, 1],\n [batch_size, n_neighbor],\n [batch_size, n_neighbor^2],\n ...,\n [batch_size, n_neighbor^n_iter]}\n relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.\n relations have the same shape as entities.\n\n Returns:\n item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]\n\n '
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range((self.n_iter - i)):
shape = (self.batch_size, (- 1), self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(entity_vectors[(hop + 1)], shape)
neighbor_relations = torch.reshape(relation_vectors[hop], shape)
user_embeddings = torch.reshape(user_embeddings, (self.batch_size, 1, 1, self.embedding_size))
user_relation_scores = torch.mean((user_embeddings * neighbor_relations), dim=(- 1))
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores), dim=(- 1))
neighbors_agg = torch.mean((user_relation_scores_normalized * neighbor_vectors), dim=2)
if (self.aggregator_class == 'sum'):
output = torch.reshape((self_vectors + neighbors_agg), ((- 1), self.embedding_size))
elif (self.aggregator_class == 'neighbor'):
output = torch.reshape(neighbors_agg, ((- 1), self.embedding_size))
elif (self.aggregator_class == 'concat'):
output = torch.cat([self_vectors, neighbors_agg], dim=(- 1))
output = torch.reshape(output, ((- 1), (self.embedding_size * 2)))
else:
raise Exception(('Unknown aggregator: ' + self.aggregator_class))
output = self.linear_layers[i](output)
output = torch.reshape(output, [self.batch_size, (- 1), self.embedding_size])
if (i == (self.n_iter - 1)):
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(entity_vectors[0], (self.batch_size, self.embedding_size))
return res | -6,236,746,642,292,884,000 | For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size] | recbole/model/knowledge_aware_recommender/kgnnls.py | aggregate | xingkongxiaxia/RecBole | python | def aggregate(self, user_embeddings, entities, relations):
'For each item, aggregate the entity representation and its neighborhood representation into a single vector.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]\n entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size, 1],\n [batch_size, n_neighbor],\n [batch_size, n_neighbor^2],\n ...,\n [batch_size, n_neighbor^n_iter]}\n relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.\n relations have the same shape as entities.\n\n Returns:\n item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]\n\n '
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range((self.n_iter - i)):
shape = (self.batch_size, (- 1), self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(entity_vectors[(hop + 1)], shape)
neighbor_relations = torch.reshape(relation_vectors[hop], shape)
user_embeddings = torch.reshape(user_embeddings, (self.batch_size, 1, 1, self.embedding_size))
user_relation_scores = torch.mean((user_embeddings * neighbor_relations), dim=(- 1))
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores), dim=(- 1))
neighbors_agg = torch.mean((user_relation_scores_normalized * neighbor_vectors), dim=2)
if (self.aggregator_class == 'sum'):
output = torch.reshape((self_vectors + neighbors_agg), ((- 1), self.embedding_size))
elif (self.aggregator_class == 'neighbor'):
output = torch.reshape(neighbors_agg, ((- 1), self.embedding_size))
elif (self.aggregator_class == 'concat'):
output = torch.cat([self_vectors, neighbors_agg], dim=(- 1))
output = torch.reshape(output, ((- 1), (self.embedding_size * 2)))
else:
raise Exception(('Unknown aggregator: ' + self.aggregator_class))
output = self.linear_layers[i](output)
output = torch.reshape(output, [self.batch_size, (- 1), self.embedding_size])
if (i == (self.n_iter - 1)):
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(entity_vectors[0], (self.batch_size, self.embedding_size))
return res |
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
'Predict the label of items by label smoothness.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],\n user(torch.FloatTensor): the index of users, shape: [batch_size*2]\n entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size*2, 1],\n [batch_size*2, n_neighbor],\n [batch_size*2, n_neighbor^2],\n ...,\n [batch_size*2, n_neighbor^n_iter]}\n relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.\n relations have the same shape as entities.\n\n Returns:\n predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]\n '
entity_labels = []
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1)
user_entity_concat = ((users * self.offset) + entities_per_iter)
if (holdout_item_for_user is None):
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(reset_mask, holdout_mask)
initial_label = ((holdout_mask.float() * initial_label) + (torch.logical_not(holdout_mask).float() * 0.5))
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
reset_masks = reset_masks[:(- 1)]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range((self.n_iter - i)):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[(hop + 1)], [self.batch_size, (- 1), self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop], [self.batch_size, (- 1), self.neighbor_sample_size, self.embedding_size])
user_embeddings = torch.reshape(user_embeddings, [self.batch_size, 1, 1, self.embedding_size])
user_relation_scores = torch.mean((user_embeddings * neighbor_relations), dim=(- 1))
user_relation_scores_normalized = self.softmax(user_relation_scores)
neighbors_aggregated_label = torch.mean((user_relation_scores_normalized * neighbor_labels), dim=2)
output = ((masks.float() * self_labels) + (torch.logical_not(masks).float() * neighbors_aggregated_label))
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze((- 1))
return predicted_labels | 1,820,534,822,281,268,500 | Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2] | recbole/model/knowledge_aware_recommender/kgnnls.py | label_smoothness_predict | xingkongxiaxia/RecBole | python | def label_smoothness_predict(self, user_embeddings, user, entities, relations):
'Predict the label of items by label smoothness.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],\n user(torch.FloatTensor): the index of users, shape: [batch_size*2]\n entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size*2, 1],\n [batch_size*2, n_neighbor],\n [batch_size*2, n_neighbor^2],\n ...,\n [batch_size*2, n_neighbor^n_iter]}\n relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.\n relations have the same shape as entities.\n\n Returns:\n predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]\n '
entity_labels = []
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1)
user_entity_concat = ((users * self.offset) + entities_per_iter)
if (holdout_item_for_user is None):
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(reset_mask, holdout_mask)
initial_label = ((holdout_mask.float() * initial_label) + (torch.logical_not(holdout_mask).float() * 0.5))
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
reset_masks = reset_masks[:(- 1)]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range((self.n_iter - i)):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[(hop + 1)], [self.batch_size, (- 1), self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop], [self.batch_size, (- 1), self.neighbor_sample_size, self.embedding_size])
user_embeddings = torch.reshape(user_embeddings, [self.batch_size, 1, 1, self.embedding_size])
user_relation_scores = torch.mean((user_embeddings * neighbor_relations), dim=(- 1))
user_relation_scores_normalized = self.softmax(user_relation_scores)
neighbors_aggregated_label = torch.mean((user_relation_scores_normalized * neighbor_labels), dim=2)
output = ((masks.float() * self_labels) + (torch.logical_not(masks).float() * neighbors_aggregated_label))
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze((- 1))
return predicted_labels |
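
A minimal standalone sketch of the interaction-table lookup used inside label_smoothness_predict above. The offset, user/entity indices, and the single observed pair are made-up values; the dict plus Tensor.map_ mirror lookup_interaction_table, where unseen user-item keys fall back to the neutral label 0.5.

import torch

# Assumed toy setup: offset 10000, user 1 interacted with entity 3 (label 1.0).
interaction_table = {1 * 10000 + 3: 1.0}
offset = 10000

users = torch.tensor([[1], [2]])
entities = torch.tensor([[3], [7]])
keys = (users * offset + entities).double()   # encode (user, entity) pairs as single keys

def lookup_interaction_table(x, _):
    # unseen pairs default to the neutral label 0.5, as in the method above
    return interaction_table.setdefault(int(x), 0.5)

labels = keys.clone()
labels.map_(labels, lookup_interaction_table)  # CPU-only element-wise lookup
print(labels)   # tensor([[1.0000], [0.5000]], dtype=torch.float64)
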
def calculate_ls_loss(self, user, item, target):
'Calculate label smoothness loss.\n\n Args:\n user(torch.FloatTensor): the index of users, shape: [batch_size*2],\n item(torch.FloatTensor): the index of items, shape: [batch_size*2],\n target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],\n\n Returns:\n ls_loss: label smoothness loss\n '
user_e = self.user_embedding(user)
(entities, relations) = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss | -1,483,023,215,681,424,100 | Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss | recbole/model/knowledge_aware_recommender/kgnnls.py | calculate_ls_loss | xingkongxiaxia/RecBole | python | def calculate_ls_loss(self, user, item, target):
'Calculate label smoothness loss.\n\n Args:\n user(torch.FloatTensor): the index of users, shape: [batch_size*2],\n item(torch.FloatTensor): the index of items, shape: [batch_size*2],\n target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],\n\n Returns:\n ls_loss: label smoothness loss\n '
user_e = self.user_embedding(user)
(entities, relations) = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss |
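
A toy numeric check of the loss computed at the end of calculate_ls_loss; it assumes self.bce_loss behaves like a plain binary cross-entropy on probabilities in [0, 1] (the predicted labels produced by label smoothness). The tensor values and the bce_loss name are illustrative, not taken from RecBole.

import torch
import torch.nn as nn

bce_loss = nn.BCELoss()                             # assumed stand-in for self.bce_loss
predicted_labels = torch.tensor([0.9, 0.2, 0.6])    # label-smoothness predictions in [0, 1]
target = torch.tensor([1.0, 0.0, 1.0])              # observed user-item labels
print(bce_loss(predicted_labels, target))           # scalar label-smoothness loss
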
def to_python(self, value):
'Convert our string value to JSON after we load it from the DB'
if ((value is None) or (value == '')):
return {}
elif isinstance(value, basestring):
res = loads(value)
if isinstance(res, dict):
return JSONDict(**res)
else:
return JSONList(res)
else:
return value | -834,000,970,839,273,900 | Convert our string value to JSON after we load it from the DB | vendor-local/src/django-extensions/build/lib/django_extensions/db/fields/json.py | to_python | Mozilla-GitHub-Standards/b6a5bb5c98b18d87c72c770f29c4270008fc6fc6b787d531a2afcd382dc4cbad | python | def to_python(self, value):
if ((value is None) or (value == )):
return {}
elif isinstance(value, basestring):
res = loads(value)
if isinstance(res, dict):
return JSONDict(**res)
else:
return JSONList(res)
else:
return value |
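
A self-contained sketch of the to_python contract above, with Python 3's str standing in for basestring and plain dict/list standing in for JSONDict/JSONList; the function name follows the method, not the real django-extensions classes.

import json

def to_python(value):
    # mirror of the field method above: empty/None -> {}, strings -> parsed JSON
    if value is None or value == '':
        return {}
    if isinstance(value, str):          # basestring in the original Python 2 code
        res = json.loads(value)
        return dict(res) if isinstance(res, dict) else list(res)
    return value

print(to_python('{"a": 1}'))    # {'a': 1}
print(to_python('[1, 2, 3]'))   # [1, 2, 3]
print(to_python(''))            # {}
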
def get_db_prep_save(self, value, connection):
'Convert our JSON object to a string before we save'
if (not isinstance(value, (list, dict))):
return super(JSONField, self).get_db_prep_save('', connection=connection)
else:
return super(JSONField, self).get_db_prep_save(dumps(value), connection=connection) | -3,618,754,902,002,978,000 | Convert our JSON object to a string before we save | vendor-local/src/django-extensions/build/lib/django_extensions/db/fields/json.py | get_db_prep_save | Mozilla-GitHub-Standards/b6a5bb5c98b18d87c72c770f29c4270008fc6fc6b787d531a2afcd382dc4cbad | python | def get_db_prep_save(self, value, connection):
if (not isinstance(value, (list, dict))):
return super(JSONField, self).get_db_prep_save(, connection=connection)
else:
return super(JSONField, self).get_db_prep_save(dumps(value), connection=connection) |
def south_field_triple(self):
'Returns a suitable description of this field for South.'
from south.modelsinspector import introspector
field_class = 'django.db.models.fields.TextField'
(args, kwargs) = introspector(self)
return (field_class, args, kwargs) | -532,884,842,270,397,060 | Returns a suitable description of this field for South. | vendor-local/src/django-extensions/build/lib/django_extensions/db/fields/json.py | south_field_triple | Mozilla-GitHub-Standards/b6a5bb5c98b18d87c72c770f29c4270008fc6fc6b787d531a2afcd382dc4cbad | python | def south_field_triple(self):
from south.modelsinspector import introspector
field_class = 'django.db.models.fields.TextField'
(args, kwargs) = introspector(self)
return (field_class, args, kwargs) |
@cached_property
def additional_properties_type():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n '
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type) | 1,702,168,743,392,494,600 | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded | code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py | additional_properties_type | factset/enterprise-sdk | python | @cached_property
def additional_properties_type():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n '
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type) |
@cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'data': ([InlineResponse20013Data],), 'meta': (InlineResponse200Meta,)} | 7,408,037,427,849,946,000 | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type. | code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py | openapi_types | factset/enterprise-sdk | python | @cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'data': ([InlineResponse20013Data],), 'meta': (InlineResponse200Meta,)} |
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
'InlineResponse20013 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n data ([InlineResponse20013Data]): List of Internet media types.. [optional] # noqa: E501\n meta (InlineResponse200Meta): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
return self | 9,188,032,339,138,415,000 | InlineResponse20013 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([InlineResponse20013Data]): List of Internet media types.. [optional] # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501 | code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py | _from_openapi_data | factset/enterprise-sdk | python | @classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
'InlineResponse20013 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n data ([InlineResponse20013Data]): List of Internet media types.. [optional] # noqa: E501\n meta (InlineResponse200Meta): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
return self |
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
'InlineResponse20013 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n data ([InlineResponse20013Data]): List of Internet media types.. [optional] # noqa: E501\n meta (InlineResponse200Meta): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
if (var_name in self.read_only_vars):
raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.') | -3,725,525,108,762,265,600 | InlineResponse20013 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([InlineResponse20013Data]): List of Internet media types.. [optional] # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501 | code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20013.py | __init__ | factset/enterprise-sdk | python | @convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
'InlineResponse20013 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n data ([InlineResponse20013Data]): List of Internet media types.. [optional] # noqa: E501\n meta (InlineResponse200Meta): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
if (var_name in self.read_only_vars):
raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.') |
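
A hypothetical usage sketch for the generated model above; it assumes the FactSet QuotesAPIforDigitalPortals Python SDK is installed and that the import path matches the file path in this row. Both constructor fields are optional, so an empty data list is enough for instantiation.

# Hypothetical: requires the generated SDK package to be installed.
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response20013 import InlineResponse20013

response = InlineResponse20013(data=[])   # 'data' and 'meta' are both optional kwargs
print(response)                           # pretty-printed model with the stored fields
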
def load_data(traindir, valdir, **kwargs):
'generate the train and val dataloader, you can change this for your specific task\n\n Args:\n traindir (str): train dataset dir\n valdir (str): validation dataset dir\n\n Returns:\n tuple: the train dataset and validation dataset\n '
train_transform = T.Compose([T.RandomCrop(512), T.RandomHorizontalFlip(), T.RandomVerticalFlip(), T.ToTensor(), T.Normalize()])
val_transform = T.Compose([T.ToTensor(), T.Normalize()])
dataset_train = ChangeDetectionDataset(traindir, extentions=kwargs['extensions'], transforms=train_transform)
dataset_val = ChangeDetectionDataset(valdir, extentions=kwargs['extensions'], transforms=val_transform)
return (dataset_train, dataset_val) | 5,464,277,367,755,207,000 | generate the train and val dataloader, you can change this for your specific task
Args:
traindir (str): train dataset dir
valdir (str): validation dataset dir
Returns:
tuple: the train dataset and validation dataset | torchsat/scripts/train_cd.py | load_data | alina2204/contrastive_SSL_ship_detection | python | def load_data(traindir, valdir, **kwargs):
'generate the train and val dataloader, you can change this for your specific task\n\n Args:\n traindir (str): train dataset dir\n valdir (str): validation dataset dir\n\n Returns:\n tuple: the train dataset and validation dataset\n '
train_transform = T.Compose([T.RandomCrop(512), T.RandomHorizontalFlip(), T.RandomVerticalFlip(), T.ToTensor(), T.Normalize()])
val_transform = T.Compose([T.ToTensor(), T.Normalize()])
dataset_train = ChangeDetectionDataset(traindir, extentions=kwargs['extensions'], transforms=train_transform)
dataset_val = ChangeDetectionDataset(valdir, extentions=kwargs['extensions'], transforms=val_transform)
return (dataset_train, dataset_val) |
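
A hypothetical call sketch for load_data above; the directory paths and the 'extensions' value are placeholders, and torchsat's ChangeDetectionDataset must be importable for the function itself to run. Wrapping the returned datasets in DataLoaders is the usual next step.

from torch.utils.data import DataLoader

# placeholders: point these at real change-detection folders
dataset_train, dataset_val = load_data('data/train', 'data/val', extensions=('jpg', 'png'))

train_loader = DataLoader(dataset_train, batch_size=4, shuffle=True)
val_loader = DataLoader(dataset_val, batch_size=1, shuffle=False)
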
def aggregate_gradients_using_nccl(replica_grads):
'Aggregate gradients using nccl allreduce.'
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for (g, _) in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append([(g, v) for (g, (_, v)) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v | -460,152,936,942,818,200 | Aggregate gradients using nccl allreduce. | tensorflow/python/distribute/cross_device_utils.py | aggregate_gradients_using_nccl | DeuroIO/Deuro-tensorflow | python | def aggregate_gradients_using_nccl(replica_grads):
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for (g, _) in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append([(g, v) for (g, (_, v)) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v |
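
A pure-Python sketch of the data layout handled by aggregate_gradients_using_nccl above: replica_grads is indexed [replica][variable] as (gradient, variable) pairs, zip(*replica_grads) regroups it per variable, and a plain sum stands in for nccl_ops.all_sum.

replica_grads = [
    [(1.0, 'w'), (2.0, 'b')],   # replica 0
    [(3.0, 'w'), (4.0, 'b')],   # replica 1
]

agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):           # iterate per variable
    summed = sum(g for g, _ in single_g_and_v)       # stand-in for nccl_ops.all_sum
    agg_all_g_and_v.append([(summed, v) for _, v in single_g_and_v])

print(list(zip(*agg_all_g_and_v)))                   # back to per-replica layout
# [((4.0, 'w'), (6.0, 'b')), ((4.0, 'w'), (6.0, 'b'))]
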
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
'Aggregate gradients using hierarchical copies.\n\n Args:\n avail_devices: available GPU devices.\n replica_grads: List of lists of (gradient, variable) tuples. The outer list\n is over replicas. The inner list is over individual gradients.\n\n Returns:\n The list of (aggregated_gradient, variable), where the gradient has been\n summed across all replicas and the variable is chosen from the first\n replica.\n '
agg_grads = []
num_devices = len(avail_devices)
group_size = (num_devices // 2)
for (i, single_grads) in enumerate(zip(*replica_grads)):
group_0_main_device = (i % num_devices)
group_1_main_device = ((group_0_main_device + group_size) % num_devices)
if (group_0_main_device < group_size):
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
group_0_device_grads = single_grads[group_0_begin:(group_0_begin + group_size)]
with ops.device(avail_devices[group_0_main_device]):
(group_0_agg_grads, _) = aggregate_single_gradient_using_copy(group_0_device_grads, False, False)
group_1_device_grads = single_grads[group_1_begin:(group_1_begin + group_size)]
with ops.device(avail_devices[group_1_main_device]):
(group_1_agg_grads, _) = aggregate_single_gradient_using_copy(group_1_device_grads, False, False)
with ops.device(avail_devices[group_0_main_device]):
((agg_total_grads, _), _) = aggregate_single_gradient_using_copy([group_0_agg_grads, group_1_agg_grads], False, False)
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
if ((group_0_main_device < group_size) == (j < group_size)):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append([(g, v) for (g, (_, v)) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads | -5,807,300,737,462,545,000 | Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica. | tensorflow/python/distribute/cross_device_utils.py | aggregate_gradients_using_hierarchical_copy | DeuroIO/Deuro-tensorflow | python | def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
'Aggregate gradients using hierarchical copies.\n\n Args:\n avail_devices: available GPU devices.\n replica_grads: List of lists of (gradient, variable) tuples. The outer list\n is over replicas. The inner list is over individual gradients.\n\n Returns:\n The list of (aggregated_gradient, variable), where the gradient has been\n summed across all replicas and the variable is chosen from the first\n replica.\n '
agg_grads = []
num_devices = len(avail_devices)
group_size = (num_devices // 2)
for (i, single_grads) in enumerate(zip(*replica_grads)):
group_0_main_device = (i % num_devices)
group_1_main_device = ((group_0_main_device + group_size) % num_devices)
if (group_0_main_device < group_size):
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
group_0_device_grads = single_grads[group_0_begin:(group_0_begin + group_size)]
with ops.device(avail_devices[group_0_main_device]):
(group_0_agg_grads, _) = aggregate_single_gradient_using_copy(group_0_device_grads, False, False)
group_1_device_grads = single_grads[group_1_begin:(group_1_begin + group_size)]
with ops.device(avail_devices[group_1_main_device]):
(group_1_agg_grads, _) = aggregate_single_gradient_using_copy(group_1_device_grads, False, False)
with ops.device(avail_devices[group_0_main_device]):
((agg_total_grads, _), _) = aggregate_single_gradient_using_copy([group_0_agg_grads, group_1_agg_grads], False, False)
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
if ((group_0_main_device < group_size) == (j < group_size)):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append([(g, v) for (g, (_, v)) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads |
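
A small illustration of the group bookkeeping in aggregate_gradients_using_hierarchical_copy above, for 4 devices: group 0's main device rotates with the gradient index i, and group 1's main device sits group_size positions away modulo the device count.

num_devices = 4
group_size = num_devices // 2

for i in range(4):   # i plays the role of the gradient index
    group_0_main_device = i % num_devices
    group_1_main_device = (group_0_main_device + group_size) % num_devices
    print(i, group_0_main_device, group_1_main_device)
# 0 0 2
# 1 1 3
# 2 2 0
# 3 3 1
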
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean, check_inf_nan):
'Calculate the average gradient for a shared variable across all replicas.\n\n Note that this function provides a synchronization point across all replicas.\n\n Args:\n grad_and_vars: A list or tuple of (gradient, variable) tuples. Each\n (gradient, variable) pair within the outer list represents the gradient\n of the variable calculated for a single replica, and the number of pairs\n equals the number of replicas.\n use_mean: if True, mean is taken, else sum of gradients is taken.\n check_inf_nan: check grads for nans and infs.\n\n Returns:\n The tuple ([(average_gradient, variable),], has_nan_or_inf) where the\n gradient has been averaged across all replicas. The variable is chosen\n from the first replica. The has_nan_or_inf indicates the grads has nan or\n inf.\n '
grads = [g for (g, _) in grad_and_vars]
grad = math_ops.add_n(grads)
if (use_mean and (len(grads) > 1)):
grad = array_ops.multiply(grad, (1.0 / len(grads)))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(array_ops.reduce_all(array_ops.is_finite(grads)))
return ((grad, v), has_nan_or_inf)
else:
return ((grad, v), None) | -739,028,824,022,532,700 | Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. The has_nan_or_inf indicates the grads has nan or
inf. | tensorflow/python/distribute/cross_device_utils.py | aggregate_single_gradient_using_copy | DeuroIO/Deuro-tensorflow | python | def aggregate_single_gradient_using_copy(grad_and_vars, use_mean, check_inf_nan):
'Calculate the average gradient for a shared variable across all replicas.\n\n Note that this function provides a synchronization point across all replicas.\n\n Args:\n grad_and_vars: A list or tuple of (gradient, variable) tuples. Each\n (gradient, variable) pair within the outer list represents the gradient\n of the variable calculated for a single replica, and the number of pairs\n equals the number of replicas.\n use_mean: if True, mean is taken, else sum of gradients is taken.\n check_inf_nan: check grads for nans and infs.\n\n Returns:\n The tuple ([(average_gradient, variable),], has_nan_or_inf) where the\n gradient has been averaged across all replicas. The variable is chosen\n from the first replica. The has_nan_or_inf indicates the grads has nan or\n inf.\n '
grads = [g for (g, _) in grad_and_vars]
grad = math_ops.add_n(grads)
if (use_mean and (len(grads) > 1)):
grad = array_ops.multiply(grad, (1.0 / len(grads)))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(array_ops.reduce_all(array_ops.is_finite(grads)))
return ((grad, v), has_nan_or_inf)
else:
return ((grad, v), None) |
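
A toy numeric check of the use_mean=True path in aggregate_single_gradient_using_copy above: the per-replica gradients for one shared variable are summed and scaled by 1/num_replicas, and the variable is taken from the first replica. The values are made up.

grad_and_vars = [(2.0, 'w'), (4.0, 'w'), (6.0, 'w')]   # same variable, three replicas

grads = [g for g, _ in grad_and_vars]
grad = sum(grads)
if len(grads) > 1:
    grad *= 1.0 / len(grads)

print(grad, grad_and_vars[0][1])   # 4.0 w
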
def group_device_names(devices, group_size):
'Group device names into groups of group_size.\n\n Args:\n devices: a list of canonical device strings.\n group_size: integer which is equal to or greater than 1.\n\n Returns:\n list of lists of devices, where each inner list is group_size long,\n and each device appears at least once in an inner list. If\n len(devices) % group_size == 0 then each device will appear exactly once.\n\n Raises:\n ValueError: if group_size > len(devices)\n '
num_devices = len(devices)
if (group_size > num_devices):
raise ValueError(('only %d devices, but group_size=%d' % (num_devices, group_size)))
num_groups = ((num_devices // group_size) + (1 if ((num_devices % group_size) != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range((num_groups * group_size)):
groups[(i % num_groups)].append(devices[(i % num_devices)])
return groups | -402,665,421,571,026,240 | Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices) | tensorflow/python/distribute/cross_device_utils.py | group_device_names | DeuroIO/Deuro-tensorflow | python | def group_device_names(devices, group_size):
'Group device names into groups of group_size.\n\n Args:\n devices: a list of canonical device strings.\n group_size: integer which is equal to or greater than 1.\n\n Returns:\n list of lists of devices, where each inner list is group_size long,\n and each device appears at least once in an inner list. If\n len(devices) % group_size == 0 then each device will appear exactly once.\n\n Raises:\n ValueError: if group_size > len(devices)\n '
num_devices = len(devices)
if (group_size > num_devices):
raise ValueError(('only %d devices, but group_size=%d' % (num_devices, group_size)))
num_groups = ((num_devices // group_size) + (1 if ((num_devices % group_size) != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range((num_groups * group_size)):
groups[(i % num_groups)].append(devices[(i % num_devices)])
return groups |
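
A worked example of group_device_names above with 5 devices and group_size 2 (assuming the function is in scope): ceil(5 / 2) = 3 groups are built and devices are assigned round-robin, so every device appears at least once and 'd0' is reused to fill the last slot.

devices = ['d0', 'd1', 'd2', 'd3', 'd4']
print(group_device_names(devices, 2))
# [['d0', 'd3'], ['d1', 'd4'], ['d2', 'd0']]
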
def split_grads_by_size(threshold_size, device_grads):
'Break gradients into two sets according to tensor size.\n\n Args:\n threshold_size: int size cutoff for small vs large tensor.\n device_grads: List of lists of (gradient, variable) tuples. The outer\n list is over devices. The inner list is over individual gradients.\n\n Returns:\n small_grads: Subset of device_grads where shape is <= threshold_size\n elements.\n large_grads: Subset of device_grads where shape is > threshold_size\n elements.\n '
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if (tensor_size <= threshold_size):
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return (small_grads, large_grads) | 7,115,999,087,250,416,000 | Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements. | tensorflow/python/distribute/cross_device_utils.py | split_grads_by_size | DeuroIO/Deuro-tensorflow | python | def split_grads_by_size(threshold_size, device_grads):
'Break gradients into two sets according to tensor size.\n\n Args:\n threshold_size: int size cutoff for small vs large tensor.\n device_grads: List of lists of (gradient, variable) tuples. The outer\n list is over devices. The inner list is over individual gradients.\n\n Returns:\n small_grads: Subset of device_grads where shape is <= threshold_size\n elements.\n large_grads: Subset of device_grads where shape is > threshold_size\n elements.\n '
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if (tensor_size <= threshold_size):
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return (small_grads, large_grads) |
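
A pure-Python sketch of the size-based split in split_grads_by_size above, with each gradient's element count given directly instead of via g.get_shape().num_elements(); the function and variable names here are illustrative only.

def split_by_size(threshold_size, device_grads):
    small_grads, large_grads = [], []
    for dl in device_grads:
        small_dl = [(size, v) for size, v in dl if size <= threshold_size]
        large_dl = [(size, v) for size, v in dl if size > threshold_size]
        if small_dl:
            small_grads.append(small_dl)
        if large_dl:
            large_grads.append(large_dl)
    return small_grads, large_grads

# one device holding a tiny bias gradient and a large kernel gradient
print(split_by_size(1000, [[(10, 'bias'), (4096, 'kernel')]]))
# ([[(10, 'bias')]], [[(4096, 'kernel')]])
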
def build_collective_reduce(input_tensors, num_workers, collective_keys, reduction_op='Add', unary_op='Id'):
'Build a subgraph that does one full all-reduce, using the collective Op.\n\n Args:\n input_tensors: tensors within a single worker graph that are to be reduced\n together; must be one per device.\n num_workers: total number of workers with identical independent graphs that\n will be doing this same reduction. The reduction will actually include\n the corresponding tensors at all these workers.\n collective_keys: a CollectiveKeys object.\n reduction_op: string naming the reduction op.\n unary_op: string naming the unary final op.\n\n Returns:\n An array of final tensors, one per device, computed by the full reduction.\n\n Raises:\n ValueError: There must be at least two tensors over all the workers.\n '
group_size = (len(input_tensors) * num_workers)
if (group_size < 2):
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0]
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(input_tensors[d], group_size, group_key, instance_key, reduction_op, unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors | -1,641,276,473,819,680,500 | Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers. | tensorflow/python/distribute/cross_device_utils.py | build_collective_reduce | DeuroIO/Deuro-tensorflow | python | def build_collective_reduce(input_tensors, num_workers, collective_keys, reduction_op='Add', unary_op='Id'):
'Build a subgraph that does one full all-reduce, using the collective Op.\n\n Args:\n input_tensors: tensors within a single worker graph that are to be reduced\n together; must be one per device.\n num_workers: total number of workers with identical independent graphs that\n will be doing this same reduction. The reduction will actually include\n the corresponding tensors at all these workers.\n collective_keys: a CollectiveKeys object.\n reduction_op: string naming the reduction op.\n unary_op: string naming the unary final op.\n\n Returns:\n An array of final tensors, one per device, computed by the full reduction.\n\n Raises:\n ValueError: There must be at least two tensors over all the workers.\n '
group_size = (len(input_tensors) * num_workers)
if (group_size < 2):
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0]
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(input_tensors[d], group_size, group_key, instance_key, reduction_op, unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors |
def sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg, gpu_indices, aux_devices=None, num_shards=1):
'Apply all-reduce algorithm over specified gradient tensors.'
with ops.name_scope('allreduce'):
scaled_grads = [g for (g, _) in grad_and_vars]
if (alg == 'nccl'):
summed_grads = nccl_ops.all_sum(scaled_grads)
elif (alg == 'xring'):
summed_grads = all_reduce.build_ring_all_reduce(scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif (alg == 'nccl/xring'):
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards, math_ops.add)
elif (alg == 'nccl/rechd'):
summed_grads = all_reduce.build_nccl_then_recursive_hd(scaled_grads, math_ops.add)
elif (alg == 'nccl/pscpu'):
summed_grads = all_reduce.build_nccl_then_shuffle(scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif (alg == 'pscpu/pscpu'):
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif (alg in ['pscpu', 'psgpu']):
summed_grads = all_reduce.build_shuffle_all_reduce(scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for ((_, v), g) in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result | -2,988,830,372,582,981,600 | Apply all-reduce algorithm over specified gradient tensors. | tensorflow/python/distribute/cross_device_utils.py | sum_grad_and_var_all_reduce | DeuroIO/Deuro-tensorflow | python | def sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg, gpu_indices, aux_devices=None, num_shards=1):
with ops.name_scope('allreduce'):
scaled_grads = [g for (g, _) in grad_and_vars]
if (alg == 'nccl'):
summed_grads = nccl_ops.all_sum(scaled_grads)
elif (alg == 'xring'):
summed_grads = all_reduce.build_ring_all_reduce(scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif (alg == 'nccl/xring'):
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards, math_ops.add)
elif (alg == 'nccl/rechd'):
summed_grads = all_reduce.build_nccl_then_recursive_hd(scaled_grads, math_ops.add)
elif (alg == 'nccl/pscpu'):
summed_grads = all_reduce.build_nccl_then_shuffle(scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif (alg == 'pscpu/pscpu'):
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif (alg in ['pscpu', 'psgpu']):
summed_grads = all_reduce.build_shuffle_all_reduce(scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for ((_, v), g) in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result |
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg, num_shards, gpu_indices):
'Apply all-reduce algorithm over specified gradient tensors.\n\n Args:\n dev_prefixes: list of prefix strings to use to generate PS device names.\n replica_grads: the gradients to reduce.\n num_workers: number of worker processes across entire job.\n alg: the all-reduce algorithm to apply.\n num_shards: alg-specific sharding factor.\n gpu_indices: indices of local GPUs in order usable for ring-reduce.\n\n Returns:\n list of reduced tensors\n '
alg_contains_shuffle = any([(n in alg) for n in ['pscpu', 'psgpu']])
is_hierarchical = ('/' in alg)
if ('pscpu' in alg):
aux_devices = [(prefix + '/cpu:0') for prefix in dev_prefixes]
elif ('psgpu' in alg):
aux_devices = [(prefix + ('/gpu:%d' % i)) for i in range(len(gpu_indices)) for prefix in dev_prefixes]
else:
aux_devices = ['/job:localhost/cpu:0']
aux_device_groups = group_device_names(aux_devices, (num_shards if alg_contains_shuffle else 1))
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg, gpu_indices, (aux_devices if is_hierarchical else aux_device_groups[group_index]), num_shards))
group_index = ((group_index + 1) % len(aux_device_groups))
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads | 4,814,734,914,225,489,000 | Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors | tensorflow/python/distribute/cross_device_utils.py | sum_gradients_all_reduce | DeuroIO/Deuro-tensorflow | python | def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg, num_shards, gpu_indices):
'Apply all-reduce algorithm over specified gradient tensors.\n\n Args:\n dev_prefixes: list of prefix strings to use to generate PS device names.\n replica_grads: the gradients to reduce.\n num_workers: number of worker processes across entire job.\n alg: the all-reduce algorithm to apply.\n num_shards: alg-specific sharding factor.\n gpu_indices: indices of local GPUs in order usable for ring-reduce.\n\n Returns:\n list of reduced tensors\n '
alg_contains_shuffle = any([(n in alg) for n in ['pscpu', 'psgpu']])
is_hierarchical = ('/' in alg)
if ('pscpu' in alg):
aux_devices = [(prefix + '/cpu:0') for prefix in dev_prefixes]
elif ('psgpu' in alg):
aux_devices = [(prefix + ('/gpu:%d' % i)) for i in range(len(gpu_indices)) for prefix in dev_prefixes]
else:
aux_devices = ['/job:localhost/cpu:0']
aux_device_groups = group_device_names(aux_devices, (num_shards if alg_contains_shuffle else 1))
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg, gpu_indices, (aux_devices if is_hierarchical else aux_device_groups[group_index]), num_shards))
group_index = ((group_index + 1) % len(aux_device_groups))
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads |
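
A quick sketch of the auxiliary-device naming in sum_gradients_all_reduce above for alg='pscpu': each worker prefix contributes its CPU device as a gather point. The prefixes are made-up examples.

dev_prefixes = ['/job:worker/replica:0/task:0', '/job:worker/replica:0/task:1']
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
print(aux_devices)
# ['/job:worker/replica:0/task:0/cpu:0', '/job:worker/replica:0/task:1/cpu:0']
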
def extract_ranges(index_list, range_size_limit=32):
'Extract consecutive ranges and singles from index_list.\n\n Args:\n index_list: List of monotone increasing non-negative integers.\n range_size_limit: Largest size range to return. If a larger\n consecutive range exists, it will be returned as multiple\n ranges.\n\n Returns:\n (ranges, singles) where ranges is a list of [first, last] pairs of\n consecutive elements in index_list, and singles is all of the\n other elements, in original order.\n '
if (not index_list):
return ([], [])
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if ((i == (last + 1)) and ((last - first) <= range_size_limit)):
last = i
else:
if (last > first):
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if (last > first):
ranges.append([first, last])
else:
singles.append(first)
return (ranges, singles) | 7,368,154,166,958,740,000 | Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order. | tensorflow/python/distribute/cross_device_utils.py | extract_ranges | DeuroIO/Deuro-tensorflow | python | def extract_ranges(index_list, range_size_limit=32):
'Extract consecutive ranges and singles from index_list.\n\n Args:\n index_list: List of monotone increasing non-negative integers.\n range_size_limit: Largest size range to return. If a larger\n consecutive range exists, it will be returned as multiple\n ranges.\n\n Returns:\n (ranges, singles) where ranges is a list of [first, last] pairs of\n consecutive elements in index_list, and singles is all of the\n other elements, in original order.\n '
if (not index_list):
return ([], [])
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if ((i == (last + 1)) and ((last - first) <= range_size_limit)):
last = i
else:
if (last > first):
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if (last > first):
ranges.append([first, last])
else:
singles.append(first)
return (ranges, singles) |
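
A worked example of extract_ranges above (assuming the function is in scope): consecutive runs collapse into [first, last] pairs and everything else is returned as singles, preserving order.

print(extract_ranges([2, 3, 4, 7, 9, 10, 11, 15]))
# ([[2, 4], [9, 11]], [7, 15])
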