body (string, 26–98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (1 class) | body_without_docstring (string, 20–98.2k chars)
---|---|---|---|---|---|---|---|
def process_articles(articles_list):
'\n function that checks the articles and processes them into instances\n '
articles_object = []
for article_item in articles_list:
author = article_item.get('name')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
image = article_item.get('urlToImage')
date = article_item.get('publishedAt')
if image:
articles_result = Articles(author, title, description, url, image, date)
articles_object.append(articles_result)
return articles_object | 360,291,847,631,122,400 | function that checks the articles and processes them into instances | app/requests.py | process_articles | ClarisseU/newsHighlight | python | def process_articles(articles_list):
'\n \n '
articles_object = []
for article_item in articles_list:
author = article_item.get('name')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
image = article_item.get('urlToImage')
date = article_item.get('publishedAt')
if image:
articles_result = Articles(author, title, description, url, image, date)
articles_object.append(articles_result)
return articles_object |
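
To make the first row concrete, here is a minimal, hypothetical usage sketch. The `Articles` constructor signature is inferred from the call site above, not taken from the ClarisseU/newsHighlight repository.

```python
# Hypothetical stand-in for the Articles model; only its constructor
# arity is known from the call site in process_articles.
class Articles:
    def __init__(self, author, title, description, url, image, date):
        self.author, self.title, self.description = author, title, description
        self.url, self.image, self.date = url, image, date

sample = [
    {'name': 'Jane Doe', 'title': 'Hello', 'description': 'Demo article',
     'url': 'https://example.com/a', 'urlToImage': 'https://example.com/a.png',
     'publishedAt': '2021-01-01'},
    {'name': 'John Roe', 'title': 'No image', 'description': 'Skipped',
     'url': 'https://example.com/b', 'urlToImage': None,
     'publishedAt': '2021-01-02'},
]
print(len(process_articles(sample)))  # 1 -- the item without an image is dropped
```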
def validate(self, task):
"Checks required info on 'driver_info' and validates node with OneView\n\n Validates whether the 'driver_info' property of the supplied\n task's node contains the required info such as server_hardware_uri,\n server_hardware_type, server_profile_template_uri and\n enclosure_group_uri. Also, checks if the server profile of the node is\n applied, if NICs are valid for the server profile of the node, and if\n the server hardware attributes (ram, memory, vcpus count) are\n consistent with OneView.\n\n :param task: a task from TaskManager.\n :raises: InvalidParameterValue if parameters set are inconsistent with\n resources in OneView\n "
common.verify_node_info(task.node)
try:
common.validate_oneview_resources_compatibility(task)
except exception.OneViewError as oneview_exc:
raise exception.InvalidParameterValue(oneview_exc) | -960,376,113,376,328,300 | Checks required info on 'driver_info' and validates node with OneView
Validates whether the 'driver_info' property of the supplied
task's node contains the required info such as server_hardware_uri,
server_hardware_type, server_profile_template_uri and
enclosure_group_uri. Also, checks if the server profile of the node is
applied, if NICs are valid for the server profile of the node, and if
the server hardware attributes (ram, memory, vcpus count) are
consistent with OneView.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if parameters set are inconsistent with
resources in OneView | ironic/drivers/modules/oneview/management.py | validate | ISCAS-VDI/ironic-base | python | def validate(self, task):
"Checks required info on 'driver_info' and validates node with OneView\n\n Validates whether the 'driver_info' property of the supplied\n task's node contains the required info such as server_hardware_uri,\n server_hardware_type, server_profile_template_uri and\n enclosure_group_uri. Also, checks if the server profile of the node is\n applied, if NICs are valid for the server profile of the node, and if\n the server hardware attributes (ram, memory, vcpus count) are\n consistent with OneView.\n\n :param task: a task from TaskManager.\n :raises: InvalidParameterValue if parameters set are inconsistent with\n resources in OneView\n "
common.verify_node_info(task.node)
try:
common.validate_oneview_resources_compatibility(task)
except exception.OneViewError as oneview_exc:
raise exception.InvalidParameterValue(oneview_exc) |
def get_supported_boot_devices(self, task):
'Gets a list of the supported boot devices.\n\n :param task: a task from TaskManager.\n :returns: A list with the supported boot devices defined\n in :mod:`ironic.common.boot_devices`.\n '
return sorted(BOOT_DEVICE_MAPPING_TO_OV.keys()) | 6,042,941,514,869,878,000 | Gets a list of the supported boot devices.
:param task: a task from TaskManager.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`. | ironic/drivers/modules/oneview/management.py | get_supported_boot_devices | ISCAS-VDI/ironic-base | python | def get_supported_boot_devices(self, task):
'Gets a list of the supported boot devices.\n\n :param task: a task from TaskManager.\n :returns: A list with the supported boot devices defined\n in :mod:`ironic.common.boot_devices`.\n '
return sorted(BOOT_DEVICE_MAPPING_TO_OV.keys()) |
@task_manager.require_exclusive_lock
@common.node_has_server_profile
def set_boot_device(self, task, device, persistent=False):
'Sets the boot device for a node.\n\n Sets the boot device to use on next reboot of the node.\n\n :param task: a task from TaskManager.\n :param device: the boot device, one of the supported devices\n listed in :mod:`ironic.common.boot_devices`.\n :param persistent: Boolean value. True if the boot device will\n persist to all future boots, False if not.\n Default: False.\n :raises: InvalidParameterValue if an invalid boot device is\n specified.\n :raises: OperationNotPermitted if the server has no server profile or\n if the server is already powered on.\n :raises: OneViewError if the communication with OneView fails\n '
oneview_info = common.get_oneview_info(task.node)
if (device not in self.get_supported_boot_devices(task)):
raise exception.InvalidParameterValue((_('Invalid boot device %s specified.') % device))
LOG.debug('Setting boot device to %(device)s for node %(node)s', {'device': device, 'node': task.node.uuid})
try:
oneview_client = common.get_oneview_client()
device_to_oneview = BOOT_DEVICE_MAPPING_TO_OV.get(device)
oneview_client.set_boot_device(oneview_info, device_to_oneview)
except oneview_exceptions.OneViewException as oneview_exc:
msg = (_('Error setting boot device on OneView. Error: %s') % oneview_exc)
LOG.error(msg)
raise exception.OneViewError(error=msg) | -2,033,430,274,960,628,500 | Sets the boot device for a node.
Sets the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of the supported devices
listed in :mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue if an invalid boot device is
specified.
:raises: OperationNotPermitted if the server has no server profile or
if the server is already powered on.
:raises: OneViewError if the communication with OneView fails | ironic/drivers/modules/oneview/management.py | set_boot_device | ISCAS-VDI/ironic-base | python | @task_manager.require_exclusive_lock
@common.node_has_server_profile
def set_boot_device(self, task, device, persistent=False):
'Sets the boot device for a node.\n\n Sets the boot device to use on next reboot of the node.\n\n :param task: a task from TaskManager.\n :param device: the boot device, one of the supported devices\n listed in :mod:`ironic.common.boot_devices`.\n :param persistent: Boolean value. True if the boot device will\n persist to all future boots, False if not.\n Default: False.\n :raises: InvalidParameterValue if an invalid boot device is\n specified.\n :raises: OperationNotPermitted if the server has no server profile or\n if the server is already powered on.\n :raises: OneViewError if the communication with OneView fails\n '
oneview_info = common.get_oneview_info(task.node)
if (device not in self.get_supported_boot_devices(task)):
raise exception.InvalidParameterValue((_('Invalid boot device %s specified.') % device))
LOG.debug('Setting boot device to %(device)s for node %(node)s', {'device': device, 'node': task.node.uuid})
try:
oneview_client = common.get_oneview_client()
device_to_oneview = BOOT_DEVICE_MAPPING_TO_OV.get(device)
oneview_client.set_boot_device(oneview_info, device_to_oneview)
except oneview_exceptions.OneViewException as oneview_exc:
msg = (_('Error setting boot device on OneView. Error: %s') % oneview_exc)
LOG.error(msg)
raise exception.OneViewError(error=msg) |
@common.node_has_server_profile
def get_boot_device(self, task):
"Get the current boot device for the task's node.\n\n Provides the current boot device of the node.\n\n :param task: a task from TaskManager.\n :returns: a dictionary containing:\n :boot_device: the boot device, one of\n :mod:`ironic.common.boot_devices` [PXE, DISK, CDROM]\n :persistent: Whether the boot device will persist to all\n future boots or not, None if it is unknown.\n :raises: OperationNotPermitted if no Server Profile is associated with\n the node\n :raises: InvalidParameterValue if the boot device is unknown\n :raises: OneViewError if the communication with OneView fails\n "
oneview_info = common.get_oneview_info(task.node)
try:
oneview_client = common.get_oneview_client()
boot_order = oneview_client.get_boot_order(oneview_info)
except oneview_exceptions.OneViewException as oneview_exc:
msg = (_('Error getting boot device from OneView. Error: %s') % oneview_exc)
LOG.error(msg)
raise exception.OneViewError(msg)
primary_device = boot_order[0]
if (primary_device not in BOOT_DEVICE_OV_TO_GENERIC):
raise exception.InvalidParameterValue((_('Unsupported boot Device %(device)s for Node: %(node)s') % {'device': primary_device, 'node': task.node.uuid}))
boot_device = {'boot_device': BOOT_DEVICE_OV_TO_GENERIC.get(primary_device), 'persistent': True}
return boot_device | 4,658,699,646,099,227,000 | Get the current boot device for the task's node.
Provides the current boot device of the node.
:param task: a task from TaskManager.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` [PXE, DISK, CDROM]
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
:raises: OperationNotPermitted if no Server Profile is associated with
the node
:raises: InvalidParameterValue if the boot device is unknown
:raises: OneViewError if the communication with OneView fails | ironic/drivers/modules/oneview/management.py | get_boot_device | ISCAS-VDI/ironic-base | python | @common.node_has_server_profile
def get_boot_device(self, task):
"Get the current boot device for the task's node.\n\n Provides the current boot device of the node.\n\n :param task: a task from TaskManager.\n :returns: a dictionary containing:\n :boot_device: the boot device, one of\n :mod:`ironic.common.boot_devices` [PXE, DISK, CDROM]\n :persistent: Whether the boot device will persist to all\n future boots or not, None if it is unknown.\n :raises: OperationNotPermitted if no Server Profile is associated with\n the node\n :raises: InvalidParameterValue if the boot device is unknown\n :raises: OneViewError if the communication with OneView fails\n "
oneview_info = common.get_oneview_info(task.node)
try:
oneview_client = common.get_oneview_client()
boot_order = oneview_client.get_boot_order(oneview_info)
except oneview_exceptions.OneViewException as oneview_exc:
msg = (_('Error getting boot device from OneView. Error: %s') % oneview_exc)
LOG.error(msg)
raise exception.OneViewError(msg)
primary_device = boot_order[0]
if (primary_device not in BOOT_DEVICE_OV_TO_GENERIC):
raise exception.InvalidParameterValue((_('Unsupported boot Device %(device)s for Node: %(node)s') % {'device': primary_device, 'node': task.node.uuid}))
boot_device = {'boot_device': BOOT_DEVICE_OV_TO_GENERIC.get(primary_device), 'persistent': True}
return boot_device |
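
The two boot-device rows above depend on module-level mapping dicts that are not shown in this dump. A plausible sketch follows, assuming ironic's standard boot device names; the OneView-side strings are guesses inferred from usage, not confirmed against the repository.

```python
# Hypothetical sketch of the two mappings the driver relies on.
from ironic.common import boot_devices

BOOT_DEVICE_MAPPING_TO_OV = {
    boot_devices.DISK: 'HardDisk',   # assumed OneView name
    boot_devices.PXE: 'PXE',         # assumed OneView name
    boot_devices.CDROM: 'CD',        # assumed OneView name
}
# Reverse map used by get_boot_device() to translate a OneView boot-order
# entry back into an ironic boot device name.
BOOT_DEVICE_OV_TO_GENERIC = {v: k for k, v in BOOT_DEVICE_MAPPING_TO_OV.items()}
```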
def get_sensors_data(self, task):
'Get sensors data.\n\n Not implemented by this driver.\n :param task: a TaskManager instance.\n '
raise NotImplementedError() | 5,636,585,762,757,366,000 | Get sensors data.
Not implemented by this driver.
:param task: a TaskManager instance. | ironic/drivers/modules/oneview/management.py | get_sensors_data | ISCAS-VDI/ironic-base | python | def get_sensors_data(self, task):
'Get sensors data.\n\n Not implemented by this driver.\n :param task: a TaskManager instance.\n '
raise NotImplementedError() |
def post(self, body: 'CustomerSignin', *, headers: typing.Dict[(str, str)]=None, options: typing.Dict[(str, typing.Any)]=None) -> typing.Optional['CustomerSignInResult']:
'Authenticate Customer (Sign In). Retrieves the authenticated\n customer (a customer that matches the given email/password pair).\n If used with an access token for Anonymous Sessions,\n all orders and carts belonging to the anonymousId will be assigned to the newly created customer.\n If a cart is returned as part of the CustomerSignInResult,\n it has been recalculated (It will have up-to-date prices, taxes and discounts,\n and invalid line items have been removed.).\n\n '
headers = ({} if (headers is None) else headers)
response = self._client._post(endpoint=f'/{self._project_key}/login', params={}, json=body.serialize(), headers={'Content-Type': 'application/json', **headers}, options=options)
if (response.status_code in (201, 200)):
return CustomerSignInResult.deserialize(response.json())
elif (response.status_code in (400, 401, 403, 500, 503)):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif (response.status_code == 404):
return None
elif (response.status_code == 200):
return None
warnings.warn(('Unhandled status code %d' % response.status_code)) | -218,325,888,209,012,030 | Authenticate Customer (Sign In). Retrieves the authenticated
customer (a customer that matches the given email/password pair).
If used with an access token for Anonymous Sessions,
all orders and carts belonging to the anonymousId will be assigned to the newly created customer.
If a cart is returned as part of the CustomerSignInResult,
it has been recalculated (It will have up-to-date prices, taxes and discounts,
and invalid line items have been removed.). | src/commercetools/platform/client/login/by_project_key_login_request_builder.py | post | labd/commercetools-python-sdk | python | def post(self, body: 'CustomerSignin', *, headers: typing.Dict[(str, str)]=None, options: typing.Dict[(str, typing.Any)]=None) -> typing.Optional['CustomerSignInResult']:
'Authenticate Customer (Sign In). Retrieves the authenticated\n customer (a customer that matches the given email/password pair).\n If used with an access token for Anonymous Sessions,\n all orders and carts belonging to the anonymousId will be assigned to the newly created customer.\n If a cart is returned as part of the CustomerSignInResult,\n it has been recalculated (It will have up-to-date prices, taxes and discounts,\n and invalid line items have been removed.).\n\n '
headers = ({} if (headers is None) else headers)
response = self._client._post(endpoint=f'/{self._project_key}/login', params={}, json=body.serialize(), headers={'Content-Type': 'application/json', **headers}, options=options)
if (response.status_code in (201, 200)):
return CustomerSignInResult.deserialize(response.json())
elif (response.status_code in (400, 401, 403, 500, 503)):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif (response.status_code == 404):
return None
elif (response.status_code == 200):
return None
warnings.warn(('Unhandled status code %d' % response.status_code)) |
def __init__(self, vpcId, subnetId, instanceVersion, instanceName, azId, instanceClass, ipVersion=None, dedicatedMaster=None, coordinating=None, autoSnapshot=None, authConfig=None):
'\n :param vpcId: VPC ID of the private network\n :param subnetId: subnet ID\n :param instanceVersion: ES version; currently 5.6.9 and 6.5.4 are supported\n :param instanceName: ES cluster name; must not be empty, may only contain letters, digits, underscores or hyphens, must start with a letter, and must not exceed 32 characters\n :param azId: availability zone; for zone codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/restrictions\n :param instanceClass: instance specification; for spec codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/specifications\n :param ipVersion: (Optional) whether IPv6 is supported; set to v4&v6 if supported, leave empty if not\n :param dedicatedMaster: (Optional) whether to include dedicated master nodes; defaults to false\n :param coordinating: (Optional) whether to include coordinating nodes; defaults to false\n :param autoSnapshot: (Optional) automatic snapshot settings.\n :param authConfig: (Optional) ES data-plane authentication settings\n '
self.vpcId = vpcId
self.subnetId = subnetId
self.instanceVersion = instanceVersion
self.instanceName = instanceName
self.azId = azId
self.instanceClass = instanceClass
self.ipVersion = ipVersion
self.dedicatedMaster = dedicatedMaster
self.coordinating = coordinating
self.autoSnapshot = autoSnapshot
self.authConfig = authConfig | 6,539,072,422,323,259,000 | :param vpcId: VPC ID of the private network
:param subnetId: subnet ID
:param instanceVersion: ES version; currently 5.6.9 and 6.5.4 are supported
:param instanceName: ES cluster name; must not be empty, may only contain letters, digits, underscores or hyphens, must start with a letter, and must not exceed 32 characters
:param azId: availability zone; for zone codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/restrictions
:param instanceClass: instance specification; for spec codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/specifications
:param ipVersion: (Optional) whether IPv6 is supported; set to v4&v6 if supported, leave empty if not
:param dedicatedMaster: (Optional) whether to include dedicated master nodes; defaults to false
:param coordinating: (Optional) whether to include coordinating nodes; defaults to false
:param autoSnapshot: (Optional) automatic snapshot settings.
:param authConfig: (Optional) ES data-plane authentication settings | jdcloud_sdk/services/es/models/InstanceSpec.py | __init__ | Tanc009/jdcloud-sdk-python | python | def __init__(self, vpcId, subnetId, instanceVersion, instanceName, azId, instanceClass, ipVersion=None, dedicatedMaster=None, coordinating=None, autoSnapshot=None, authConfig=None):
'\n :param vpcId: VPC ID of the private network\n :param subnetId: subnet ID\n :param instanceVersion: ES version; currently 5.6.9 and 6.5.4 are supported\n :param instanceName: ES cluster name; must not be empty, may only contain letters, digits, underscores or hyphens, must start with a letter, and must not exceed 32 characters\n :param azId: availability zone; for zone codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/restrictions\n :param instanceClass: instance specification; for spec codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/specifications\n :param ipVersion: (Optional) whether IPv6 is supported; set to v4&v6 if supported, leave empty if not\n :param dedicatedMaster: (Optional) whether to include dedicated master nodes; defaults to false\n :param coordinating: (Optional) whether to include coordinating nodes; defaults to false\n :param autoSnapshot: (Optional) automatic snapshot settings.\n :param authConfig: (Optional) ES data-plane authentication settings\n '
self.vpcId = vpcId
self.subnetId = subnetId
self.instanceVersion = instanceVersion
self.instanceName = instanceName
self.azId = azId
self.instanceClass = instanceClass
self.ipVersion = ipVersion
self.dedicatedMaster = dedicatedMaster
self.coordinating = coordinating
self.autoSnapshot = autoSnapshot
self.authConfig = authConfig |
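
A hypothetical construction of the `InstanceSpec` above; every value is a placeholder, and the real zone and spec codes would come from the JD Cloud documentation pages linked in the docstring.

```python
spec = InstanceSpec(
    vpcId='vpc-xxxxxxxx',          # placeholder VPC ID
    subnetId='subnet-xxxxxxxx',    # placeholder subnet ID
    instanceVersion='6.5.4',       # one of the two supported versions
    instanceName='demo-es',        # letters/digits/_/-, starts with a letter
    azId='cn-north-1a',            # placeholder zone code
    instanceClass='es.s1.medium',  # placeholder spec code
    ipVersion='v4&v6',
    dedicatedMaster=False)
```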
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
'Checks whether the casing config is consistent with the checkpoint name.'
if (not init_checkpoint):
return
m = re.match('^.*?([A-Za-z0-9_-]+)/bert_model.ckpt', init_checkpoint)
if (m is None):
return
model_name = m.group(1)
lower_models = ['uncased_L-24_H-1024_A-16', 'uncased_L-12_H-768_A-12', 'multilingual_L-12_H-768_A-12', 'chinese_L-12_H-768_A-12']
cased_models = ['cased_L-12_H-768_A-12', 'cased_L-24_H-1024_A-16', 'multi_cased_L-12_H-768_A-12']
is_bad_config = False
if ((model_name in lower_models) and (not do_lower_case)):
is_bad_config = True
actual_flag = 'False'
case_name = 'lowercased'
opposite_flag = 'True'
if ((model_name in cased_models) and do_lower_case):
is_bad_config = True
actual_flag = 'True'
case_name = 'cased'
opposite_flag = 'False'
if is_bad_config:
raise ValueError(('You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. However, `%s` seems to be a %s model, so you should pass in `--do_lower_case=%s` so that the fine-tuning matches how the model was pre-trained. If this error is wrong, please just comment out this check.' % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)))
if (not init_checkpoint):
return
m = re.match('^.*?([A-Za-z0-9_-]+)/bert_model.ckpt', init_checkpoint)
if (m is None):
return
model_name = m.group(1)
lower_models = ['uncased_L-24_H-1024_A-16', 'uncased_L-12_H-768_A-12', 'multilingual_L-12_H-768_A-12', 'chinese_L-12_H-768_A-12']
cased_models = ['cased_L-12_H-768_A-12', 'cased_L-24_H-1024_A-16', 'multi_cased_L-12_H-768_A-12']
is_bad_config = False
if ((model_name in lower_models) and (not do_lower_case)):
is_bad_config = True
actual_flag = 'False'
case_name = 'lowercased'
opposite_flag = 'True'
if ((model_name in cased_models) and do_lower_case):
is_bad_config = True
actual_flag = 'True'
case_name = 'cased'
opposite_flag = 'False'
if is_bad_config:
raise ValueError(('You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. However, `%s` seems to be a %s model, so you should pass in `--do_lower_case=%s` so that the fine-tuning matches how the model was pre-trained. If this error is wrong, please just comment out this check.' % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)))
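
A quick illustration of the check, assuming `tokenization.py` is importable: passing the lowercasing flag against a cased checkpoint name trips the error.

```python
# OK: an uncased checkpoint with do_lower_case=True passes silently.
validate_case_matches_checkpoint(True, 'uncased_L-12_H-768_A-12/bert_model.ckpt')

# Mismatch: a cased checkpoint with do_lower_case=True raises ValueError,
# telling the caller to rerun with --do_lower_case=False.
validate_case_matches_checkpoint(True, 'cased_L-12_H-768_A-12/bert_model.ckpt')
```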
def convert_to_unicode(text):
"Converts `text` to Unicode (if it's not already), assuming utf-8 input."
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode('utf-8', 'ignore')
elif isinstance(text, unicode):
return text
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
else:
raise ValueError('Not running on Python2 or Python 3?') | 7,858,268,315,781,390,000 | Converts `text` to Unicode (if it's not already), assuming utf-8 input. | preliminary_contest/nezha_pretrain/tokenization.py | convert_to_unicode | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def convert_to_unicode(text):
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode('utf-8', 'ignore')
elif isinstance(text, unicode):
return text
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
else:
raise ValueError('Not running on Python2 or Python 3?') |
def printable_text(text):
'Returns text encoded in a way suitable for print or `tf.logging`.'
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode('utf-8')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
else:
raise ValueError('Not running on Python2 or Python 3?') | -7,835,982,317,921,145,000 | Returns text encoded in a way suitable for print or `tf.logging`. | preliminary_contest/nezha_pretrain/tokenization.py | printable_text | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def printable_text(text):
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode('utf-8')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
else:
raise ValueError('Not running on Python2 or Python 3?') |
def load_vocab(vocab_file):
'Loads a vocabulary file into a dictionary.'
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, 'r') as reader:
while True:
token = convert_to_unicode(reader.readline())
if (not token):
break
token = token.strip()
vocab[token] = index
index += 1
return vocab | -2,813,742,898,773,739,500 | Loads a vocabulary file into a dictionary. | preliminary_contest/nezha_pretrain/tokenization.py | load_vocab | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def load_vocab(vocab_file):
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, 'r') as reader:
while True:
token = convert_to_unicode(reader.readline())
if (not token):
break
token = token.strip()
vocab[token] = index
index += 1
return vocab |
def convert_by_vocab(vocab, items):
'Converts a sequence of [tokens|ids] using the vocab.'
output = []
for item in items:
output.append(vocab[item])
return output | 2,467,283,768,439,950,300 | Converts a sequence of [tokens|ids] using the vocab. | preliminary_contest/nezha_pretrain/tokenization.py | convert_by_vocab | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def convert_by_vocab(vocab, items):
output = []
for item in items:
output.append(vocab[item])
return output |
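
`load_vocab` and `convert_by_vocab` compose into a simple round trip; a sketch assuming a plain one-token-per-line `vocab.txt`. Note that `convert_by_vocab` raises `KeyError` on out-of-vocabulary items.

```python
vocab = load_vocab('vocab.txt')               # OrderedDict: token -> id
inv_vocab = {v: k for k, v in vocab.items()}  # id -> token
ids = convert_by_vocab(vocab, ['hello', 'world'])
assert convert_by_vocab(inv_vocab, ids) == ['hello', 'world']
```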
def whitespace_tokenize(text):
'Runs basic whitespace cleaning and splitting on a piece of text.'
text = text.strip()
if (not text):
return []
tokens = text.split()
return tokens | -484,864,102,338,264,200 | Runs basic whitespace cleaning and splitting on a piece of text. | preliminary_contest/nezha_pretrain/tokenization.py | whitespace_tokenize | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def whitespace_tokenize(text):
text = text.strip()
if (not text):
return []
tokens = text.split()
return tokens |
def _is_whitespace(char):
'Checks whether `chars` is a whitespace character.'
if ((char == ' ') or (char == '\t') or (char == '\n') or (char == '\r')):
return True
cat = unicodedata.category(char)
if (cat == 'Zs'):
return True
return False | 3,881,062,857,002,170,400 | Checks whether `chars` is a whitespace character. | preliminary_contest/nezha_pretrain/tokenization.py | _is_whitespace | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def _is_whitespace(char):
if ((char == ' ') or (char == '\t') or (char == '\n') or (char == '\r')):
return True
cat = unicodedata.category(char)
if (cat == 'Zs'):
return True
return False |
def _is_control(char):
'Checks whether `chars` is a control character.'
if ((char == '\t') or (char == '\n') or (char == '\r')):
return False
cat = unicodedata.category(char)
if (cat in ('Cc', 'Cf')):
return True
return False | 5,162,404,181,535,495,000 | Checks whether `chars` is a control character. | preliminary_contest/nezha_pretrain/tokenization.py | _is_control | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def _is_control(char):
if ((char == '\t') or (char == '\n') or (char == '\r')):
return False
cat = unicodedata.category(char)
if (cat in ('Cc', 'Cf')):
return True
return False |
def _is_punctuation(char):
'Checks whether `chars` is a punctuation character.'
cp = ord(char)
if ((33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False | -2,548,854,895,040,612,000 | Checks whether `chars` is a punctuation character. | preliminary_contest/nezha_pretrain/tokenization.py | _is_punctuation | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def _is_punctuation(char):
cp = ord(char)
if ((33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False |
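
The three character predicates partition characters by Unicode category; a small check of their behaviour on representative inputs:

```python
for ch in [' ', '\t', '\u00a0', '\x00', ',', 'A']:
    print(repr(ch), _is_whitespace(ch), _is_control(ch), _is_punctuation(ch))
# ' ', '\t'   -> whitespace (tab is treated as whitespace, not control)
# '\u00a0'    -> whitespace (Unicode category Zs)
# '\x00'      -> control (category Cc)
# ','         -> punctuation; 'A' matches none of the three
```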
def __init__(self, do_lower_case=True):
'Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n '
self.do_lower_case = do_lower_case | 5,147,819,055,757,263,000 | Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input. | preliminary_contest/nezha_pretrain/tokenization.py | __init__ | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def __init__(self, do_lower_case=True):
'Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n '
self.do_lower_case = do_lower_case |
def tokenize(self, text):
'Tokenizes a piece of text.'
text = convert_to_unicode(text)
text = self._clean_text(text)
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(' '.join(split_tokens))
return output_tokens | 4,853,689,665,837,514,000 | Tokenizes a piece of text. | preliminary_contest/nezha_pretrain/tokenization.py | tokenize | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def tokenize(self, text):
text = convert_to_unicode(text)
text = self._clean_text(text)
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(' '.join(split_tokens))
return output_tokens |
def _run_strip_accents(self, text):
'Strips accents from a piece of text.'
text = unicodedata.normalize('NFD', text)
output = []
for char in text:
cat = unicodedata.category(char)
if (cat == 'Mn'):
continue
output.append(char)
return ''.join(output) | 4,298,915,465,928,504,300 | Strips accents from a piece of text. | preliminary_contest/nezha_pretrain/tokenization.py | _run_strip_accents | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def _run_strip_accents(self, text):
text = unicodedata.normalize('NFD', text)
output = []
for char in text:
cat = unicodedata.category(char)
if (cat == 'Mn'):
continue
output.append(char)
return ''.join(output)
def _run_split_on_punc(self, text):
'Splits punctuation on a piece of text.'
chars = list(text)
i = 0
start_new_word = True
output = []
while (i < len(chars)):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[(- 1)].append(char)
i += 1
return [''.join(x) for x in output] | -707,336,820,577,759,600 | Splits punctuation on a piece of text. | preliminary_contest/nezha_pretrain/tokenization.py | _run_split_on_punc | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def _run_split_on_punc(self, text):
chars = list(text)
i = 0
start_new_word = True
output = []
while (i < len(chars)):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[(- 1)].append(char)
i += 1
return [''.join(x) for x in output]
def _tokenize_chinese_chars(self, text):
'Adds whitespace around any CJK character.'
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(' ')
output.append(char)
output.append(' ')
else:
output.append(char)
return ''.join(output) | 7,613,325,756,461,659,000 | Adds whitespace around any CJK character. | preliminary_contest/nezha_pretrain/tokenization.py | _tokenize_chinese_chars | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def _tokenize_chinese_chars(self, text):
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(' ')
output.append(char)
output.append(' ')
else:
output.append(char)
return ''.join(output)
def _is_chinese_char(self, cp):
'Checks whether CP is the codepoint of a CJK character.'
if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
return True
return False | -2,258,667,811,557,816,800 | Checks whether CP is the codepoint of a CJK character. | preliminary_contest/nezha_pretrain/tokenization.py | _is_chinese_char | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def _is_chinese_char(self, cp):
if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
return True
return False |
def _clean_text(self, text):
'Performs invalid character removal and whitespace cleanup on text.'
output = []
for char in text:
cp = ord(char)
if ((cp == 0) or (cp == 65533) or _is_control(char)):
continue
if _is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output) | 5,410,140,919,715,612,000 | Performs invalid character removal and whitespace cleanup on text. | preliminary_contest/nezha_pretrain/tokenization.py | _clean_text | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if ((cp == 0) or (cp == 65533) or _is_control(char)):
continue
if _is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output)
def tokenize(self, text):
'Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = "unaffable"\n output = ["un", "##aff", "##able"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer`.\n\n Returns:\n A list of wordpiece tokens.\n '
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if (len(chars) > self.max_input_chars_per_word):
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while (start < len(chars)):
end = len(chars)
cur_substr = None
while (start < end):
substr = ''.join(chars[start:end])
if (start > 0):
substr = ('##' + substr)
if (substr in self.vocab):
cur_substr = substr
break
end -= 1
if (cur_substr is None):
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens | 6,067,884,556,913,027,000 | Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens. | preliminary_contest/nezha_pretrain/tokenization.py | tokenize | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | python | def tokenize(self, text):
'Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = "unaffable"\n output = ["un", "##aff", "##able"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer`.\n\n Returns:\n A list of wordpiece tokens.\n '
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if (len(chars) > self.max_input_chars_per_word):
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while (start < len(chars)):
end = len(chars)
cur_substr = None
while (start < end):
substr = ''.join(chars[start:end])
if (start > 0):
substr = ('##' + substr)
if (substr in self.vocab):
cur_substr = substr
break
end -= 1
if (cur_substr is None):
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens |
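
The greedy longest-match-first loop in the row above is easiest to follow in isolation. Here is a self-contained sketch using the toy vocabulary from the docstring; the `[UNK]` token name follows the usual BERT convention and is an assumption here.

```python
toy_vocab = {'un', '##aff', '##able', '[UNK]'}

def wordpiece(token, vocab=toy_vocab, unk='[UNK]'):
    pieces, start = [], 0
    while start < len(token):
        end = len(token)
        while start < end:                       # shrink the window until a match
            sub = ('##' if start > 0 else '') + token[start:end]
            if sub in vocab:
                pieces.append(sub)
                break
            end -= 1
        else:                                    # no substring matched -> unknown
            return [unk]
        start = end                              # continue after the matched piece
    return pieces

print(wordpiece('unaffable'))  # ['un', '##aff', '##able']
```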
def get_source_responses(self, api_url: URL) -> List[requests.Response]:
'Override because we need to do a post request and need to separately get the entities.'
auth = self.basic_auth_credentials()
response = requests.post(api_url, timeout=self.TIMEOUT, auth=auth, json=dict(query=self.parameters.get('wiql', '')))
ids = ','.join([str(work_item['id']) for work_item in response.json().get('workItems', [])])
if (not ids):
return [response]
work_items_url = URL(f'{super().api_url()}/_apis/wit/workitems?ids={ids}&api-version=4.1')
return [response, requests.get(work_items_url, timeout=self.TIMEOUT, auth=auth)] | 5,593,757,160,839,150,000 | Override because we need to do a post request and need to separately get the entities. | components/collector/src/collectors/azure_devops.py | get_source_responses | Hedde/quality-time | python | def get_source_responses(self, api_url: URL) -> List[requests.Response]:
auth = self.basic_auth_credentials()
response = requests.post(api_url, timeout=self.TIMEOUT, auth=auth, json=dict(query=self.parameters.get('wiql', '')))
ids = ','.join([str(work_item['id']) for work_item in response.json().get('workItems', [])])
if (not ids):
return [response]
work_items_url = URL(f'{super().api_url()}/_apis/wit/workitems?ids={ids}&api-version=4.1')
return [response, requests.get(work_items_url, timeout=self.TIMEOUT, auth=auth)] |
def pick_dump_format(fmts):
'Choose a supported wave dumping format\n\n fmts is a list of formats that the chosen tool supports. Return the first\n that we think is possible (e.g. not fsdb if Verdi is not installed).\n\n '
assert fmts
fmt = fmts[0]
if ((fmt == 'fsdb') and (not shutil.which('verdi'))):
return pick_dump_format(fmts[1:])
return fmt | 6,492,956,016,820,254,000 | Choose a supported wave dumping format
fmts is a list of formats that the chosen tool supports. Return the first
that we think is possible (e.g. not fsdb if Verdi is not installed). | util/dvsim/SimCfg.py | pick_dump_format | courageheart/opentitan | python | def pick_dump_format(fmts):
'Choose a supported wave dumping format\n\n fmts is a list of formats that the chosen tool supports. Return the first\n that we think is possible (e.g. not fsdb if Verdi is not installed).\n\n '
assert fmts
fmt = fmts[0]
if ((fmt == 'fsdb') and (not shutil.which('verdi'))):
return pick_dump_format(fmts[1:])
return fmt |
def resolve_dump_format(tool, dump):
'Decide on the correct dumping format\n\n This is called after reading the config file. tool is the chosen tool,\n which will always have been resolved by this point. waves is a boolean\n which determines whether waves should be dumped at all (from the --waves\n argument). dump is the dumping format chosen on the command line or None.\n\n '
assert (tool is not None)
SUPPORTED_DUMP_FMTS = {'vcs': ['fsdb', 'vpd'], 'xcelium': ['fsdb', 'shm', 'vpd']}
fmts = SUPPORTED_DUMP_FMTS.get(tool)
if (dump is not None):
if ((fmts is not None) and (dump not in fmts)):
log.error('Chosen tool ({}) does not support wave dumping format {!r}.'.format(tool, dump))
sys.exit(1)
return dump
if (not fmts):
return 'vpd'
return pick_dump_format(fmts) | 7,731,358,679,955,228,000 | Decide on the correct dumping format
This is called after reading the config file. tool is the chosen tool,
which will always have been resolved by this point. waves is a boolean
which determines whether waves should be dumped at all (from the --waves
argument). dump is the dumping format chosen on the command line or None. | util/dvsim/SimCfg.py | resolve_dump_format | courageheart/opentitan | python | def resolve_dump_format(tool, dump):
'Decide on the correct dumping format\n\n This is called after reading the config file. tool is the chosen tool,\n which will always have been resolved by this point. waves is a boolean\n which determines whether waves should be dumped at all (from the --waves\n argument). dump is the dumping format chosen on the command line or None.\n\n '
assert (tool is not None)
SUPPORTED_DUMP_FMTS = {'vcs': ['fsdb', 'vpd'], 'xcelium': ['fsdb', 'shm', 'vpd']}
fmts = SUPPORTED_DUMP_FMTS.get(tool)
if (dump is not None):
if ((fmts is not None) and (dump not in fmts)):
log.error('Chosen tool ({}) does not support wave dumping format {!r}.'.format(tool, dump))
sys.exit(1)
return dump
if (not fmts):
return 'vpd'
return pick_dump_format(fmts) |
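
The interplay of the two functions above, spelled out; the return values assume a host where VCS is installed but Verdi is not on PATH.

```python
resolve_dump_format('vcs', None)     # -> 'vpd': 'fsdb' is skipped without Verdi
resolve_dump_format('vcs', 'fsdb')   # -> 'fsdb': an explicit choice is honoured
resolve_dump_format('questa', None)  # -> 'vpd': tool not in SUPPORTED_DUMP_FMTS
resolve_dump_format('vcs', 'shm')    # logs an error and exits: vcs lacks 'shm'
```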
def kill(self):
'kill running processes and jobs gracefully\n '
super().kill()
for item in self.cov_deploys:
item.kill() | 3,999,927,447,842,825,000 | kill running processes and jobs gracefully | util/dvsim/SimCfg.py | kill | courageheart/opentitan | python | def kill(self):
'\n '
super().kill()
for item in self.cov_deploys:
item.kill() |
def _create_dirs(self):
'Create initial set of directories\n '
create_link_dirs_cmd = ''
for link in self.links.keys():
create_link_dirs_cmd += (('/bin/rm -rf ' + self.links[link]) + ' && ')
create_link_dirs_cmd += (('mkdir -p ' + self.links[link]) + ' && ')
create_link_dirs_cmd += ' true'
try:
os.system(create_link_dirs_cmd)
except IOError:
log.error('Error running when running the cmd "%s"', create_link_dirs_cmd)
sys.exit(1) | -5,108,986,233,381,809,000 | Create initial set of directories | util/dvsim/SimCfg.py | _create_dirs | courageheart/opentitan | python | def _create_dirs(self):
'\n '
create_link_dirs_cmd = ''
for link in self.links.keys():
create_link_dirs_cmd += (('/bin/rm -rf ' + self.links[link]) + ' && ')
create_link_dirs_cmd += (('mkdir -p ' + self.links[link]) + ' && ')
create_link_dirs_cmd += ' true'
try:
os.system(create_link_dirs_cmd)
except IOError:
log.error('Error running when running the cmd "%s"', create_link_dirs_cmd)
sys.exit(1) |
def _create_deploy_objects(self):
'Create deploy objects from the build and run lists.\n '
self._create_build_and_run_list()
builds = []
build_map = {}
for build in self.build_list:
item = CompileSim(build, self)
builds.append(item)
build_map[build] = item
runs = []
for test in self.run_list:
for num in range(test.reseed):
item = RunTest(num, test, self)
if (self.build_only is False):
build_map[test.build_mode].sub.append(item)
runs.append(item)
self.builds = builds
self.runs = runs
if (self.run_only is True):
self.deploy = runs
else:
self.deploy = builds
if self.cov:
self.cov_merge_deploy = CovMerge(self)
self.cov_report_deploy = CovReport(self)
self.cov_merge_deploy.sub.append(self.cov_report_deploy)
self._create_dirs() | 870,195,557,923,621,200 | Create deploy objects from the build and run lists. | util/dvsim/SimCfg.py | _create_deploy_objects | courageheart/opentitan | python | def _create_deploy_objects(self):
'\n '
self._create_build_and_run_list()
builds = []
build_map = {}
for build in self.build_list:
item = CompileSim(build, self)
builds.append(item)
build_map[build] = item
runs = []
for test in self.run_list:
for num in range(test.reseed):
item = RunTest(num, test, self)
if (self.build_only is False):
build_map[test.build_mode].sub.append(item)
runs.append(item)
self.builds = builds
self.runs = runs
if (self.run_only is True):
self.deploy = runs
else:
self.deploy = builds
if self.cov:
self.cov_merge_deploy = CovMerge(self)
self.cov_report_deploy = CovReport(self)
self.cov_merge_deploy.sub.append(self.cov_report_deploy)
self._create_dirs() |
def create_deploy_objects(self):
'Public facing API for _create_deploy_objects().\n '
super().create_deploy_objects()
if self.cov:
for item in self.cfgs:
if item.cov:
self.cov_deploys.append(item.cov_merge_deploy) | 7,024,525,020,178,827,000 | Public facing API for _create_deploy_objects(). | util/dvsim/SimCfg.py | create_deploy_objects | courageheart/opentitan | python | def create_deploy_objects(self):
'\n '
super().create_deploy_objects()
if self.cov:
for item in self.cfgs:
if item.cov:
self.cov_deploys.append(item.cov_merge_deploy) |
def deploy_objects(self):
'This is a public facing API, so we use "self.cfgs" instead of self.\n '
super().deploy_objects()
if self.cov:
Deploy.deploy(self.cov_deploys) | 5,689,941,422,391,038,000 | This is a public facing API, so we use "self.cfgs" instead of self. | util/dvsim/SimCfg.py | deploy_objects | courageheart/opentitan | python | def deploy_objects(self):
'\n '
super().deploy_objects()
if self.cov:
Deploy.deploy(self.cov_deploys) |
def _cov_analyze(self):
'Use the last regression coverage data to open up the GUI tool to\n analyze the coverage.\n '
cov_analyze_deploy = CovAnalyze(self)
self.deploy = [cov_analyze_deploy] | -6,492,807,051,187,470,000 | Use the last regression coverage data to open up the GUI tool to
analyze the coverage. | util/dvsim/SimCfg.py | _cov_analyze | courageheart/opentitan | python | def _cov_analyze(self):
'Use the last regression coverage data to open up the GUI tool to\n analyze the coverage.\n '
cov_analyze_deploy = CovAnalyze(self)
self.deploy = [cov_analyze_deploy] |
def cov_analyze(self):
'Public facing API for analyzing coverage.\n '
for item in self.cfgs:
item._cov_analyze() | -4,459,116,738,002,939,000 | Public facing API for analyzing coverage. | util/dvsim/SimCfg.py | cov_analyze | courageheart/opentitan | python | def cov_analyze(self):
'\n '
for item in self.cfgs:
item._cov_analyze() |
def _gen_results(self):
'\n The function is called after the regression has completed. It collates the\n status of all run targets and generates a dict. It parses the testplan and\n maps the generated result to the testplan entries to generate a final table\n (list). It also prints the full list of failures for debug / triage. If cov\n is enabled, then the summary coverage report is also generated. The final\n result is in markdown format.\n '
def retrieve_result(name, results):
for item in results:
if (name == item['name']):
return item
return None
def gen_results_sub(items, results, fail_msgs):
'\n Generate the results table from the test runs (builds are ignored).\n The table has 3 columns - name, passing and total as a list of dicts.\n This is populated for all tests. The number of passing and total is\n in reference to the number of iterations or reseeds for that test.\n This list of dicts is directly consumed by the Testplan::results_table\n method for testplan mapping / annotation.\n '
for item in items:
if (item.status == 'F'):
fail_msgs += item.fail_msg
if (item.target == 'run'):
result = retrieve_result(item.name, results)
if (result is None):
result = {'name': item.name, 'passing': 0, 'total': 0}
results.append(result)
if (item.status == 'P'):
result['passing'] += 1
result['total'] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results, fail_msgs)
return (results, fail_msgs)
regr_results = []
fail_msgs = ''
deployed_items = self.deploy
if self.cov:
deployed_items.append(self.cov_merge_deploy)
(regr_results, fail_msgs) = gen_results_sub(deployed_items, regr_results, fail_msgs)
if (fail_msgs != ''):
fail_msgs = ('\n## List of Failures\n' + fail_msgs)
self.errors_seen = True
results_str = (('## ' + self.results_title) + '\n')
results_str += (('### ' + self.timestamp_long) + '\n')
if hasattr(self, 'testplan_doc_path'):
testplan = ((('https://' + self.doc_server) + '/') + getattr(self, 'testplan_doc_path'))
else:
testplan = ((('https://' + self.doc_server) + '/') + self.rel_path)
testplan = testplan.replace('/dv', '/doc/dv_plan/#testplan')
results_str += (('### [Testplan](' + testplan) + ')\n')
results_str += (('### Simulator: ' + self.tool.upper()) + '\n\n')
if (regr_results == []):
results_str += 'No results to display.\n'
else:
results_str += self.testplan.results_table(regr_results=regr_results, map_full_testplan=self.map_full_testplan)
results_str += '\n'
self.results_summary = self.testplan.results_summary
if self.cov:
if (self.cov_report_deploy.status == 'P'):
results_str += '\n## Coverage Results\n'
if hasattr(self, 'cov_report_page'):
results_str += '\n### [Coverage Dashboard]'
results_str += '({})\n\n'.format(getattr(self, 'cov_report_page'))
results_str += self.cov_report_deploy.cov_results
self.results_summary['Coverage'] = self.cov_report_deploy.cov_total
else:
self.results_summary['Coverage'] = '--'
self.results_summary['Name'] = self._get_results_page_link(self.results_summary['Name'])
self.results_md = (results_str + fail_msgs)
results_str += fail_msgs
results_file = (((self.scratch_path + '/results_') + self.timestamp) + '.md')
f = open(results_file, 'w')
f.write(self.results_md)
f.close()
log.info('[results page]: [%s] [%s]', self.name, results_file)
return results_str | -6,924,591,667,108,118,000 | The function is called after the regression has completed. It collates the
status of all run targets and generates a dict. It parses the testplan and
maps the generated result to the testplan entries to generate a final table
(list). It also prints the full list of failures for debug / triage. If cov
is enabled, then the summary coverage report is also generated. The final
result is in markdown format. | util/dvsim/SimCfg.py | _gen_results | courageheart/opentitan | python | def _gen_results(self):
'\n The function is called after the regression has completed. It collates the\n status of all run targets and generates a dict. It parses the testplan and\n maps the generated result to the testplan entries to generate a final table\n (list). It also prints the full list of failures for debug / triage. If cov\n is enabled, then the summary coverage report is also generated. The final\n result is in markdown format.\n '
def retrieve_result(name, results):
for item in results:
if (name == item['name']):
return item
return None
def gen_results_sub(items, results, fail_msgs):
'\n Generate the results table from the test runs (builds are ignored).\n The table has 3 columns - name, passing and total as a list of dicts.\n This is populated for all tests. The number of passing and total is\n in reference to the number of iterations or reseeds for that test.\n This list of dicts is directly consumed by the Testplan::results_table\n method for testplan mapping / annotation.\n '
for item in items:
if (item.status == 'F'):
fail_msgs += item.fail_msg
if (item.target == 'run'):
result = retrieve_result(item.name, results)
if (result is None):
result = {'name': item.name, 'passing': 0, 'total': 0}
results.append(result)
if (item.status == 'P'):
result['passing'] += 1
result['total'] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results, fail_msgs)
return (results, fail_msgs)
regr_results = []
fail_msgs = ''
deployed_items = self.deploy
if self.cov:
deployed_items.append(self.cov_merge_deploy)
(regr_results, fail_msgs) = gen_results_sub(deployed_items, regr_results, fail_msgs)
if (fail_msgs != ''):
fail_msgs = ('\n## List of Failures\n' + fail_msgs)
self.errors_seen = True
results_str = (('## ' + self.results_title) + '\n')
results_str += (('### ' + self.timestamp_long) + '\n')
if hasattr(self, 'testplan_doc_path'):
testplan = ((('https://' + self.doc_server) + '/') + getattr(self, 'testplan_doc_path'))
else:
testplan = ((('https://' + self.doc_server) + '/') + self.rel_path)
testplan = testplan.replace('/dv', '/doc/dv_plan/#testplan')
results_str += (('### [Testplan](' + testplan) + ')\n')
results_str += (('### Simulator: ' + self.tool.upper()) + '\n\n')
if (regr_results == []):
results_str += 'No results to display.\n'
else:
results_str += self.testplan.results_table(regr_results=regr_results, map_full_testplan=self.map_full_testplan)
results_str += '\n'
self.results_summary = self.testplan.results_summary
if self.cov:
if (self.cov_report_deploy.status == 'P'):
results_str += '\n## Coverage Results\n'
if hasattr(self, 'cov_report_page'):
results_str += '\n### [Coverage Dashboard]'
results_str += '({})\n\n'.format(getattr(self, 'cov_report_page'))
results_str += self.cov_report_deploy.cov_results
self.results_summary['Coverage'] = self.cov_report_deploy.cov_total
else:
self.results_summary['Coverage'] = '--'
self.results_summary['Name'] = self._get_results_page_link(self.results_summary['Name'])
self.results_md = (results_str + fail_msgs)
results_str += fail_msgs
results_file = (((self.scratch_path + '/results_') + self.timestamp) + '.md')
f = open(results_file, 'w')
f.write(self.results_md)
f.close()
log.info('[results page]: [%s] [%s]', self.name, results_file)
return results_str |
def _publish_results(self):
'Publish coverage results to the opentitan web server.'
super()._publish_results()
if self.cov:
results_server_dir_url = self.results_server_dir.replace(self.results_server_prefix, self.results_server_url_prefix)
log.info('Publishing coverage results to %s', results_server_dir_url)
cmd = ((((self.results_server_cmd + ' -m cp -R ') + self.cov_report_deploy.cov_report_dir) + ' ') + self.results_server_dir)
try:
cmd_output = subprocess.run(args=cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
log.log(VERBOSE, cmd_output.stdout.decode('utf-8'))
except Exception as e:
log.error('%s: Failed to publish results:\n"%s"', e, str(cmd)) | 4,380,815,372,334,516,700 | Publish coverage results to the opentitan web server. | util/dvsim/SimCfg.py | _publish_results | courageheart/opentitan | python | def _publish_results(self):
super()._publish_results()
if self.cov:
results_server_dir_url = self.results_server_dir.replace(self.results_server_prefix, self.results_server_url_prefix)
log.info('Publishing coverage results to %s', results_server_dir_url)
cmd = ((((self.results_server_cmd + ' -m cp -R ') + self.cov_report_deploy.cov_report_dir) + ' ') + self.results_server_dir)
try:
cmd_output = subprocess.run(args=cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
log.log(VERBOSE, cmd_output.stdout.decode('utf-8'))
except Exception as e:
log.error('%s: Failed to publish results:\n"%s"', e, str(cmd)) |
def gen_results_sub(items, results, fail_msgs):
'\n Generate the results table from the test runs (builds are ignored).\n The table has 3 columns - name, passing and total as a list of dicts.\n This is populated for all tests. The number of passing and total is\n in reference to the number of iterations or reseeds for that test.\n This list of dicts is directly consumed by the Testplan::results_table\n method for testplan mapping / annotation.\n '
for item in items:
if (item.status == 'F'):
fail_msgs += item.fail_msg
if (item.target == 'run'):
result = retrieve_result(item.name, results)
if (result is None):
result = {'name': item.name, 'passing': 0, 'total': 0}
results.append(result)
if (item.status == 'P'):
result['passing'] += 1
result['total'] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results, fail_msgs)
return (results, fail_msgs) | 6,035,573,499,776,601,000 | Generate the results table from the test runs (builds are ignored).
The table has 3 columns - name, passing and total as a list of dicts.
This is populated for all tests. The number of passing and total is
in reference to the number of iterations or reseeds for that test.
This list of dicts is directly consumed by the Testplan::results_table
method for testplan mapping / annotation. | util/dvsim/SimCfg.py | gen_results_sub | courageheart/opentitan | python | def gen_results_sub(items, results, fail_msgs):
'\n Generate the results table from the test runs (builds are ignored).\n The table has 3 columns - name, passing and total as a list of dicts.\n This is populated for all tests. The number of passing and total is\n in reference to the number of iterations or reseeds for that test.\n This list of dicts is directly consumed by the Testplan::results_table\n method for testplan mapping / annotation.\n '
for item in items:
if (item.status == 'F'):
fail_msgs += item.fail_msg
if (item.target == 'run'):
result = retrieve_result(item.name, results)
if (result is None):
result = {'name': item.name, 'passing': 0, 'total': 0}
results.append(result)
if (item.status == 'P'):
result['passing'] += 1
result['total'] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results, fail_msgs)
return (results, fail_msgs) |
def _build_client(token=None):
'Utility function to create Neutron client.'
params = {'timeout': CONF.neutron.url_timeout, 'retries': CONF.neutron.retries, 'insecure': CONF.keystone_authtoken.insecure, 'ca_cert': CONF.keystone_authtoken.certfile}
if (CONF.neutron.auth_strategy not in ['noauth', 'keystone']):
raise exception.ConfigInvalid(_('Neutron auth_strategy should be either "noauth" or "keystone".'))
if (CONF.neutron.auth_strategy == 'noauth'):
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif ((CONF.neutron.auth_strategy == 'keystone') and (token is None)):
params['endpoint_url'] = (CONF.neutron.url or keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params) | -5,738,843,228,165,864,000 | Utility function to create Neutron client. | ironic/dhcp/neutron.py | _build_client | overcastcloud/ironic | python | def _build_client(token=None):
params = {'timeout': CONF.neutron.url_timeout, 'retries': CONF.neutron.retries, 'insecure': CONF.keystone_authtoken.insecure, 'ca_cert': CONF.keystone_authtoken.certfile}
if (CONF.neutron.auth_strategy not in ['noauth', 'keystone']):
raise exception.ConfigInvalid(_('Neutron auth_strategy should be either "noauth" or "keystone".'))
if (CONF.neutron.auth_strategy == 'noauth'):
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif ((CONF.neutron.auth_strategy == 'keystone') and (token is None)):
params['endpoint_url'] = (CONF.neutron.url or keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params) |
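A hedged sketch of the noauth branch above: CONF is faked with SimpleNamespace and the real neutronclient is not imported, so this only shows which keyword arguments would reach clientv20.Client; the URL and numbers are illustrative assumptions.

from types import SimpleNamespace

CONF = SimpleNamespace(
    neutron=SimpleNamespace(url='http://neutron.example.com:9696',
                            url_timeout=30, retries=3, auth_strategy='noauth'),
    keystone_authtoken=SimpleNamespace(insecure=False, certfile=None))

params = {'timeout': CONF.neutron.url_timeout,
          'retries': CONF.neutron.retries,
          'insecure': CONF.keystone_authtoken.insecure,
          'ca_cert': CONF.keystone_authtoken.certfile}
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
print(params)  # the kwargs _build_client would pass to clientv20.Client(**params)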
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"Update a port's attributes.\n\n Update one or more DHCP options on the specified port.\n For the relevant API spec, see\n http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html\n\n :param port_id: designate which port these attributes\n will be applied to.\n :param dhcp_options: this will be a list of dicts, e.g.\n\n ::\n\n [{'opt_name': 'bootfile-name',\n 'opt_value': 'pxelinux.0'},\n {'opt_name': 'server-ip-address',\n 'opt_value': '123.123.123.456'},\n {'opt_name': 'tftp-server',\n 'opt_value': '0.0.0.0'}]\n :param token: optional auth token.\n\n :raises: FailedToUpdateDHCPOptOnPort\n "
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE('Failed to update Neutron port %s.'), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id) | 3,283,669,068,899,312,000 | Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '0.0.0.0'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort | ironic/dhcp/neutron.py | update_port_dhcp_opts | overcastcloud/ironic | python | def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"Update a port's attributes.\n\n Update one or more DHCP options on the specified port.\n For the relevant API spec, see\n http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html\n\n :param port_id: designate which port these attributes\n will be applied to.\n :param dhcp_options: this will be a list of dicts, e.g.\n\n ::\n\n [{'opt_name': 'bootfile-name',\n 'opt_value': 'pxelinux.0'},\n {'opt_name': 'server-ip-address',\n 'opt_value': '123.123.123.456'},\n {'opt_name': 'tftp-server',\n 'opt_value': '0.0.0.0'}]\n :param token: optional auth token.\n\n :raises: FailedToUpdateDHCPOptOnPort\n "
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE('Failed to update Neutron port %s.'), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id) |
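The dhcp_options payload format from the docstring above, shown as a runnable snippet; the addresses are hypothetical and simply illustrate the request body Neutron receives.

dhcp_options = [
    {'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
    {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.10'},
    {'opt_name': 'tftp-server', 'opt_value': '192.0.2.10'}]
# Mirrors the port_req_body the method builds before calling update_port():
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
print(port_req_body)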
def update_port_address(self, port_id, address, token=None):
"Update a port's mac address.\n\n :param port_id: Neutron port id.\n :param address: new MAC address.\n :param token: optional auth token.\n :raises: FailedToUpdateMacOnPort\n "
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE('Failed to update MAC address on Neutron port %s.'), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id) | -5,523,271,498,786,543,000 | Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort | ironic/dhcp/neutron.py | update_port_address | overcastcloud/ironic | python | def update_port_address(self, port_id, address, token=None):
"Update a port's mac address.\n\n :param port_id: Neutron port id.\n :param address: new MAC address.\n :param token: optional auth token.\n :raises: FailedToUpdateMacOnPort\n "
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE('Failed to update MAC address on Neutron port %s.'), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id) |
def update_dhcp_opts(self, task, options, vifs=None):
"Send or update the DHCP BOOT options for this node.\n\n :param task: A TaskManager instance.\n :param options: this will be a list of dicts, e.g.\n\n ::\n\n [{'opt_name': 'bootfile-name',\n 'opt_value': 'pxelinux.0'},\n {'opt_name': 'server-ip-address',\n 'opt_value': '123.123.123.456'},\n {'opt_name': 'tftp-server',\n 'opt_value': '0.0.0.0'}]\n :param vifs: a dict of Neutron port dicts to update DHCP options on.\n The keys should be Ironic port UUIDs, and the values should be\n Neutron port UUIDs\n If the value is None, will get the list of ports from the Ironic\n port objects.\n "
if (vifs is None):
vifs = network.get_node_vif_ids(task)
if (not vifs):
raise exception.FailedToUpdateDHCPOptOnPort((_('No VIFs found for node %(node)s when attempting to update DHCP BOOT options.') % {'node': task.node.uuid}))
failures = []
for (port_id, port_vif) in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options, token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if (len(failures) == len(vifs)):
raise exception.FailedToUpdateDHCPOptOnPort((_('Failed to set DHCP BOOT options for any port on node %s.') % task.node.uuid))
else:
LOG.warning(_LW('Some errors were encountered when updating the DHCP BOOT options for node %(node)s on the following ports: %(ports)s.'), {'node': task.node.uuid, 'ports': failures})
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug('Waiting 15 seconds for Neutron.')
time.sleep(15) | -959,911,370,649,814,800 | Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '0.0.0.0'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs
If the value is None, will get the list of ports from the Ironic
port objects. | ironic/dhcp/neutron.py | update_dhcp_opts | overcastcloud/ironic | python | def update_dhcp_opts(self, task, options, vifs=None):
"Send or update the DHCP BOOT options for this node.\n\n :param task: A TaskManager instance.\n :param options: this will be a list of dicts, e.g.\n\n ::\n\n [{'opt_name': 'bootfile-name',\n 'opt_value': 'pxelinux.0'},\n {'opt_name': 'server-ip-address',\n 'opt_value': '123.123.123.456'},\n {'opt_name': 'tftp-server',\n 'opt_value': '0.0.0.0'}]\n :param vifs: a dict of Neutron port dicts to update DHCP options on.\n The keys should be Ironic port UUIDs, and the values should be\n Neutron port UUIDs\n If the value is None, will get the list of ports from the Ironic\n port objects.\n "
if (vifs is None):
vifs = network.get_node_vif_ids(task)
if (not vifs):
raise exception.FailedToUpdateDHCPOptOnPort((_('No VIFs found for node %(node)s when attempting to update DHCP BOOT options.') % {'node': task.node.uuid}))
failures = []
for (port_id, port_vif) in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options, token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if (len(failures) == len(vifs)):
raise exception.FailedToUpdateDHCPOptOnPort((_('Failed to set DHCP BOOT options for any port on node %s.') % task.node.uuid))
else:
LOG.warning(_LW('Some errors were encountered when updating the DHCP BOOT options for node %(node)s on the following ports: %(ports)s.'), {'node': task.node.uuid, 'ports': failures})
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug('Waiting 15 seconds for Neutron.')
time.sleep(15) |
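A sketch of the optional vifs mapping the method accepts, with made-up UUIDs: keys are Ironic port UUIDs, values are Neutron port UUIDs; when vifs is None the mapping comes from network.get_node_vif_ids(task) instead.

import uuid

vifs = {uuid.uuid4().hex: uuid.uuid4().hex,
        uuid.uuid4().hex: uuid.uuid4().hex}
for ironic_port_id, neutron_port_id in vifs.items():
    # update_dhcp_opts() calls update_port_dhcp_opts() per Neutron port and
    # records the Ironic port id of any entry that fails.
    print(ironic_port_id, '->', neutron_port_id)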
def _get_fixed_ip_address(self, port_uuid, client):
"Get a port's fixed ip address.\n\n :param port_uuid: Neutron port id.\n :param client: Neutron client instance.\n :returns: Neutron port ip address.\n :raises: FailedToGetIPAddressOnPort\n :raises: InvalidIPv4Address\n "
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE('Failed to Get IP address on Neutron port %s.'), port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE('Neutron returned invalid IPv4 address %s.'), ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE('No IP address assigned to Neutron port %s.'), port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid) | 6,938,815,195,611,214,000 | Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address | ironic/dhcp/neutron.py | _get_fixed_ip_address | overcastcloud/ironic | python | def _get_fixed_ip_address(self, port_uuid, client):
"Get a port's fixed ip address.\n\n :param port_uuid: Neutron port id.\n :param client: Neutron client instance.\n :returns: Neutron port ip address.\n :raises: FailedToGetIPAddressOnPort\n :raises: InvalidIPv4Address\n "
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE('Failed to Get IP address on Neutron port %s.'), port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE('Neutron returned invalid IPv4 address %s.'), ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE('No IP address assigned to Neutron port %s.'), port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid) |
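An illustrative Neutron port payload of the shape this helper inspects; all identifiers and addresses are hypothetical.

neutron_port = {
    'id': '6b2f1c4ad9e14f0f8f6d3c2b1a0e9d8c',
    'fixed_ips': [{'subnet_id': 'f0e1d2c3b4a59687a8b9c0d1e2f30415',
                   'ip_address': '192.0.2.42'}]}
fixed_ips = neutron_port.get('fixed_ips')
ip_address = fixed_ips[0].get('ip_address', None)
print(ip_address)  # '192.0.2.42' passes the is_valid_ipv4() check above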
def _get_port_ip_address(self, task, port_uuid, client):
"Get ip address of ironic port assigned by neutron.\n\n :param task: a TaskManager instance.\n :param port_uuid: ironic Node's port UUID.\n :param client: Neutron client instance.\n :returns: Neutron port ip address associated with Node's port.\n :raises: FailedToGetIPAddressOnPort\n :raises: InvalidIPv4Address\n "
vifs = network.get_node_vif_ids(task)
if (not vifs):
LOG.warning(_LW('No VIFs found for node %(node)s when attempting to get port IP address.'), {'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address | 4,342,479,345,130,977,000 | Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address | ironic/dhcp/neutron.py | _get_port_ip_address | overcastcloud/ironic | python | def _get_port_ip_address(self, task, port_uuid, client):
"Get ip address of ironic port assigned by neutron.\n\n :param task: a TaskManager instance.\n :param port_uuid: ironic Node's port UUID.\n :param client: Neutron client instance.\n :returns: Neutron port ip address associated with Node's port.\n :raises: FailedToGetIPAddressOnPort\n :raises: InvalidIPv4Address\n "
vifs = network.get_node_vif_ids(task)
if (not vifs):
LOG.warning(_LW('No VIFs found for node %(node)s when attempting to get port IP address.'), {'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address |
def get_ip_addresses(self, task):
'Get IP addresses for all ports in `task`.\n\n :param task: a TaskManager instance.\n :returns: List of IP addresses associated with task.ports.\n '
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid, client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort, exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW('Some errors were encountered on node %(node)s while retrieving IP address on the following ports: %(ports)s.'), {'node': task.node.uuid, 'ports': failures})
return ip_addresses | -321,930,604,656,494,500 | Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports. | ironic/dhcp/neutron.py | get_ip_addresses | overcastcloud/ironic | python | def get_ip_addresses(self, task):
'Get IP addresses for all ports in `task`.\n\n :param task: a TaskManager instance.\n :returns: List of IP addresses associated with task.ports.\n '
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid, client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort, exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW('Some errors were encountered on node %(node)s while retrieving IP address on the following ports: %(ports)s.'), {'node': task.node.uuid, 'ports': failures})
return ip_addresses |
def create_cleaning_ports(self, task):
"Create neutron ports for each port on task.node to boot the ramdisk.\n\n :param task: a TaskManager instance.\n :raises: InvalidParameterValue if the cleaning network is None\n :returns: a dictionary in the form {port.uuid: neutron_port['id']}\n "
if (not CONF.neutron.cleaning_network_uuid):
raise exception.InvalidParameterValue(_('Valid cleaning network UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {'port': {'network_id': CONF.neutron.cleaning_network_uuid, 'admin_state_up': True}}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s from %(node)s. %(exc)s') % {'net': CONF.neutron.cleaning_network_uuid, 'node': task.node.uuid, 'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if ((not port.get('port')) or (not port['port'].get('id'))):
self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node %(node)s') % task.node.uuid)
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
ports[ironic_port.uuid] = port['port']['id']
return ports | 5,445,447,233,773,019,000 | Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']} | ironic/dhcp/neutron.py | create_cleaning_ports | overcastcloud/ironic | python | def create_cleaning_ports(self, task):
"Create neutron ports for each port on task.node to boot the ramdisk.\n\n :param task: a TaskManager instance.\n :raises: InvalidParameterValue if the cleaning network is None\n :returns: a dictionary in the form {port.uuid: neutron_port['id']}\n "
if (not CONF.neutron.cleaning_network_uuid):
raise exception.InvalidParameterValue(_('Valid cleaning network UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {'port': {'network_id': CONF.neutron.cleaning_network_uuid, 'admin_state_up': True}}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s from %(node)s. %(exc)s') % {'net': CONF.neutron.cleaning_network_uuid, 'node': task.node.uuid, 'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if ((not port.get('port')) or (not port['port'].get('id'))):
self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node %(node)s') % task.node.uuid)
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
ports[ironic_port.uuid] = port['port']['id']
return ports |
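A sketch of the per-port request body and the documented return shape, {ironic port UUID: Neutron port id}; the network UUID and MAC address are made-up stand-ins for CONF.neutron.cleaning_network_uuid and a node port.

import uuid

cleaning_network_uuid = uuid.uuid4().hex  # stand-in for the config option
body = {'port': {'network_id': cleaning_network_uuid, 'admin_state_up': True,
                 'mac_address': '52:54:00:12:34:56'}}
ports = {uuid.uuid4().hex: uuid.uuid4().hex}  # hypothetical return value
print(body)
print(ports)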
def delete_cleaning_ports(self, task):
'Deletes the neutron port created for booting the ramdisk.\n\n :param task: a TaskManager instance.\n '
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {'network_id': CONF.neutron.cleaning_network_uuid}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s from Neutron, possible network issue. %(exc)s') % {'node': task.node.uuid, 'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
for neutron_port in ports.get('ports', []):
if (neutron_port.get('mac_address') in macs):
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network %(net)s from %(node)s, possible network issue. %(exc)s') % {'net': CONF.neutron.cleaning_network_uuid, 'node': task.node.uuid, 'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg) | -1,162,994,540,094,415,400 | Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance. | ironic/dhcp/neutron.py | delete_cleaning_ports | overcastcloud/ironic | python | def delete_cleaning_ports(self, task):
'Deletes the neutron port created for booting the ramdisk.\n\n :param task: a TaskManager instance.\n '
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {'network_id': CONF.neutron.cleaning_network_uuid}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s from Neutron, possible network issue. %(exc)s') % {'node': task.node.uuid, 'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
for neutron_port in ports.get('ports', []):
if (neutron_port.get('mac_address') in macs):
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network %(net)s from %(node)s, possible network issue. %(exc)s') % {'net': CONF.neutron.cleaning_network_uuid, 'node': task.node.uuid, 'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg) |
def _rollback_cleaning_ports(self, task):
'Attempts to delete any ports created by cleaning\n\n Purposefully will not raise any exceptions so error handling can\n continue.\n\n :param task: a TaskManager instance.\n '
try:
self.delete_cleaning_ports(task)
except Exception:
LOG.exception((_LE('Failed to rollback cleaning port changes for node %s') % task.node.uuid)) | -6,487,329,255,461,111,000 | Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance. | ironic/dhcp/neutron.py | _rollback_cleaning_ports | overcastcloud/ironic | python | def _rollback_cleaning_ports(self, task):
'Attempts to delete any ports created by cleaning\n\n Purposefully will not raise any exceptions so error handling can\n continue.\n\n :param task: a TaskManager instance.\n '
try:
self.delete_cleaning_ports(task)
except Exception:
LOG.exception((_LE('Failed to rollback cleaning port changes for node %s') % task.node.uuid)) |
def test_create_endpoint_project_association(self):
'PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Valid endpoint and project id test case.\n\n '
self.put(self.default_request_url) | -5,016,497,199,877,473,000 | PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid endpoint and project id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_create_endpoint_project_association | hashnfv/hashnfv-moon | python | def test_create_endpoint_project_association(self):
'PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Valid endpoint and project id test case.\n\n '
self.put(self.default_request_url) |
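An assumed reconstruction of the self.default_request_url fixture these tests share, inferred from the explicit URLs in the sibling cases rather than taken from setUp; the IDs are stand-ins.

import uuid

default_domain_project_id = uuid.uuid4().hex  # stand-in fixture value
endpoint_id = uuid.uuid4().hex                # stand-in fixture value
default_request_url = (
    '/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s'
    % {'project_id': default_domain_project_id, 'endpoint_id': endpoint_id})
print(default_request_url)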
def test_create_endpoint_project_association_with_invalid_project(self):
'PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid project id test case.\n\n '
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id}), expected_status=http_client.NOT_FOUND) | -3,211,361,648,378,916,400 | PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_create_endpoint_project_association_with_invalid_project | hashnfv/hashnfv-moon | python | def test_create_endpoint_project_association_with_invalid_project(self):
'PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid project id test case.\n\n '
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id}), expected_status=http_client.NOT_FOUND) |
def test_create_endpoint_project_association_with_invalid_endpoint(self):
'PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid endpoint id test case.\n\n '
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) | -3,711,525,024,927,123,500 | PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_create_endpoint_project_association_with_invalid_endpoint | hashnfv/hashnfv-moon | python | def test_create_endpoint_project_association_with_invalid_endpoint(self):
'PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid endpoint id test case.\n\n '
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) |
def test_create_endpoint_project_association_with_unexpected_body(self):
'PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Unexpected body in request. The body should be ignored.\n\n '
self.put(self.default_request_url, body={'project_id': self.default_domain_project_id}) | 5,952,392,179,876,235,000 | PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Unexpected body in request. The body should be ignored. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_create_endpoint_project_association_with_unexpected_body | hashnfv/hashnfv-moon | python | def test_create_endpoint_project_association_with_unexpected_body(self):
'PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Unexpected body in request. The body should be ignored.\n\n '
self.put(self.default_request_url, body={'project_id': self.default_domain_project_id}) |
def test_check_endpoint_project_association(self):
'HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Valid project and endpoint id test case.\n\n '
self.put(self.default_request_url)
self.head(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id})) | -648,043,250,287,718,100 | HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project and endpoint id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_check_endpoint_project_association | hashnfv/hashnfv-moon | python | def test_check_endpoint_project_association(self):
'HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Valid project and endpoint id test case.\n\n '
self.put(self.default_request_url)
self.head(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id})) |
def test_check_endpoint_project_association_with_invalid_project(self):
'HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid project id test case.\n\n '
self.put(self.default_request_url)
self.head(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id}), expected_status=http_client.NOT_FOUND) | -3,015,608,723,167,743,500 | HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_check_endpoint_project_association_with_invalid_project | hashnfv/hashnfv-moon | python | def test_check_endpoint_project_association_with_invalid_project(self):
'HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid project id test case.\n\n '
self.put(self.default_request_url)
self.head(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id}), expected_status=http_client.NOT_FOUND) |
def test_check_endpoint_project_association_with_invalid_endpoint(self):
'HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid endpoint id test case.\n\n '
self.put(self.default_request_url)
self.head(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) | 3,061,519,089,517,447,000 | HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_check_endpoint_project_association_with_invalid_endpoint | hashnfv/hashnfv-moon | python | def test_check_endpoint_project_association_with_invalid_endpoint(self):
'HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid endpoint id test case.\n\n '
self.put(self.default_request_url)
self.head(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) |
def test_list_endpoints_associated_with_valid_project(self):
'GET /OS-EP-FILTER/projects/{project_id}/endpoints\n\n Valid project and endpoint id test case.\n\n '
self.put(self.default_request_url)
resource_url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {'project_id': self.default_domain_project_id})
r = self.get(resource_url)
self.assertValidEndpointListResponse(r, self.endpoint, resource_url=resource_url) | 9,020,954,374,531,678,000 | GET /OS-EP-FILTER/projects/{project_id}/endpoints
Valid project and endpoint id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_endpoints_associated_with_valid_project | hashnfv/hashnfv-moon | python | def test_list_endpoints_associated_with_valid_project(self):
'GET /OS-EP-FILTER/projects/{project_id}/endpoints\n\n Valid project and endpoint id test case.\n\n '
self.put(self.default_request_url)
resource_url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {'project_id': self.default_domain_project_id})
r = self.get(resource_url)
self.assertValidEndpointListResponse(r, self.endpoint, resource_url=resource_url) |
def test_list_endpoints_associated_with_invalid_project(self):
'GET /OS-EP-FILTER/projects/{project_id}/endpoints\n\n Invalid project id test case.\n\n '
self.put(self.default_request_url)
self.get(('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {'project_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) | 668,460,018,294,357,400 | GET /OS-EP-FILTER/projects/{project_id}/endpoints
Invalid project id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_endpoints_associated_with_invalid_project | hashnfv/hashnfv-moon | python | def test_list_endpoints_associated_with_invalid_project(self):
'GET /OS-EP-FILTER/projects/{project_id}/endpoints\n\n Invalid project id test case.\n\n '
self.put(self.default_request_url)
self.get(('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {'project_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) |
def test_list_projects_associated_with_endpoint(self):
'GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects\n\n Valid endpoint-project association test case.\n\n '
self.put(self.default_request_url)
resource_url = ('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': self.endpoint_id})
r = self.get(resource_url)
self.assertValidProjectListResponse(r, self.default_domain_project, resource_url=resource_url) | 2,197,070,725,591,229,700 | GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Valid endpoint-project association test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_projects_associated_with_endpoint | hashnfv/hashnfv-moon | python | def test_list_projects_associated_with_endpoint(self):
'GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects\n\n Valid endpoint-project association test case.\n\n '
self.put(self.default_request_url)
resource_url = ('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': self.endpoint_id})
r = self.get(resource_url)
self.assertValidProjectListResponse(r, self.default_domain_project, resource_url=resource_url) |
def test_list_projects_with_no_endpoint_project_association(self):
'GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects\n\n Valid endpoint id but no endpoint-project associations test case.\n\n '
r = self.get(('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': self.endpoint_id}))
self.assertValidProjectListResponse(r, expected_length=0) | 8,078,478,500,204,905,000 | GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Valid endpoint id but no endpoint-project associations test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_projects_with_no_endpoint_project_association | hashnfv/hashnfv-moon | python | def test_list_projects_with_no_endpoint_project_association(self):
'GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects\n\n Valid endpoint id but no endpoint-project associations test case.\n\n '
r = self.get(('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': self.endpoint_id}))
self.assertValidProjectListResponse(r, expected_length=0) |
def test_list_projects_associated_with_invalid_endpoint(self):
'GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects\n\n Invalid endpoint id test case.\n\n '
self.get(('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) | -3,965,067,759,983,694,300 | GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Invalid endpoint id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_projects_associated_with_invalid_endpoint | hashnfv/hashnfv-moon | python | def test_list_projects_associated_with_invalid_endpoint(self):
'GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects\n\n Invalid endpoint id test case.\n\n '
self.get(('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) |
def test_remove_endpoint_project_association(self):
'DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Valid project id and endpoint id test case.\n\n '
self.put(self.default_request_url)
self.delete(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id})) | 5,680,347,250,701,612,000 | DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project id and endpoint id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_remove_endpoint_project_association | hashnfv/hashnfv-moon | python | def test_remove_endpoint_project_association(self):
'DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Valid project id and endpoint id test case.\n\n '
self.put(self.default_request_url)
self.delete(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id})) |
def test_remove_endpoint_project_association_with_invalid_project(self):
'DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid project id test case.\n\n '
self.put(self.default_request_url)
self.delete(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id}), expected_status=http_client.NOT_FOUND) | -8,052,567,714,237,409,000 | DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_remove_endpoint_project_association_with_invalid_project | hashnfv/hashnfv-moon | python | def test_remove_endpoint_project_association_with_invalid_project(self):
'DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid project id test case.\n\n '
self.put(self.default_request_url)
self.delete(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id}), expected_status=http_client.NOT_FOUND) |
def test_remove_endpoint_project_association_with_invalid_endpoint(self):
'DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid endpoint id test case.\n\n '
self.put(self.default_request_url)
self.delete(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) | -7,830,655,444,837,404,000 | DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_remove_endpoint_project_association_with_invalid_endpoint | hashnfv/hashnfv-moon | python | def test_remove_endpoint_project_association_with_invalid_endpoint(self):
'DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}\n\n Invalid endpoint id test case.\n\n '
self.put(self.default_request_url)
self.delete(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex}), expected_status=http_client.NOT_FOUND) |
def test_project_scoped_token_using_endpoint_filter(self):
'Verify endpoints from project scoped token filtered.'
ref = unit.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
self.put(('/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {'user_id': self.user['id'], 'project_id': project['id'], 'role_id': self.role['id']}))
body = {'user': {'default_project_id': project['id']}}
r = self.patch(('/users/%(user_id)s' % {'user_id': self.user['id']}), body=body)
self.assertValidUserResponse(r)
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': project['id'], 'endpoint_id': self.endpoint_id}))
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1)
self.assertEqual(project['id'], r.result['token']['project']['id']) | 2,905,780,249,212,762,600 | Verify endpoints from project scoped token filtered. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_project_scoped_token_using_endpoint_filter | hashnfv/hashnfv-moon | python | def test_project_scoped_token_using_endpoint_filter(self):
ref = unit.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
self.put(('/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {'user_id': self.user['id'], 'project_id': project['id'], 'role_id': self.role['id']}))
body = {'user': {'default_project_id': project['id']}}
r = self.patch(('/users/%(user_id)s' % {'user_id': self.user['id']}), body=body)
self.assertValidUserResponse(r)
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': project['id'], 'endpoint_id': self.endpoint_id}))
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1)
self.assertEqual(project['id'], r.result['token']['project']['id']) |
def test_default_scoped_token_using_endpoint_filter(self):
'Verify endpoints from default scoped token filtered.'
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}))
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1)
self.assertEqual(self.project['id'], r.result['token']['project']['id'])
self.assertIn('name', r.result['token']['catalog'][0])
endpoint = r.result['token']['catalog'][0]['endpoints'][0]
self.assertIn('region', endpoint)
self.assertIn('region_id', endpoint)
self.assertEqual(endpoint['region'], endpoint['region_id']) | 2,149,789,995,254,649,900 | Verify endpoints from default scoped token filtered. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_default_scoped_token_using_endpoint_filter | hashnfv/hashnfv-moon | python | def test_default_scoped_token_using_endpoint_filter(self):
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}))
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1)
self.assertEqual(self.project['id'], r.result['token']['project']['id'])
self.assertIn('name', r.result['token']['catalog'][0])
endpoint = r.result['token']['catalog'][0]['endpoints'][0]
self.assertIn('region', endpoint)
self.assertIn('region_id', endpoint)
self.assertEqual(endpoint['region'], endpoint['region_id']) |
def test_scoped_token_with_no_catalog_using_endpoint_filter(self):
'Verify endpoint filter does not affect no catalog.'
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}))
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
self.assertEqual(self.project['id'], r.result['token']['project']['id']) | 5,140,142,324,192,086,000 | Verify endpoint filter does not affect no catalog. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_scoped_token_with_no_catalog_using_endpoint_filter | hashnfv/hashnfv-moon | python | def test_scoped_token_with_no_catalog_using_endpoint_filter(self):
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}))
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
self.assertEqual(self.project['id'], r.result['token']['project']['id']) |
def test_invalid_endpoint_project_association(self):
'Verify an invalid endpoint-project association is handled.'
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}))
endpoint_id2 = uuid.uuid4().hex
endpoint2 = unit.new_endpoint_ref(service_id=self.service_id, region_id=self.region_id, interface='public', id=endpoint_id2)
self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': endpoint_id2}))
self.catalog_api.delete_endpoint(endpoint_id2)
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1)
self.assertEqual(self.project['id'], r.result['token']['project']['id']) | -1,311,745,232,435,130,000 | Verify an invalid endpoint-project association is handled. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_invalid_endpoint_project_association | hashnfv/hashnfv-moon | python | def test_invalid_endpoint_project_association(self):
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}))
endpoint_id2 = uuid.uuid4().hex
endpoint2 = unit.new_endpoint_ref(service_id=self.service_id, region_id=self.region_id, interface='public', id=endpoint_id2)
self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': endpoint_id2}))
self.catalog_api.delete_endpoint(endpoint_id2)
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1)
self.assertEqual(self.project['id'], r.result['token']['project']['id']) |
def test_disabled_endpoint(self):
'Test that a disabled endpoint is handled.'
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}))
disabled_endpoint_ref = copy.copy(self.endpoint)
disabled_endpoint_id = uuid.uuid4().hex
disabled_endpoint_ref.update({'id': disabled_endpoint_id, 'enabled': False, 'interface': 'internal'})
self.catalog_api.create_endpoint(disabled_endpoint_id, disabled_endpoint_ref)
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': disabled_endpoint_id}))
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
endpoints = r.result['token']['catalog'][0]['endpoints']
endpoint_ids = [ep['id'] for ep in endpoints]
self.assertEqual([self.endpoint_id], endpoint_ids) | 6,247,031,904,212,547,000 | Test that a disabled endpoint is handled. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_disabled_endpoint | hashnfv/hashnfv-moon | python | def test_disabled_endpoint(self):
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': self.endpoint_id}))
disabled_endpoint_ref = copy.copy(self.endpoint)
disabled_endpoint_id = uuid.uuid4().hex
disabled_endpoint_ref.update({'id': disabled_endpoint_id, 'enabled': False, 'interface': 'internal'})
self.catalog_api.create_endpoint(disabled_endpoint_id, disabled_endpoint_ref)
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': disabled_endpoint_id}))
auth_data = self.build_authentication_request(user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
endpoints = r.result['token']['catalog'][0]['endpoints']
endpoint_ids = [ep['id'] for ep in endpoints]
self.assertEqual([self.endpoint_id], endpoint_ids) |
def test_create_endpoint_group(self):
'POST /OS-EP-FILTER/endpoint_groups\n\n Valid endpoint group test case.\n\n '
r = self.post(self.DEFAULT_ENDPOINT_GROUP_URL, body=self.DEFAULT_ENDPOINT_GROUP_BODY)
expected_filters = self.DEFAULT_ENDPOINT_GROUP_BODY['endpoint_group']['filters']
expected_name = self.DEFAULT_ENDPOINT_GROUP_BODY['endpoint_group']['name']
self.assertEqual(expected_filters, r.result['endpoint_group']['filters'])
self.assertEqual(expected_name, r.result['endpoint_group']['name'])
self.assertThat(r.result['endpoint_group']['links']['self'], matchers.EndsWith(('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': r.result['endpoint_group']['id']}))) | -6,268,002,636,270,793,000 | POST /OS-EP-FILTER/endpoint_groups
Valid endpoint group test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_create_endpoint_group | hashnfv/hashnfv-moon | python | def test_create_endpoint_group(self):
'POST /OS-EP-FILTER/endpoint_groups\n\n Valid endpoint group test case.\n\n '
r = self.post(self.DEFAULT_ENDPOINT_GROUP_URL, body=self.DEFAULT_ENDPOINT_GROUP_BODY)
expected_filters = self.DEFAULT_ENDPOINT_GROUP_BODY['endpoint_group']['filters']
expected_name = self.DEFAULT_ENDPOINT_GROUP_BODY['endpoint_group']['name']
self.assertEqual(expected_filters, r.result['endpoint_group']['filters'])
self.assertEqual(expected_name, r.result['endpoint_group']['name'])
self.assertThat(r.result['endpoint_group']['links']['self'], matchers.EndsWith(('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': r.result['endpoint_group']['id']}))) |
def test_create_invalid_endpoint_group(self):
'POST /OS-EP-FILTER/endpoint_groups\n\n Invalid endpoint group creation test case.\n\n '
invalid_body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
invalid_body['endpoint_group']['filters'] = {'foobar': 'admin'}
self.post(self.DEFAULT_ENDPOINT_GROUP_URL, body=invalid_body, expected_status=http_client.BAD_REQUEST) | 9,015,613,689,413,443,000 | POST /OS-EP-FILTER/endpoint_groups
Invalid endpoint group creation test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_create_invalid_endpoint_group | hashnfv/hashnfv-moon | python | def test_create_invalid_endpoint_group(self):
'POST /OS-EP-FILTER/endpoint_groups\n\n Invalid endpoint group creation test case.\n\n '
invalid_body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
invalid_body['endpoint_group']['filters'] = {'foobar': 'admin'}
self.post(self.DEFAULT_ENDPOINT_GROUP_URL, body=invalid_body, expected_status=http_client.BAD_REQUEST) |
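A hedged reconstruction of the endpoint-group fixture these tests post: the 'interface' filter key is inferred from the invalid case above (which swaps in 'foobar') and the description matches the patch test, but the exact values are assumptions, not the module's real setUp.

DEFAULT_ENDPOINT_GROUP_URL = '/OS-EP-FILTER/endpoint_groups'
DEFAULT_ENDPOINT_GROUP_BODY = {
    'endpoint_group': {
        'description': 'endpoint group description',
        'filters': {'interface': 'admin'},
        'name': 'endpoint_group_name'}}
print(DEFAULT_ENDPOINT_GROUP_BODY)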
def test_get_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Valid endpoint group test case.\n\n '
response = self.post(self.DEFAULT_ENDPOINT_GROUP_URL, body=self.DEFAULT_ENDPOINT_GROUP_BODY)
endpoint_group_id = response.result['endpoint_group']['id']
endpoint_group_filters = response.result['endpoint_group']['filters']
endpoint_group_name = response.result['endpoint_group']['name']
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
response = self.get(url)
self.assertEqual(endpoint_group_id, response.result['endpoint_group']['id'])
self.assertEqual(endpoint_group_filters, response.result['endpoint_group']['filters'])
self.assertEqual(endpoint_group_name, response.result['endpoint_group']['name'])
self.assertThat(response.result['endpoint_group']['links']['self'], matchers.EndsWith(url)) | -6,138,546,730,807,783,000 | GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_get_endpoint_group | hashnfv/hashnfv-moon | python | def test_get_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Valid endpoint group test case.\n\n '
response = self.post(self.DEFAULT_ENDPOINT_GROUP_URL, body=self.DEFAULT_ENDPOINT_GROUP_BODY)
endpoint_group_id = response.result['endpoint_group']['id']
endpoint_group_filters = response.result['endpoint_group']['filters']
endpoint_group_name = response.result['endpoint_group']['name']
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
response = self.get(url)
self.assertEqual(endpoint_group_id, response.result['endpoint_group']['id'])
self.assertEqual(endpoint_group_filters, response.result['endpoint_group']['filters'])
self.assertEqual(endpoint_group_name, response.result['endpoint_group']['name'])
self.assertThat(response.result['endpoint_group']['links']['self'], matchers.EndsWith(url)) |
def test_get_invalid_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Invalid endpoint group test case.\n\n '
endpoint_group_id = 'foobar'
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.get(url, expected_status=http_client.NOT_FOUND) | -640,298,996,416,632,700 | GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_get_invalid_endpoint_group | hashnfv/hashnfv-moon | python | def test_get_invalid_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Invalid endpoint group test case.\n\n '
endpoint_group_id = 'foobar'
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.get(url, expected_status=http_client.NOT_FOUND) |
def test_check_endpoint_group(self):
'HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}\n\n Valid endpoint_group_id test case.\n\n '
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.head(url, expected_status=http_client.OK) | -3,382,640,610,027,043,000 | HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
Valid endpoint_group_id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_check_endpoint_group | hashnfv/hashnfv-moon | python | def test_check_endpoint_group(self):
'HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}\n\n Valid endpoint_group_id test case.\n\n '
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.head(url, expected_status=http_client.OK) |
def test_check_invalid_endpoint_group(self):
'HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}\n\n Invalid endpoint_group_id test case.\n\n '
endpoint_group_id = 'foobar'
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.head(url, expected_status=http_client.NOT_FOUND) | -6,621,008,291,509,379,000 | HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
Invalid endpoint_group_id test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_check_invalid_endpoint_group | hashnfv/hashnfv-moon | python | def test_check_invalid_endpoint_group(self):
'HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}\n\n Invalid endpoint_group_id test case.\n\n '
endpoint_group_id = 'foobar'
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.head(url, expected_status=http_client.NOT_FOUND) |
def test_patch_endpoint_group(self):
'PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Valid endpoint group patch test case.\n\n '
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'region_id': 'UK'}
body['endpoint_group']['name'] = 'patch_test'
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
r = self.patch(url, body=body)
self.assertEqual(endpoint_group_id, r.result['endpoint_group']['id'])
self.assertEqual(body['endpoint_group']['filters'], r.result['endpoint_group']['filters'])
self.assertThat(r.result['endpoint_group']['links']['self'], matchers.EndsWith(url)) | 8,589,211,646,248,130,000 | PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group patch test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_patch_endpoint_group | hashnfv/hashnfv-moon | python | def test_patch_endpoint_group(self):
'PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Valid endpoint group patch test case.\n\n '
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'region_id': 'UK'}
body['endpoint_group']['name'] = 'patch_test'
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
r = self.patch(url, body=body)
self.assertEqual(endpoint_group_id, r.result['endpoint_group']['id'])
self.assertEqual(body['endpoint_group']['filters'], r.result['endpoint_group']['filters'])
self.assertThat(r.result['endpoint_group']['links']['self'], matchers.EndsWith(url)) |
def test_patch_nonexistent_endpoint_group(self):
'PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Invalid endpoint group patch test case.\n\n '
body = {'endpoint_group': {'name': 'patch_test'}}
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': 'ABC'})
self.patch(url, body=body, expected_status=http_client.NOT_FOUND) | -8,862,230,324,248,649,000 | PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group patch test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_patch_nonexistent_endpoint_group | hashnfv/hashnfv-moon | python | def test_patch_nonexistent_endpoint_group(self):
'PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Invalid endpoint group patch test case.\n\n '
body = {'endpoint_group': {'name': 'patch_test'}}
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': 'ABC'})
self.patch(url, body=body, expected_status=http_client.NOT_FOUND) |
def test_patch_invalid_endpoint_group(self):
'PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Valid endpoint group patch test case.\n\n '
body = {'endpoint_group': {'description': 'endpoint group description', 'filters': {'region': 'UK'}, 'name': 'patch_test'}}
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.patch(url, body=body, expected_status=http_client.BAD_REQUEST)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
r = self.get(url)
del r.result['endpoint_group']['id']
del r.result['endpoint_group']['links']
self.assertDictEqual(self.DEFAULT_ENDPOINT_GROUP_BODY, r.result) | -4,709,729,750,761,354,000 | PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group patch test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_patch_invalid_endpoint_group | hashnfv/hashnfv-moon | python | def test_patch_invalid_endpoint_group(self):
'PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Valid endpoint group patch test case.\n\n '
body = {'endpoint_group': {'description': 'endpoint group description', 'filters': {'region': 'UK'}, 'name': 'patch_test'}}
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.patch(url, body=body, expected_status=http_client.BAD_REQUEST)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
r = self.get(url)
del r.result['endpoint_group']['id']
del r.result['endpoint_group']['links']
self.assertDictEqual(self.DEFAULT_ENDPOINT_GROUP_BODY, r.result) |
def test_delete_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Valid endpoint group test case.\n\n '
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.delete(url)
self.get(url, expected_status=http_client.NOT_FOUND) | -3,760,673,105,546,269,000 | GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_delete_endpoint_group | hashnfv/hashnfv-moon | python | def test_delete_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Valid endpoint group test case.\n\n '
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.delete(url)
self.get(url, expected_status=http_client.NOT_FOUND) |
def test_delete_invalid_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Invalid endpoint group test case.\n\n '
endpoint_group_id = 'foobar'
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.delete(url, expected_status=http_client.NOT_FOUND) | 2,402,647,937,678,251,000 | GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_delete_invalid_endpoint_group | hashnfv/hashnfv-moon | python | def test_delete_invalid_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}\n\n Invalid endpoint group test case.\n\n '
endpoint_group_id = 'foobar'
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.delete(url, expected_status=http_client.NOT_FOUND) |
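All of the keystone records in this block build their request URLs with old-style named string interpolation; a standalone sketch of just that idiom (plain Python, no keystone test fixtures assumed; the 'foobar' id mirrors the invalid-id records above):
```python
# The %-interpolation idiom used throughout the keystone rows, in isolation.
endpoint_group_id = "foobar"
url = "/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s" % {
    "endpoint_group_id": endpoint_group_id
}
assert url == "/OS-EP-FILTER/endpoint_groups/foobar"
```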
def test_add_endpoint_group_to_project(self):
'Create a valid endpoint group and project association.'
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id, self.project_id) | 89,821,026,172,905,740 | Create a valid endpoint group and project association. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_add_endpoint_group_to_project | hashnfv/hashnfv-moon | python | def test_add_endpoint_group_to_project(self):
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id, self.project_id) |
def test_add_endpoint_group_to_project_with_invalid_project_id(self):
'Create an invalid endpoint group and project association.'
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(endpoint_group_id, project_id)
self.put(url, expected_status=http_client.NOT_FOUND) | 3,382,342,094,785,825,300 | Create an invalid endpoint group and project association. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_add_endpoint_group_to_project_with_invalid_project_id | hashnfv/hashnfv-moon | python | def test_add_endpoint_group_to_project_with_invalid_project_id(self):
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(endpoint_group_id, project_id)
self.put(url, expected_status=http_client.NOT_FOUND) |
def test_get_endpoint_group_in_project(self):
'Test retrieving project endpoint group association.'
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = self._get_project_endpoint_group_url(endpoint_group_id, self.project_id)
self.put(url)
response = self.get(url)
self.assertEqual(endpoint_group_id, response.result['project_endpoint_group']['endpoint_group_id'])
self.assertEqual(self.project_id, response.result['project_endpoint_group']['project_id']) | 1,895,786,076,841,884,000 | Test retrieving project endpoint group association. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_get_endpoint_group_in_project | hashnfv/hashnfv-moon | python | def test_get_endpoint_group_in_project(self):
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = self._get_project_endpoint_group_url(endpoint_group_id, self.project_id)
self.put(url)
response = self.get(url)
self.assertEqual(endpoint_group_id, response.result['project_endpoint_group']['endpoint_group_id'])
self.assertEqual(self.project_id, response.result['project_endpoint_group']['project_id']) |
def test_get_invalid_endpoint_group_in_project(self):
'Test retrieving project endpoint group association.'
endpoint_group_id = uuid.uuid4().hex
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(endpoint_group_id, project_id)
self.get(url, expected_status=http_client.NOT_FOUND) | 1,386,172,826,415,082,200 | Test retrieving project endpoint group association. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_get_invalid_endpoint_group_in_project | hashnfv/hashnfv-moon | python | def test_get_invalid_endpoint_group_in_project(self):
endpoint_group_id = uuid.uuid4().hex
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(endpoint_group_id, project_id)
self.get(url, expected_status=http_client.NOT_FOUND) |
def test_list_endpoint_groups_in_project(self):
'GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups.'
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = self._get_project_endpoint_group_url(endpoint_group_id, self.project_id)
self.put(url)
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' % {'project_id': self.project_id})
response = self.get(url)
self.assertEqual(endpoint_group_id, response.result['endpoint_groups'][0]['id']) | -244,413,357,852,432,580 | GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_endpoint_groups_in_project | hashnfv/hashnfv-moon | python | def test_list_endpoint_groups_in_project(self):
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = self._get_project_endpoint_group_url(endpoint_group_id, self.project_id)
self.put(url)
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' % {'project_id': self.project_id})
response = self.get(url)
self.assertEqual(endpoint_group_id, response.result['endpoint_groups'][0]['id']) |
def test_list_endpoint_groups_in_invalid_project(self):
'Test retrieving from invalid project.'
project_id = uuid.uuid4().hex
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' % {'project_id': project_id})
self.get(url, expected_status=http_client.NOT_FOUND) | 7,433,993,406,476,337,000 | Test retrieving from invalid project. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_endpoint_groups_in_invalid_project | hashnfv/hashnfv-moon | python | def test_list_endpoint_groups_in_invalid_project(self):
project_id = uuid.uuid4().hex
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' % {'project_id': project_id})
self.get(url, expected_status=http_client.NOT_FOUND) |
def test_empty_endpoint_groups_in_project(self):
'Test when no endpoint groups are associated with the project.'
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' % {'project_id': self.project_id})
response = self.get(url)
self.assertEqual(0, len(response.result['endpoint_groups'])) | -4,744,587,521,110,524,000 | Test when no endpoint groups are associated with the project. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_empty_endpoint_groups_in_project | hashnfv/hashnfv-moon | python | def test_empty_endpoint_groups_in_project(self):
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' % {'project_id': self.project_id})
response = self.get(url)
self.assertEqual(0, len(response.result['endpoint_groups'])) |
def test_check_endpoint_group_to_project(self):
'Test HEAD with a valid endpoint group and project association.'
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id, self.project_id)
url = self._get_project_endpoint_group_url(endpoint_group_id, self.project_id)
self.head(url, expected_status=http_client.OK) | 1,134,971,295,154,629,400 | Test HEAD with a valid endpoint group and project association. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_check_endpoint_group_to_project | hashnfv/hashnfv-moon | python | def test_check_endpoint_group_to_project(self):
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id, self.project_id)
url = self._get_project_endpoint_group_url(endpoint_group_id, self.project_id)
self.head(url, expected_status=http_client.OK) |
def test_check_endpoint_group_to_project_with_invalid_project_id(self):
'Test HEAD with an invalid endpoint group and project association.'
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = self._get_project_endpoint_group_url(endpoint_group_id, self.project_id)
self.put(url)
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(endpoint_group_id, project_id)
self.head(url, expected_status=http_client.NOT_FOUND) | 7,077,571,031,613,298,000 | Test HEAD with an invalid endpoint group and project association. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_check_endpoint_group_to_project_with_invalid_project_id | hashnfv/hashnfv-moon | python | def test_check_endpoint_group_to_project_with_invalid_project_id(self):
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = self._get_project_endpoint_group_url(endpoint_group_id, self.project_id)
self.put(url)
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(endpoint_group_id, project_id)
self.head(url, expected_status=http_client.NOT_FOUND) |
def test_list_endpoint_groups(self):
'GET /OS-EP-FILTER/endpoint_groups.'
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups'
r = self.get(url)
self.assertNotEmpty(r.result['endpoint_groups'])
self.assertEqual(endpoint_group_id, r.result['endpoint_groups'][0].get('id')) | -4,560,750,767,999,101,400 | GET /OS-EP-FILTER/endpoint_groups. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_endpoint_groups | hashnfv/hashnfv-moon | python | def test_list_endpoint_groups(self):
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups'
r = self.get(url)
self.assertNotEmpty(r.result['endpoint_groups'])
self.assertEqual(endpoint_group_id, r.result['endpoint_groups'][0].get('id')) |
def test_list_projects_associated_with_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects\n\n Valid endpoint group test case.\n\n '
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id, self.project_id)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s/projects' % {'endpoint_group_id': endpoint_group_id})
self.get(url) | 4,753,046,475,020,127,000 | GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects
Valid endpoint group test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_projects_associated_with_endpoint_group | hashnfv/hashnfv-moon | python | def test_list_projects_associated_with_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects\n\n Valid endpoint group test case.\n\n '
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id, self.project_id)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s/projects' % {'endpoint_group_id': endpoint_group_id})
self.get(url) |
def test_list_endpoints_associated_with_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints\n\n Valid endpoint group test case.\n\n '
service_ref = unit.new_service_ref()
response = self.post('/services', body={'service': service_ref})
service_id = response.result['service']['id']
endpoint_ref = unit.new_endpoint_ref(service_id=service_id, interface='public', region_id=self.region_id)
response = self.post('/endpoints', body={'endpoint': endpoint_ref})
endpoint_id = response.result['endpoint']['id']
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'service_id': service_id}
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, body)
self._create_endpoint_group_project_association(endpoint_group_id, self.project_id)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s/endpoints' % {'endpoint_group_id': endpoint_group_id})
r = self.get(url)
self.assertNotEmpty(r.result['endpoints'])
self.assertEqual(endpoint_id, r.result['endpoints'][0].get('id')) | 576,142,743,041,128,800 | GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints
Valid endpoint group test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_endpoints_associated_with_endpoint_group | hashnfv/hashnfv-moon | python | def test_list_endpoints_associated_with_endpoint_group(self):
'GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints\n\n Valid endpoint group test case.\n\n '
service_ref = unit.new_service_ref()
response = self.post('/services', body={'service': service_ref})
service_id = response.result['service']['id']
endpoint_ref = unit.new_endpoint_ref(service_id=service_id, interface='public', region_id=self.region_id)
response = self.post('/endpoints', body={'endpoint': endpoint_ref})
endpoint_id = response.result['endpoint']['id']
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'service_id': service_id}
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, body)
self._create_endpoint_group_project_association(endpoint_group_id, self.project_id)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s/endpoints' % {'endpoint_group_id': endpoint_group_id})
r = self.get(url)
self.assertNotEmpty(r.result['endpoints'])
self.assertEqual(endpoint_id, r.result['endpoints'][0].get('id')) |
def test_list_endpoints_associated_with_project_endpoint_group(self):
'GET /OS-EP-FILTER/projects/{project_id}/endpoints\n\n Valid project, endpoint id, and endpoint group test case.\n\n '
service_ref = unit.new_service_ref()
response = self.post('/services', body={'service': service_ref})
service_id2 = response.result['service']['id']
self._create_endpoint_and_associations(self.default_domain_project_id, service_id2)
self._create_endpoint_and_associations(self.default_domain_project_id)
self.put(self.default_request_url)
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'service_id': service_id2}
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, body)
self._create_endpoint_group_project_association(endpoint_group_id, self.default_domain_project_id)
endpoints_url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {'project_id': self.default_domain_project_id})
r = self.get(endpoints_url)
endpoints = self.assertValidEndpointListResponse(r)
self.assertEqual(2, len(endpoints))
user_id = uuid.uuid4().hex
catalog_list = self.catalog_api.get_v3_catalog(user_id, self.default_domain_project_id)
self.assertEqual(2, len(catalog_list))
url = self._get_project_endpoint_group_url(endpoint_group_id, self.default_domain_project_id)
self.delete(url)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.delete(url)
r = self.get(endpoints_url)
endpoints = self.assertValidEndpointListResponse(r)
self.assertEqual(1, len(endpoints))
catalog_list = self.catalog_api.get_v3_catalog(user_id, self.default_domain_project_id)
self.assertEqual(1, len(catalog_list)) | 5,900,104,677,645,789,000 | GET /OS-EP-FILTER/projects/{project_id}/endpoints
Valid project, endpoint id, and endpoint group test case. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | test_list_endpoints_associated_with_project_endpoint_group | hashnfv/hashnfv-moon | python | def test_list_endpoints_associated_with_project_endpoint_group(self):
'GET /OS-EP-FILTER/projects/{project_id}/endpoints\n\n Valid project, endpoint id, and endpoint group test case.\n\n '
service_ref = unit.new_service_ref()
response = self.post('/services', body={'service': service_ref})
service_id2 = response.result['service']['id']
self._create_endpoint_and_associations(self.default_domain_project_id, service_id2)
self._create_endpoint_and_associations(self.default_domain_project_id)
self.put(self.default_request_url)
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'service_id': service_id2}
endpoint_group_id = self._create_valid_endpoint_group(self.DEFAULT_ENDPOINT_GROUP_URL, body)
self._create_endpoint_group_project_association(endpoint_group_id, self.default_domain_project_id)
endpoints_url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {'project_id': self.default_domain_project_id})
r = self.get(endpoints_url)
endpoints = self.assertValidEndpointListResponse(r)
self.assertEqual(2, len(endpoints))
user_id = uuid.uuid4().hex
catalog_list = self.catalog_api.get_v3_catalog(user_id, self.default_domain_project_id)
self.assertEqual(2, len(catalog_list))
url = self._get_project_endpoint_group_url(endpoint_group_id, self.default_domain_project_id)
self.delete(url)
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id})
self.delete(url)
r = self.get(endpoints_url)
endpoints = self.assertValidEndpointListResponse(r)
self.assertEqual(1, len(endpoints))
catalog_list = self.catalog_api.get_v3_catalog(user_id, self.default_domain_project_id)
self.assertEqual(1, len(catalog_list)) |
def _create_endpoint_and_associations(self, project_id, service_id=None):
'Creates an endpoint associated with service and project.'
if (not service_id):
service_ref = unit.new_service_ref()
response = self.post('/services', body={'service': service_ref})
service_id = response.result['service']['id']
endpoint_ref = unit.new_endpoint_ref(service_id=service_id, interface='public', region_id=self.region_id)
response = self.post('/endpoints', body={'endpoint': endpoint_ref})
endpoint = response.result['endpoint']
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': endpoint['id']}))
return endpoint | 6,360,263,848,377,317,000 | Creates an endpoint associated with service and project. | keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | _create_endpoint_and_associations | hashnfv/hashnfv-moon | python | def _create_endpoint_and_associations(self, project_id, service_id=None):
if (not service_id):
service_ref = unit.new_service_ref()
response = self.post('/services', body={'service': service_ref})
service_id = response.result['service']['id']
endpoint_ref = unit.new_endpoint_ref(service_id=service_id, interface='public', region_id=self.region_id)
response = self.post('/endpoints', body={'endpoint': endpoint_ref})
endpoint = response.result['endpoint']
self.put(('/OS-EP-FILTER/projects/%(project_id)s/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': endpoint['id']}))
return endpoint |
def LysinimicrobiumLuteum(directed: bool=False, preprocess: bool=True, load_nodes: bool=True, verbose: int=2, cache: bool=True, cache_path: str='graphs/string', version: str='links.v11.5', **additional_graph_kwargs: Dict) -> Graph:
'Return new instance of the Lysinimicrobium luteum graph.\n\n The graph is automatically retrieved from the STRING repository.\t\n\n Parameters\n -------------------\n directed: bool = False\n Whether to load the graph as directed or undirected.\n By default false.\n preprocess: bool = True\n Whether to preprocess the graph to be loaded in \n optimal time and memory.\n load_nodes: bool = True,\n Whether to load the nodes vocabulary or treat the nodes\n simply as a numeric range.\n verbose: int = 2,\n Whether to show loading bars during the retrieval and building\n of the graph.\n cache: bool = True\n Whether to use cache, i.e. download files only once\n and preprocess them only once.\n cache_path: str = "graphs"\n Where to store the downloaded graphs.\n version: str = "links.v11.5"\n The version of the graph to retrieve.\t\t\n\tThe available versions are:\n\t\t\t- homology.v11.5\n\t\t\t- physical.links.v11.5\n\t\t\t- links.v11.5\n additional_graph_kwargs: Dict\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instance of Lysinimicrobium luteum graph.\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t```bib\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t```\n '
return AutomaticallyRetrievedGraph(graph_name='LysinimicrobiumLuteum', repository='string', version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)() | -8,324,764,101,466,745,000 | Return new instance of the Lysinimicrobium luteum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of Lysinimicrobium luteum graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
``` | bindings/python/ensmallen/datasets/string/lysinimicrobiumluteum.py | LysinimicrobiumLuteum | AnacletoLAB/ensmallen | python | def LysinimicrobiumLuteum(directed: bool=False, preprocess: bool=True, load_nodes: bool=True, verbose: int=2, cache: bool=True, cache_path: str='graphs/string', version: str='links.v11.5', **additional_graph_kwargs: Dict) -> Graph:
'Return new instance of the Lysinimicrobium luteum graph.\n\n The graph is automatically retrieved from the STRING repository.\t\n\n Parameters\n -------------------\n directed: bool = False\n Whether to load the graph as directed or undirected.\n By default false.\n preprocess: bool = True\n Whether to preprocess the graph to be loaded in \n optimal time and memory.\n load_nodes: bool = True,\n Whether to load the nodes vocabulary or treat the nodes\n simply as a numeric range.\n verbose: int = 2,\n Whether to show loading bars during the retrieval and building\n of the graph.\n cache: bool = True\n Whether to use cache, i.e. download files only once\n and preprocess them only once.\n cache_path: str = "graphs"\n Where to store the downloaded graphs.\n version: str = "links.v11.5"\n The version of the graph to retrieve.\t\t\n\tThe available versions are:\n\t\t\t- homology.v11.5\n\t\t\t- physical.links.v11.5\n\t\t\t- links.v11.5\n additional_graph_kwargs: Dict\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instance of Lysinimicrobium luteum graph.\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t```bib\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t```\n '
return AutomaticallyRetrievedGraph(graph_name='LysinimicrobiumLuteum', repository='string', version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)() |
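A minimal usage sketch for the retriever in the record above (illustrative only, not part of the dataset: the import path is inferred from the row's path column, and a working ensmallen install with network access to the STRING servers is assumed):
```python
# Hypothetical usage of the LysinimicrobiumLuteum retriever shown above.
# Assumes the ensmallen package exposes the module at the recorded path;
# downloads land under graphs/string and are reused on later calls.
from ensmallen.datasets.string import LysinimicrobiumLuteum

graph = LysinimicrobiumLuteum(directed=False, version="links.v11.5")
print(graph)
```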
def length(head) -> int:
'\n The method length, which accepts a linked list\n (head), and returns the length of the list.\n :param head:\n :return:\n '
i = 0
if (head is None):
return 0
while (head.next is not None):
head = head.next
i += 1
return (i + 1) | -2,736,852,956,723,450,000 | The method length, which accepts a linked list
(head), and returns the length of the list.
:param head:
:return: | kyu_7/fun_with_lists_length/length.py | length | iKostanOrg/codewars | python | def length(head) -> int:
'\n The method length, which accepts a linked list\n (head), and returns the length of the list.\n :param head:\n :return:\n '
i = 0
if (head is None):
return 0
while (head.next is not None):
head = head.next
i += 1
return (i + 1) |
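The kata record above assumes list nodes expose a next attribute but ships no node type; a self-contained check with a hypothetical Node class (an assumption of this sketch, not part of the original solution) exercises both paths:
```python
# Hypothetical minimal node type; the kata supplies its own linked lists.
class Node:
    def __init__(self, value, next=None):
        self.value = value
        self.next = next

head = Node(1, Node(2, Node(3)))
assert length(head) == 3  # walks head.next twice, returns i + 1
assert length(None) == 0  # the early-return guard for an empty list
```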
def mzcompose_location(mz_root: str) -> pathlib.Path:
'Return the absolute path to mzcompose.\n\n MZ_ROOT is expected to be set via pyactivate.\n '
return pathlib.Path(mz_root, 'bin', 'mzcompose') | 2,074,487,505,857,838,300 | Return the absolute path to mzcompose.
MZ_ROOT is expected to be set via pyactivate. | misc/python/materialize/cli/mzbench.py | mzcompose_location | antifuchs/materialize | python | def mzcompose_location(mz_root: str) -> pathlib.Path:
'Return the absolute path to mzcompose.\n\n MZ_ROOT is expected to be set via pyactivate.\n '
return pathlib.Path(mz_root, 'bin', 'mzcompose') |
def enumerate_cpu_counts() -> typing.List[int]:
'This program prints the number of CPU counts to benchmark on this machine.\n\n We remove some percentage of CPU cores off the top for system / background processing. With\n the CPUs that remain, we generate a list of evenly spaced worker counts. The list is limited\n by the number of trials desired. This is meant to help us explore the number of CPUs that\n should be dedicated to MZ_WORKERS, not as a prescription for the correct values to choose.\n\n On a Macbook with 8 cores, this will return [6, 4, 3, 2].\n\n On a 56 core machine, this returns [24, 18, 12, 6].\n\n On a 96 core machine, this returns [41, 30, 20, 10].\n '
max_cpus = round((multiprocessing.cpu_count() * 0.425))
num_trials = 4
worker_counts = [round(((i * max_cpus) / num_trials)) for i in range(num_trials, 0, (- 1))]
return list(reversed(sorted(set(worker_counts)))) | 5,399,508,490,001,828,000 | This program prints the number of CPU counts to benchmark on this machine.
We remove some percentage of CPU cores off the top for system / background processing. With
the CPUs that remain, we generate a list of evenly spaced worker counts. The list is limited
by the number of trials desired. This is meant to help us explore the number of CPUs that
should be dedicated to MZ_WORKERS, not as a prescription for the correct values to choose.
On a Macbook with 8 cores, this will return [6, 4, 3, 2].
On a 56 core machine, this returns [24, 18, 12, 6].
On a 96 core machine, this returns [41, 30, 20, 10]. | misc/python/materialize/cli/mzbench.py | enumerate_cpu_counts | antifuchs/materialize | python | def enumerate_cpu_counts() -> typing.List[int]:
'This program prints the number of CPU counts to benchmark on this machine.\n\n We remove some percentage of CPU cores off the top for system / background processing. With\n the CPUs that remain, we generate a list of evenly spaced worker counts. The list is limited\n by the number of trials desired. This is meant to help us explore the number of CPUs that\n should be dedicated to MZ_WORKERS, not as a prescription for the correct values to choose.\n\n On a Macbook with 8 cores, this will return [6, 4, 3, 2].\n\n On a 56 core machine, this returns [24, 18, 12, 6].\n\n On a 96 core machine, this returns [41, 30, 20, 10].\n '
max_cpus = round((multiprocessing.cpu_count() * 0.425))
num_trials = 4
worker_counts = [round(((i * max_cpus) / num_trials)) for i in range(num_trials, 0, (- 1))]
return list(reversed(sorted(set(worker_counts)))) |
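The two mzbench helpers above compose naturally; a rough sketch of a caller (assumes both functions are in scope, the MZ_ROOT fallback is a placeholder since pyactivate normally provides the variable, and the printed worker counts depend on the host's core count):
```python
import os

# Placeholder fallback; real runs get MZ_ROOT from the pyactivate environment.
mz_root = os.environ.get("MZ_ROOT", "/path/to/materialize")

print(mzcompose_location(mz_root))  # e.g. /path/to/materialize/bin/mzcompose
for workers in enumerate_cpu_counts():
    print(f"would benchmark with MZ_WORKERS={workers}")
```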
def __init__(self, Ksfill=None, winding=(- 1), slot=(- 1), L1=0.35, mat_type=(- 1), Nrvd=0, Wrvd=0, Kf1=0.95, is_internal=True, Rint=0, Rext=1, is_stator=True, axial_vent=(- 1), notch=(- 1), init_dict=None, init_str=None):
'Constructor of the class. Can be used in three ways:\n - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value\n for pyleecan type, -1 will call the default constructor\n - __init__ (init_dict = d) d must be a dictionary with property names as keys\n - __init__ (init_str = s) s must be a string\n s is the file path to load\n\n ndarray or list can be given for Vector and Matrix\n object or dict can be given for pyleecan Object'
if (init_str is not None):
init_dict = load_init_dict(init_str)[1]
if (init_dict is not None):
assert (type(init_dict) is dict)
if ('Ksfill' in list(init_dict.keys())):
Ksfill = init_dict['Ksfill']
if ('winding' in list(init_dict.keys())):
winding = init_dict['winding']
if ('slot' in list(init_dict.keys())):
slot = init_dict['slot']
if ('L1' in list(init_dict.keys())):
L1 = init_dict['L1']
if ('mat_type' in list(init_dict.keys())):
mat_type = init_dict['mat_type']
if ('Nrvd' in list(init_dict.keys())):
Nrvd = init_dict['Nrvd']
if ('Wrvd' in list(init_dict.keys())):
Wrvd = init_dict['Wrvd']
if ('Kf1' in list(init_dict.keys())):
Kf1 = init_dict['Kf1']
if ('is_internal' in list(init_dict.keys())):
is_internal = init_dict['is_internal']
if ('Rint' in list(init_dict.keys())):
Rint = init_dict['Rint']
if ('Rext' in list(init_dict.keys())):
Rext = init_dict['Rext']
if ('is_stator' in list(init_dict.keys())):
is_stator = init_dict['is_stator']
if ('axial_vent' in list(init_dict.keys())):
axial_vent = init_dict['axial_vent']
if ('notch' in list(init_dict.keys())):
notch = init_dict['notch']
self.Ksfill = Ksfill
self.winding = winding
super(LamSlotWind, self).__init__(slot=slot, L1=L1, mat_type=mat_type, Nrvd=Nrvd, Wrvd=Wrvd, Kf1=Kf1, is_internal=is_internal, Rint=Rint, Rext=Rext, is_stator=is_stator, axial_vent=axial_vent, notch=notch) | 1,021,043,929,717,883,100 | Constructor of the class. Can be used in three ways:
- __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object | pyleecan/Classes/LamSlotWind.py | __init__ | IrakozeFD/pyleecan | python | def __init__(self, Ksfill=None, winding=(- 1), slot=(- 1), L1=0.35, mat_type=(- 1), Nrvd=0, Wrvd=0, Kf1=0.95, is_internal=True, Rint=0, Rext=1, is_stator=True, axial_vent=(- 1), notch=(- 1), init_dict=None, init_str=None):
'Constructor of the class. Can be used in three ways:\n - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value\n for pyleecan type, -1 will call the default constructor\n - __init__ (init_dict = d) d must be a dictionary with property names as keys\n - __init__ (init_str = s) s must be a string\n s is the file path to load\n\n ndarray or list can be given for Vector and Matrix\n object or dict can be given for pyleecan Object'
if (init_str is not None):
init_dict = load_init_dict(init_str)[1]
if (init_dict is not None):
assert (type(init_dict) is dict)
if ('Ksfill' in list(init_dict.keys())):
Ksfill = init_dict['Ksfill']
if ('winding' in list(init_dict.keys())):
winding = init_dict['winding']
if ('slot' in list(init_dict.keys())):
slot = init_dict['slot']
if ('L1' in list(init_dict.keys())):
L1 = init_dict['L1']
if ('mat_type' in list(init_dict.keys())):
mat_type = init_dict['mat_type']
if ('Nrvd' in list(init_dict.keys())):
Nrvd = init_dict['Nrvd']
if ('Wrvd' in list(init_dict.keys())):
Wrvd = init_dict['Wrvd']
if ('Kf1' in list(init_dict.keys())):
Kf1 = init_dict['Kf1']
if ('is_internal' in list(init_dict.keys())):
is_internal = init_dict['is_internal']
if ('Rint' in list(init_dict.keys())):
Rint = init_dict['Rint']
if ('Rext' in list(init_dict.keys())):
Rext = init_dict['Rext']
if ('is_stator' in list(init_dict.keys())):
is_stator = init_dict['is_stator']
if ('axial_vent' in list(init_dict.keys())):
axial_vent = init_dict['axial_vent']
if ('notch' in list(init_dict.keys())):
notch = init_dict['notch']
self.Ksfill = Ksfill
self.winding = winding
super(LamSlotWind, self).__init__(slot=slot, L1=L1, mat_type=mat_type, Nrvd=Nrvd, Wrvd=Wrvd, Kf1=Kf1, is_internal=is_internal, Rint=Rint, Rext=Rext, is_stator=is_stator, axial_vent=axial_vent, notch=notch) |
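A hedged illustration of the three construction modes the __init__ docstring above describes (argument values are placeholders, the import path follows the row's path column, and the file-based mode needs a real saved machine, so it is left commented out):
```python
# Illustrative only; the values below are placeholders, not a real machine.
from pyleecan.Classes.LamSlotWind import LamSlotWind

# Mode 1: plain keyword arguments (pyleecan-typed fields default via -1).
lam_kw = LamSlotWind(L1=0.4, Rint=0.05, Rext=0.12)

# Mode 2: a dict keyed by property names, passed through init_dict.
lam_dict = LamSlotWind(init_dict={"L1": 0.4, "Rint": 0.05, "Rext": 0.12})

# Mode 3 (requires an existing file on disk):
# lam_file = LamSlotWind(init_str="path/to/lam_slot_wind.json")
```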
def __str__(self):
'Convert this object into a readable string (for print)'
LamSlotWind_str = ''
LamSlotWind_str += super(LamSlotWind, self).__str__()
LamSlotWind_str += (('Ksfill = ' + str(self.Ksfill)) + linesep)
if (self.winding is not None):
tmp = self.winding.__str__().replace(linesep, (linesep + '\t')).rstrip('\t')
LamSlotWind_str += ('winding = ' + tmp)
else:
LamSlotWind_str += (('winding = None' + linesep) + linesep)
return LamSlotWind_str | 8,961,390,577,734,420,000 | Convert this object into a readable string (for print) | pyleecan/Classes/LamSlotWind.py | __str__ | IrakozeFD/pyleecan | python | def __str__(self):
LamSlotWind_str = ''
LamSlotWind_str += super(LamSlotWind, self).__str__()
LamSlotWind_str += (('Ksfill = ' + str(self.Ksfill)) + linesep)
if (self.winding is not None):
tmp = self.winding.__str__().replace(linesep, (linesep + '\t')).rstrip('\t')
LamSlotWind_str += ('winding = ' + tmp)
else:
LamSlotWind_str += (('winding = None' + linesep) + linesep)
return LamSlotWind_str |
def __eq__(self, other):
'Compare two objects (skip parent)'
if (type(other) != type(self)):
return False
if (not super(LamSlotWind, self).__eq__(other)):
return False
if (other.Ksfill != self.Ksfill):
return False
if (other.winding != self.winding):
return False
return True | 4,571,830,325,294,311,000 | Compare two objects (skip parent) | pyleecan/Classes/LamSlotWind.py | __eq__ | IrakozeFD/pyleecan | python | def __eq__(self, other):
if (type(other) != type(self)):
return False
if (not super(LamSlotWind, self).__eq__(other)):
return False
if (other.Ksfill != self.Ksfill):
return False
if (other.winding != self.winding):
return False
return True |