Dataset schema (one row per source file):

| Column | Type | Range / values |
|---|---|---|
| hexsha | stringlengths | 40-40 |
| size | int64 | 1-1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3-239 |
| max_stars_repo_name | stringlengths | 5-130 |
| max_stars_repo_head_hexsha | stringlengths | 40-78 |
| max_stars_repo_licenses | sequencelengths | 1-10 |
| max_stars_count | int64 | 1-191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24-24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24-24, nullable (⌀) |
| max_issues_repo_path | stringlengths | 3-239 |
| max_issues_repo_name | stringlengths | 5-130 |
| max_issues_repo_head_hexsha | stringlengths | 40-78 |
| max_issues_repo_licenses | sequencelengths | 1-10 |
| max_issues_count | int64 | 1-67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24-24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24-24, nullable (⌀) |
| max_forks_repo_path | stringlengths | 3-239 |
| max_forks_repo_name | stringlengths | 5-130 |
| max_forks_repo_head_hexsha | stringlengths | 40-78 |
| max_forks_repo_licenses | sequencelengths | 1-10 |
| max_forks_count | int64 | 1-105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24-24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24-24, nullable (⌀) |
| content | stringlengths | 1-1.03M |
| avg_line_length | float64 | 1-958k |
| max_line_length | int64 | 1-1.03M |
| alphanum_fraction | float64 | 0-1 |
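Each row that follows uses this schema: the repository metadata columns come first, then the file `content`, then the three per-file statistics. As a rough sketch of how a dump with these columns might be inspected (the file name, Parquet storage, and pandas usage are assumptions, not stated anywhere above):

```python
import pandas as pd

# Hypothetical file name; the schema above does not say how the split is stored.
df = pd.read_parquet("code_files.parquet")

# Select small, MIT-licensed Python files using the columns described above.
small_mit = df[
    (df["lang"] == "Python")
    & (df["size"] < 10_000)
    & df["max_stars_repo_licenses"].apply(lambda licenses: "MIT" in licenses)
]
print(small_mit[["hexsha", "max_stars_repo_path", "avg_line_length"]].head())
```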
4a23d1921fe5b6fd01631d6bc07e56ced6fa3e98 | 2,039 | py | Python | src/school_college/migrations/0001_initial.py | paceite/Seelife---An-NGO-Website | 02e6b5ec94d9a76079eccde54b3cd40b9e979def | ["MIT"] | null | null | null | src/school_college/migrations/0001_initial.py | paceite/Seelife---An-NGO-Website | 02e6b5ec94d9a76079eccde54b3cd40b9e979def | ["MIT"] | null | null | null | src/school_college/migrations/0001_initial.py | paceite/Seelife---An-NGO-Website | 02e6b5ec94d9a76079eccde54b3cd40b9e979def | ["MIT"] | null | null | null |
# Generated by Django 2.1.7 on 2019-03-17 01:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Attendance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_time', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Parents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('school', models.CharField(max_length=100)),
('is_present', models.BooleanField(default=False)),
('parents', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='school_college.Parents')),
],
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
],
),
migrations.AddField(
model_name='student',
name='teacher',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='school_college.Teacher'),
),
]
| 37.759259 | 121 | 0.572339 |
4a23d1d1b6e7498aaf73361acab6916f1d1d06a3 | 12,884 | py | Python | src/olympia/devhub/utils.py | snifhex/addons-server | 2b9dee65c10c0dca700ff2d25f3694c7cf769816 | ["BSD-3-Clause"] | null | null | null | src/olympia/devhub/utils.py | snifhex/addons-server | 2b9dee65c10c0dca700ff2d25f3694c7cf769816 | ["BSD-3-Clause"] | null | null | null | src/olympia/devhub/utils.py | snifhex/addons-server | 2b9dee65c10c0dca700ff2d25f3694c7cf769816 | ["BSD-3-Clause"] | null | null | null |
import uuid
import waffle
from celery import chain, chord
from django.conf import settings
from django.forms import ValidationError
from django.utils.translation import gettext
import olympia.core.logger
from olympia import amo, core
from olympia.amo.urlresolvers import linkify_and_clean
from olympia.files.models import File, FileUpload
from olympia.files.tasks import repack_fileupload
from olympia.files.utils import parse_addon, parse_xpi
from olympia.scanners.tasks import run_customs, run_wat, run_yara, call_mad_api
from olympia.translations.models import Translation
from olympia.versions.utils import process_color_value
from . import tasks
log = olympia.core.logger.getLogger('z.devhub')
def process_validation(validation, file_hash=None, channel=amo.RELEASE_CHANNEL_LISTED):
"""Process validation results into the format expected by the web
frontend, including transforming certain fields into HTML, mangling
compatibility messages, and limiting the number of messages displayed."""
validation = fix_addons_linter_output(validation, channel=channel)
# Set an ending tier if we don't have one (which probably means
# we're dealing with mock validation results or the addons-linter).
validation.setdefault('ending_tier', 0)
if not validation['ending_tier'] and validation['messages']:
validation['ending_tier'] = max(
msg.get('tier', -1) for msg in validation['messages']
)
limit_validation_results(validation)
htmlify_validation(validation)
return validation
def limit_validation_results(validation):
"""Limit the number of messages displayed in a set of validation results,
and if truncation has occurred, add a new message explaining so."""
messages = validation['messages']
lim = settings.VALIDATOR_MESSAGE_LIMIT
if lim and len(messages) > lim:
# Sort messages by severity first so that the most important messages
# are the one we keep.
TYPES = {'error': 0, 'warning': 2, 'notice': 3}
def message_key(message):
return TYPES.get(message.get('type'))
messages.sort(key=message_key)
leftover_count = len(messages) - lim
del messages[lim:]
# The type of the truncation message should be the type of the most
# severe message in the results.
if validation['errors']:
msg_type = 'error'
elif validation['warnings']:
msg_type = 'warning'
else:
msg_type = 'notice'
compat_type = (
msg_type if any(msg.get('compatibility_type') for msg in messages) else None
)
message = (
gettext(
'Validation generated too many errors/warnings so %s '
'messages were truncated. After addressing the visible '
"messages, you'll be able to see the others."
)
% leftover_count
)
messages.insert(
0,
{
'tier': 1,
'type': msg_type,
# To respect the message structure, see bug 1139674.
'id': ['validation', 'messages', 'truncated'],
'message': message,
'description': [],
'compatibility_type': compat_type,
},
)
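# For example (illustrative numbers, not taken from this module): with
# settings.VALIDATOR_MESSAGE_LIMIT set to 10 and 12 reported messages, the 10
# most severe messages are kept and a leading notice explains that 2 more
# messages were truncated.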
def htmlify_validation(validation):
"""Process the `message` and `description` fields into
safe HTML, with URLs turned into links."""
for msg in validation['messages']:
msg['message'] = linkify_and_clean(msg['message'])
if 'description' in msg:
# Description may be returned as a single string, or list of
# strings. Turn it into lists for simplicity on the client side.
if not isinstance(msg['description'], (list, tuple)):
msg['description'] = [msg['description']]
msg['description'] = [
linkify_and_clean(text) for text in msg['description']
]
def fix_addons_linter_output(validation, channel):
"""Make sure the output from the addons-linter is the same as amo-validator
for backwards compatibility reasons."""
if 'messages' in validation:
# addons-linter doesn't contain this, return the original validation
# untouched
return validation
def _merged_messages():
for type_ in ('errors', 'notices', 'warnings'):
for msg in validation[type_]:
# FIXME: Remove `uid` once addons-linter generates it
msg['uid'] = uuid.uuid4().hex
msg['type'] = msg.pop('_type')
msg['id'] = [msg.pop('code')]
# We don't have the concept of tiers for the addons-linter
# currently
msg['tier'] = 1
yield msg
identified_files = {
name: {'path': path}
for name, path in validation['metadata'].get('jsLibs', {}).items()
}
# Essential metadata.
metadata = {
'listed': channel == amo.RELEASE_CHANNEL_LISTED,
'identified_files': identified_files,
'is_webextension': True,
}
# Add metadata already set by the linter.
metadata.update(validation.get('metadata', {}))
return {
'success': not validation['errors'],
'compatibility_summary': {
'warnings': 0,
'errors': 0,
'notices': 0,
},
'notices': validation['summary']['notices'],
'warnings': validation['summary']['warnings'],
'errors': validation['summary']['errors'],
'messages': list(_merged_messages()),
'metadata': metadata,
'ending_tier': 5,
}
class Validator:
"""
Class which handles creating or fetching validation results for File
and FileUpload instances.
It forwards the actual validation to `devhub.tasks:validate_upload`
and `devhub.tasks:validate_file` but implements shortcuts for
legacy add-ons and search plugins to avoid running the linter.
"""
def __init__(self, file_, addon=None, listed=None, final_task=None):
self.addon = addon
self.file = None
self.prev_file = None
if isinstance(file_, FileUpload):
assert listed is not None
channel = (
amo.RELEASE_CHANNEL_LISTED if listed else amo.RELEASE_CHANNEL_UNLISTED
)
is_mozilla_signed = False
# We're dealing with a bare file upload. Try to extract the
# metadata that we need to match it against a previous upload
# from the file itself.
try:
addon_data = parse_addon(file_, minimal=True)
is_mozilla_signed = addon_data.get('is_mozilla_signed_extension', False)
except ValidationError as form_error:
log.info(
'could not parse addon for upload {}: {}'.format(
file_.pk, form_error
)
)
addon_data = None
else:
file_.update(version=addon_data.get('version'))
assert not file_.validation
validation_tasks = self.create_file_upload_tasks(
upload_pk=file_.pk, channel=channel, is_mozilla_signed=is_mozilla_signed
)
elif isinstance(file_, File):
# The listed flag for a File object should always come from
# the status of its owner Addon. If the caller tries to override
# this, something is wrong.
assert listed is None
channel = file_.version.channel
is_mozilla_signed = file_.is_mozilla_signed_extension
self.file = file_
self.addon = self.file.version.addon
addon_data = {'guid': self.addon.guid, 'version': self.file.version.version}
validation_tasks = [
tasks.create_initial_validation_results.si(),
tasks.validate_file.s(file_.pk),
tasks.handle_file_validation_result.s(file_.pk),
]
else:
raise ValueError
if final_task:
validation_tasks.append(final_task)
self.task = chain(*validation_tasks)
# Create a cache key for the task, so multiple requests to validate the
# same object do not result in duplicate tasks.
opts = file_._meta
self.cache_key = 'validation-task:{}.{}:{}:{}'.format(
opts.app_label, opts.object_name, file_.pk, listed
)
def get_task(self):
"""Return task chain to execute to trigger validation."""
return self.task
def create_file_upload_tasks(self, upload_pk, channel, is_mozilla_signed):
"""
This method creates the validation chain used during the submission
process, combining tasks in parallel (chord) with tasks chained
together (where the output is used as input of the next task).
"""
tasks_in_parallel = [tasks.forward_linter_results.s(upload_pk)]
if waffle.switch_is_active('enable-yara'):
tasks_in_parallel.append(run_yara.s(upload_pk))
if waffle.switch_is_active('enable-customs'):
tasks_in_parallel.append(run_customs.s(upload_pk))
if waffle.switch_is_active('enable-wat'):
tasks_in_parallel.append(run_wat.s(upload_pk))
return [
tasks.create_initial_validation_results.si(),
repack_fileupload.s(upload_pk),
tasks.validate_upload.s(upload_pk, channel),
tasks.check_for_api_keys_in_file.s(upload_pk),
chord(tasks_in_parallel, call_mad_api.s(upload_pk)),
tasks.handle_upload_validation_result.s(
upload_pk, channel, is_mozilla_signed
),
]
def extract_theme_properties(addon, channel):
version = addon.find_latest_version(channel)
if not version or not version.all_files:
return {}
try:
parsed_data = parse_xpi(
version.all_files[0].file_path, addon=addon, user=core.get_user()
)
except ValidationError:
# If we can't parse the existing manifest safely return.
return {}
theme_props = parsed_data.get('theme', {})
# pre-process colors to deprecated colors; strip spaces.
theme_props['colors'] = dict(
process_color_value(prop, color)
for prop, color in theme_props.get('colors', {}).items()
)
# upgrade manifest from deprecated headerURL to theme_frame
if 'headerURL' in theme_props.get('images', {}):
url = theme_props['images'].pop('headerURL')
theme_props['images']['theme_frame'] = url
return theme_props
def wizard_unsupported_properties(data, wizard_fields):
# collect any 'theme' level unsupported properties
unsupported = [key for key in data.keys() if key not in ['colors', 'images']]
# and any unsupported 'colors' properties
unsupported += [key for key in data.get('colors', {}) if key not in wizard_fields]
# and finally any 'images' properties (wizard only supports the background)
unsupported += [key for key in data.get('images', {}) if key != 'theme_frame']
return unsupported
def fetch_existing_translations_from_addon(addon, properties):
translation_ids_gen = (getattr(addon, prop + '_id', None) for prop in properties)
translation_ids = [id_ for id_ in translation_ids_gen if id_]
# Just get all the values together to make it simplier
return {str(value) for value in Translation.objects.filter(id__in=translation_ids)}
def add_manifest_version_error(validation):
mv = validation.get('metadata', {}).get('manifestVersion')
if (
mv != 3
or waffle.switch_is_active('enable-mv3-submissions')
or 'messages' not in validation
):
return
msg = gettext(
'Manifest V3 is currently not supported for upload. '
'{start_href}Read more about the support timeline{end_href}.'
)
url = 'https://blog.mozilla.org/addons/2021/05/27/manifest-v3-update/'
start_href = f'<a href="{url}" target="_blank" rel="noopener">'
new_error_message = msg.format(start_href=start_href, end_href='</a>')
for index, message in enumerate(validation['messages']):
if message.get('dataPath') == '/manifest_version':
# if we find the linter manifest_version=3 warning, replace it
validation['messages'][index]['message'] = new_error_message
break
else:
# otherwise insert a new error at the start of the errors
validation['messages'].insert(
0,
{
'type': 'error',
'message': new_error_message,
'tier': 1,
'fatal': True,
},
)
| 36.292958 | 88 | 0.625349 |
4a23d1dcbf85cceb229c682da5adedc41c7c6fed | 6,209 | py | Python | salt/modules/ps.py | kaptk2/salt | 8b06fff072c20d77f3447d8521a4667413218ab3 | ["Apache-2.0"] | 1 | 2015-06-05T13:47:02.000Z | 2015-06-05T13:47:02.000Z | salt/modules/ps.py | kaptk2/salt | 8b06fff072c20d77f3447d8521a4667413218ab3 | ["Apache-2.0"] | null | null | null | salt/modules/ps.py | kaptk2/salt | 8b06fff072c20d77f3447d8521a4667413218ab3 | ["Apache-2.0"] | null | null | null |
'''
A salt interface to psutil, a system and process library.
See http://code.google.com/p/psutil.
:depends: - psutil Python module
'''
# Import python libs
import sys
import time
# Import third party libs
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
def __virtual__():
if not HAS_PSUTIL:
return False
# The python 2.6 version of psutil lacks several functions
# used in this salt module so instead of spaghetti string
# code to try to bring sanity to everything, disable it.
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
return False
return "ps"
def top(num_processes=5, interval=3):
'''
Return a list of top CPU consuming processes during the interval.
num_processes = return the top N CPU consuming processes
interval = the number of seconds to sample CPU usage over
CLI Examples::
salt '*' ps.top
salt '*' ps.top 5 10
'''
result = []
start_usage = {}
for pid in psutil.get_pid_list():
try:
process = psutil.Process(pid)
except psutil.NoSuchProcess:
continue
user, system = process.get_cpu_times()
start_usage[process] = user + system
time.sleep(interval)
usage = set()
for process, start in start_usage.items():
user, system = process.get_cpu_times()
now = user + system
diff = now - start
usage.add((diff, process))
for idx, (diff, process) in enumerate(reversed(sorted(usage))):
if num_processes and idx >= num_processes:
break
if len(process.cmdline) == 0:
cmdline = [process.name]
else:
cmdline = process.cmdline
info = {'cmd': cmdline,
'pid': process.pid,
'create_time': process.create_time}
for key, value in process.get_cpu_times()._asdict().items():
info['cpu.{0}'.format(key)] = value
for key, value in process.get_memory_info()._asdict().items():
info['mem.{0}'.format(key)] = value
result.append(info)
return result
def get_pid_list():
'''
Return a list of process ids (PIDs) for all running processes.
CLI Example::
salt '*' ps.get_pid_list
'''
return psutil.get_pid_list()
def cpu_percent(interval=0.1, per_cpu=False):
'''
Return the percent of time the CPU is busy.
interval
the number of seconds to sample CPU usage over
per_cpu
if True return an array of CPU percent busy for each CPU, otherwise
aggregate all percents into one number
CLI Example::
salt '*' ps.cpu_percent
'''
if per_cpu:
result = list(psutil.cpu_percent(interval, True))
else:
result = psutil.cpu_percent(interval)
return result
def cpu_times(per_cpu=False):
'''
Return the percent of time the CPU spends in each state,
e.g. user, system, idle, nice, iowait, irq, softirq.
per_cpu
if True return an array of percents for each CPU, otherwise aggregate
all percents into one number
CLI Example::
salt '*' ps.cpu_times
'''
if per_cpu:
result = [dict(times._asdict()) for times in psutil.cpu_times(True)]
else:
result = dict(psutil.cpu_times(per_cpu)._asdict())
return result
def physical_memory_usage():
'''
Return a dict that describes free and available physical memory.
CLI Examples::
salt '*' ps.physical_memory_usage
'''
return dict(psutil.phymem_usage()._asdict())
def virtual_memory_usage():
'''
Return a dict that describes free and available memory, both physical
and virtual.
CLI Example::
salt '*' ps.virtual_memory_usage
'''
return dict(psutil.virtmem_usage()._asdict())
def cached_physical_memory():
'''
    Return the amount of cached memory.
CLI Example::
salt '*' ps.cached_physical_memory
'''
return psutil.cached_phymem()
def physical_memory_buffers():
'''
Return the amount of physical memory buffers.
CLI Example::
salt '*' ps.physical_memory_buffers
'''
return psutil.phymem_buffers()
def disk_partitions(all=False):
'''
Return a list of disk partitions and their device, mount point, and
filesystem type.
all
if set to False, only return local, physical partitions (hard disk,
USB, CD/DVD partitions). If True, return all filesystems.
CLI Example::
salt '*' ps.disk_partitions
'''
result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)]
return result
def disk_usage(path):
'''
Given a path, return a dict listing the total available space as well as
the free space, and used space.
CLI Example::
salt '*' ps.disk_usage /home
'''
return dict(psutil.disk_usage(path)._asdict())
def disk_partition_usage(all=False):
'''
Return a list of disk partitions plus the mount point, filesystem and usage
statistics.
CLI Example::
salt '*' ps.disk_partition_usage
'''
result = disk_partitions(all)
for partition in result:
partition.update(disk_usage(partition['mountpoint']))
return result
def total_physical_memory():
'''
Return the total number of bytes of physical memory.
CLI Example::
salt '*' ps.total_physical_memory
'''
return psutil.TOTAL_PHYMEM
def num_cpus():
'''
Return the number of CPUs.
CLI Example::
salt '*' ps.num_cpus
'''
return psutil.NUM_CPUS
def boot_time():
'''
Return the boot time in number of seconds since the epoch began.
CLI Example::
salt '*' ps.boot_time
'''
return psutil.BOOT_TIME
def network_io_counters():
'''
    Return network I/O statistics.
CLI Example::
salt '*' ps.network_io_counters
'''
return dict(psutil.network_io_counters()._asdict())
def disk_io_counters():
'''
    Return disk I/O statistics.
CLI Example::
salt '*' ps.disk_io_counters
'''
return dict(psutil.disk_io_counters()._asdict())
| 22.660584 | 85 | 0.630375 |
4a23d23d0eff725922cdfb57fe221c56b698c113 | 4,117 | py | Python | pp/samples/mask_custom/test_mask.py | PsiQ/gdsfactory | 34c8ecbed465e8eda0d5116687fd02e95e530f35 | ["MIT"] | 16 | 2020-02-03T07:05:31.000Z | 2021-12-29T18:40:09.000Z | pp/samples/mask_custom/test_mask.py | PsiQ/gdsfactory | 34c8ecbed465e8eda0d5116687fd02e95e530f35 | ["MIT"] | 2 | 2020-01-31T20:01:40.000Z | 2020-09-26T17:50:55.000Z | pp/samples/mask_custom/test_mask.py | PsiQ/gdsfactory | 34c8ecbed465e8eda0d5116687fd02e95e530f35 | ["MIT"] | 7 | 2020-02-09T23:16:18.000Z | 2020-10-30T03:12:04.000Z |
"""
This is a sample on how to define custom components.
You can make a repo out of this file, having one custom component per file
"""
import os
import shutil
import pytest
import pp
from pp.config import CONFIG
from pp.autoplacer.yaml_placer import place_from_yaml
from pp.components.spiral_inner_io import spiral_inner_io_euler
from pp.add_termination import add_gratings_and_loop_back
from pp.routing.connect import connect_strip_way_points
from pp.add_padding import add_padding_to_grid
from pp.generate_does import generate_does
from pp.mask.merge_metadata import merge_metadata
def _route_filter(*args, **kwargs):
return connect_strip_way_points(
*args, taper_factory=None, start_straight=5.0, end_straight=5.0, **kwargs
)
def add_te(component, **kwargs):
c = pp.routing.add_fiber_array(
component,
grating_coupler=pp.c.grating_coupler_elliptical_te,
route_filter=_route_filter,
**kwargs,
)
c.test = "passive_optical_te"
c = add_padding_to_grid(c)
return c
def add_tm(component, **kwargs):
c = pp.routing.add_fiber_array(
component,
grating_coupler=pp.c.grating_coupler_elliptical_tm,
route_filter=_route_filter,
bend_radius=20,
**kwargs,
)
c = add_padding_to_grid(c)
return c
@pp.autoname
def coupler_te(gap, length, wg_width=0.5, nominal_wg_width=0.5):
""" sample of component cutback """
c = pp.c.coupler(wg_width=wg_width, gap=gap, length=length)
cc = add_te(c)
return cc
@pp.autoname
def spiral_te(wg_width=0.5, length=2):
""" sample of component cutback
Args:
wg_width: um
        length: mm
"""
c = spiral_inner_io_euler(wg_width=wg_width, length=length)
cc = add_gratings_and_loop_back(
component=c,
grating_coupler=pp.c.grating_coupler_elliptical_te,
bend_factory=pp.c.bend_circular,
)
return cc
@pp.autoname
def spiral_tm(wg_width=0.5, length=2):
""" sample of component cutback """
c = spiral_inner_io_euler(wg_width=wg_width, length=length, dx=10, dy=10, N=5)
cc = add_gratings_and_loop_back(
component=c,
grating_coupler=pp.c.grating_coupler_elliptical_tm,
bend_factory=pp.c.bend_circular,
)
return cc
component_type2factory = dict(
spiral_te=spiral_te, spiral_tm=spiral_tm, coupler_te=coupler_te
)
@pytest.fixture
def cleandir():
build_folder = CONFIG["samples_path"] / "mask_custom" / "build"
if build_folder.exists():
shutil.rmtree(build_folder)
@pytest.fixture
def chdir():
workspace_folder = CONFIG["samples_path"] / "mask_custom"
os.chdir(workspace_folder)
@pytest.mark.usefixtures("cleandir")
def test_mask(precision=2e-9):
workspace_folder = CONFIG["samples_path"] / "mask_custom"
build_path = workspace_folder / "build"
doe_root_path = build_path / "cache_doe"
doe_metadata_path = build_path / "doe"
mask_path = build_path / "mask"
does_yml = workspace_folder / "does.yml"
mask_path.mkdir(parents=True, exist_ok=True)
gdspath = mask_path / "sample_mask.gds"
markdown_path = gdspath.with_suffix(".md")
json_path = gdspath.with_suffix(".json")
test_metadata_path = gdspath.with_suffix(".tp.json")
generate_does(
str(does_yml),
component_type2factory=component_type2factory,
precision=precision,
doe_root_path=doe_root_path,
doe_metadata_path=doe_metadata_path,
)
top_level = place_from_yaml(does_yml, precision=precision, root_does=doe_root_path)
top_level.write(str(gdspath))
merge_metadata(gdspath=gdspath)
assert gdspath.exists()
assert markdown_path.exists()
assert json_path.exists()
assert test_metadata_path.exists()
report = open(markdown_path).read()
assert report.count("#") == 2, f" only {report.count('#')} DOEs in {markdown_path}"
return gdspath
if __name__ == "__main__":
# from pprint import pprint
# pprint(component_type2factory)
c = test_mask()
pp.klive.show(c)
# c = coupler_te(gap=0.3, length=20)
# pp.show(c)
| 27.085526 | 87 | 0.705125 |
4a23d289b12bd9185b0e1f4cf969dd2201d0769f | 4,060 | py | Python | helmion/tests/test_helmchart.py | RangelReale/helmion | e90c2b6de1001b3e90811a75cda98082a02857b8 | ["MIT"] | null | null | null | helmion/tests/test_helmchart.py | RangelReale/helmion | e90c2b6de1001b3e90811a75cda98082a02857b8 | ["MIT"] | null | null | null | helmion/tests/test_helmchart.py | RangelReale/helmion | e90c2b6de1001b3e90811a75cda98082a02857b8 | ["MIT"] | null | null | null |
import unittest
from jsonpatchext.mutators import InitItemMutator # type: ignore
from helmion.helmchart import HelmRequest, HelmChart
from helmion.config import BoolFilter
from helmion.processor import DefaultProcessor
class TestInfo(unittest.TestCase):
def setUp(self):
self.req = HelmRequest(repository='https://helm.traefik.io/traefik', chart='traefik', version='9.10.1',
releasename='helmion-traefik', namespace='router')
self.chart = HelmChart(request=self.req, data=[{
'apiVersion': 'apiextensions.k8s.io/v1beta1',
'kind': 'CustomResourceDefinition',
'metadata': {'name': 'ingressroutes.traefik.containo.us'},
'spec': {
'group': 'traefik.containo.us',
'names': {
'kind': 'IngressRoute',
'plural': 'ingressroutes',
'singular': 'ingressroute'
},
'scope': 'Namespaced',
'version': 'v1alpha1'
}
}, {
'apiVersion': 'v1',
'kind': 'ServiceAccount',
'metadata': {
'annotations': None,
'labels': {
'app.kubernetes.io/instance': 'helmion-traefik',
'app.kubernetes.io/managed-by': 'Helm',
'app.kubernetes.io/name': 'traefik',
'helm.sh/chart': 'traefik-9.10.1'
},
'name': 'helmion-traefik'
}
}, {
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'annotations': None,
'labels': {
'app.kubernetes.io/instance': 'helmion-traefik',
'app.kubernetes.io/managed-by': 'Helm',
'app.kubernetes.io/name': 'traefik',
'helm.sh/chart': 'traefik-9.10.1'
},
'name': 'helmion-traefik'
},
'spec': {
'ports': [{
'name': 'web',
'port': 80,
'protocol': 'TCP',
'targetPort': 'web'
},
{
'name': 'websecure',
'port': 443,
'protocol': 'TCP',
'targetPort': 'websecure'
}],
'selector': {
'app.kubernetes.io/instance': 'helmion-traefik',
'app.kubernetes.io/name': 'traefik'
},
'type': 'ClusterIP'
}
}, {
'apiVersion': 'traefik.containo.us/v1alpha1',
'kind': 'IngressRoute',
'metadata': {
'annotations': {'helm.sh/hook': 'post-install,post-upgrade'},
'labels': {
'app.kubernetes.io/instance': 'helmion-traefik',
'app.kubernetes.io/managed-by': 'Helm',
'app.kubernetes.io/name': 'traefik',
'helm.sh/chart': 'traefik-9.10.1'
},
'name': 'helmion-traefik-dashboard'
},
'spec': {
'entryPoints': ['traefik'],
'routes': [{
'kind': 'Rule',
'match': 'PathPrefix(`/dashboard`) || PathPrefix(`/api`)',
'services': [{
'kind': 'TraefikService',
'name': 'api@internal'
}]
}]
}
}])
def test_chart_addnamespace(self):
chart = self.chart.process(DefaultProcessor(add_namespace=True))
for d in chart.data:
if d['kind'] in ['CustomResourceDefinition']:
self.assertFalse('namespace' in d['metadata'])
else:
self.assertTrue('namespace' in d['metadata'])
self.assertEqual(d['metadata']['namespace'], self.req.namespace)
| 37.943925 | 111 | 0.4367 |
4a23d2d0ef405cd2dfa6859aced5273e8a52c744 | 2,090 | py | Python | useful_scripts/calculate_volume.py | shirtsgroup/finite-temperature-crystal-scripts | 799bc882d958d9afa264a168dae0b3051bafaf0b | ["MIT"] | null | null | null | useful_scripts/calculate_volume.py | shirtsgroup/finite-temperature-crystal-scripts | 799bc882d958d9afa264a168dae0b3051bafaf0b | ["MIT"] | 8 | 2017-07-25T04:59:35.000Z | 2021-03-25T22:48:53.000Z | useful_scripts/calculate_volume.py | shirtsgroup/finite-temperature-crystal-scripts | 799bc882d958d9afa264a168dae0b3051bafaf0b | ["MIT"] | 1 | 2021-01-04T07:01:25.000Z | 2021-01-04T07:01:25.000Z |
#!/usr/bin/python
#
# Calculate the volume of a .gro file
#
# Copyright Michael R. Shirts, University of Virginia, 2014
#
import numpy # numerical array library
import pymbar # multistate Bennett acceptance ratio
from pymbar import timeseries # timeseries analysis
from optparse import OptionParser # for parsing command-line options
import pdb
parser = OptionParser()
parser.add_option('-g', '--gro', dest = 'grofile', help = 'Gromacs File') #.gro file to be resized
(options, args) = parser.parse_args()
fname = options.grofile
#=============================================================================================
# CALCULATE THE GRO VOLUME
#=============================================================================================
# Read in input files
fname = options.grofile
infile = open(fname, 'r')
lines = filter(None, (line.rstrip() for line in infile))
infile.close()
print "loading " + fname
#Read in the crystal basis matrix (the last line of the .gro file)
crystal_basis = numpy.zeros([3,3],float) #Matrix to take a crystal vector into xyz coordinates
xyz_to_crystal = numpy.zeros([3,3],float) #Matrix to take an xyz vector into crystal coordinates
tokens = lines[len(lines)-1].split()
oldvect=[]
for i,token in enumerate(tokens):
if i == 0:
crystal_basis[0,0]=float(token)
oldvect.append(token)
elif i==1:
crystal_basis[1,1]=float(token)
oldvect.append(token)
elif i==2:
crystal_basis[2,2]=float(token)
oldvect.append(token)
elif i==3:
crystal_basis[0,1]=float(token)
oldvect.append(token)
elif i==4:
crystal_basis[0,2]=float(token)
oldvect.append(token)
elif i==5:
crystal_basis[1,0]=float(token)
oldvect.append(token)
elif i==6:
crystal_basis[1,2]=float(token)
oldvect.append(token)
elif i==7:
crystal_basis[2,0]=float(token)
oldvect.append(token)
elif i==8:
crystal_basis[2,1]=float(token)
oldvect.append(token)
xyz_to_crystal = numpy.linalg.inv(crystal_basis)
#Calculate the initial volume of the gro file
Volume = float(numpy.linalg.det(crystal_basis))
print "Volume: " + str(Volume)
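# Worked example (illustrative, not part of the original script): for a cubic box
# whose final .gro line reads "5.0 5.0 5.0", the crystal basis is diag(5, 5, 5),
# so numpy.linalg.det(...) returns 5.0 * 5.0 * 5.0 = 125.0 (volume in nm^3).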
| 30.289855 | 102 | 0.657895 |
4a23d3129431a48251cba75a8a14c2c789b18fd5 | 4,304 | py | Python | workspace/asr/conformer/examples/talk.py | shahin-trunk/NeMo | a10ac29a6deb05bcfc672ad287f4a8279c1f9289 | ["Apache-2.0"] | null | null | null | workspace/asr/conformer/examples/talk.py | shahin-trunk/NeMo | a10ac29a6deb05bcfc672ad287f4a8279c1f9289 | ["Apache-2.0"] | null | null | null | workspace/asr/conformer/examples/talk.py | shahin-trunk/NeMo | a10ac29a6deb05bcfc672ad287f4a8279c1f9289 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import argparse
import sys
import time
import wave
import grpc
import numpy as np
import pyaudio
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_tts_pb2 as rtts
import riva_api.riva_tts_pb2_grpc as rtts_srv
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--server", default="localhost:50051", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--voice", type=str, help="voice name to use", default="ljspeech")
parser.add_argument("-o", "--output", default=None, type=str, help="Output file to write last utterance")
parser.add_argument("--list-devices", action="store_true", help="list output devices indices")
parser.add_argument("--output-device", type=int, help="Output device to use")
return parser.parse_args()
def main():
args = get_args()
channel = grpc.insecure_channel(args.server)
tts_client = rtts_srv.RivaSpeechSynthesisStub(channel)
audio_handle = pyaudio.PyAudio()
if args.list_devices:
for i in range(audio_handle.get_device_count()):
info = audio_handle.get_device_info_by_index(i)
if info['maxOutputChannels'] < 1:
continue
print(f"{info['index']}: {info['name']}")
sys.exit(0)
print("Example query:")
print(
" Hello, My name is Linda"
+ ", and I am demonstrating speech synthesis with Riva {@EY2}.I. services, running on NVIDIA {@JH}{@IY1}_{@P}{@IY}_{@Y}{@UW0}s."
)
req = rtts.SynthesizeSpeechRequest()
req.text = "Hello"
req.language_code = "en-US"
req.encoding = ra.AudioEncoding.LINEAR_PCM
req.sample_rate_hz = 22050
req.voice_name = args.voice
stream = audio_handle.open(
format=pyaudio.paFloat32, output_device_index=args.output_device, channels=1, rate=22050, output=True
)
while True:
print("Speak: ", end='')
req.text = str(input())
if args.output:
wav = wave.open(args.output, 'wb')
wav.setnchannels(1)
wav.setsampwidth(2)
wav.setframerate(req.sample_rate_hz)
print("Generating audio for request...")
print(f" > '{req.text}': ", end='')
start = time.time()
resp = tts_client.Synthesize(req)
stop = time.time()
print(f"Time to first audio: {(stop-start):.3f}s")
stream.write(resp.audio)
if args.output:
dt = np.float32
f32_output = (np.frombuffer(resp.audio, dtype=np.float32) * 32767).astype(np.int16)
wav.writeframesraw(f32_output)
wav.close()
stream.stop_stream()
stream.close()
if __name__ == '__main__':
main()
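# Illustrative invocations (the device index and output file are assumptions,
# not values required by the script):
#   python talk.py --list-devices
#   python talk.py --server localhost:50051 --voice ljspeech --output-device 3 -o last_utterance.wav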
| 39.851852 | 136 | 0.693309 |
4a23d4b2ac1699c6555eea1b54e504a8b0b28b34 | 160 | py | Python | Bulb_Switcher/bulb.py | danbaehr/leetcode | 0d135ded6199e191a8afe21b635bc98ce10de3ec | ["MIT"] | null | null | null | Bulb_Switcher/bulb.py | danbaehr/leetcode | 0d135ded6199e191a8afe21b635bc98ce10de3ec | ["MIT"] | null | null | null | Bulb_Switcher/bulb.py | danbaehr/leetcode | 0d135ded6199e191a8afe21b635bc98ce10de3ec | ["MIT"] | null | null | null |
import math
class Solution(object):
def bulbSwitch(self, n):
"""
:type n: int
:rtype: int
"""
        # Only bulbs at perfect-square positions are toggled an odd number of
        # times (odd divisor count), so the number left on is floor(sqrt(n)).
        return int(math.sqrt(n))
| 17.777778 | 32 | 0.44375 |
4a23d511233cfb494a7807aabc67394d71fa7c67 | 5,489 | py | Python | intent/scripts/igt/produce_tagger.py | rgeorgi/intent | 9920798c126f6d354029f7bb0a345e7cdb649f3a | ["MIT"] | 3 | 2016-08-05T01:11:57.000Z | 2017-08-26T15:35:51.000Z | intent/scripts/igt/produce_tagger.py | rgeorgi/intent | 9920798c126f6d354029f7bb0a345e7cdb649f3a | ["MIT"] | 2 | 2016-03-01T22:41:24.000Z | 2016-09-14T18:39:25.000Z | intent/scripts/igt/produce_tagger.py | rgeorgi/intent | 9920798c126f6d354029f7bb0a345e7cdb649f3a | ["MIT"] | null | null | null |
'''
Created on Feb 20, 2015
@author: rgeorgi
'''
# Built-in imports -------------------------------------------------------------
import argparse, os, sys, logging
# Internal imports -------------------------------------------------------------
from intent.utils.argutils import existsfile, writefile
from intent.igt.rgxigt import RGCorpus, rgp, ProjectionException, ProjectionTransGlossException
from intent.utils.argpasser import ArgPasser, argp
from intent.utils.env import c
from intent.interfaces.stanford_tagger import StanfordPOSTagger
from intent.interfaces.mallet_maxent import MalletMaxent
#===============================================================================
# Set up logging
#===============================================================================
TAGLOG = logging.getLogger(__name__)
classification = 'classification'
heur_proj = 'heur-proj'
giza_proj = 'giza-proj'
giza_direct = 'giza-proj-direct'
projection = [heur_proj, giza_proj, giza_direct]
normal_proj = [giza_proj, heur_proj]
giza = [giza_proj, giza_direct]
UNK = 'UNK'
class TagProductionException(Exception): pass
@argp
def produce_tagger(inpath, out_f, method, kwargs = None):
if kwargs.get('xc'):
xc = kwargs.get('xc')
else:
# Load the xigt corpus.
xc = RGCorpus.load(inpath)
corp_length = len(xc)
# Before reducing the size of the corpus, filter out
# instances lacking g/t alignment for classification and projection...
if method == classification or method in normal_proj:
xc.require_one_to_one()
corp_length = len(xc)
# Also, filter out instances where a translation line is missing
# if we are projecting. (This overlaps with the above, but leaves
# direct giza alignments to not require one to one alignment.)
if method in projection:
xc.require_trans_lines()
corp_length = len(xc)
limit = kwargs.get('limit', 0, int)
if limit:
xc.igts = xc.igts[:limit]
corp_length = len(xc)
# Giza Realignment ---------------------------------------------------------
# If we are using a giza based approach, we will want to
# realign the corpus now, since it is heuristic by default.
if method == giza_proj:
xc.giza_align_t_g(kwargs.get('resume'))
elif method == giza_direct:
xc.giza_align_l_t()
TAGLOG.info('Producing tagfile for "%s"' % os.path.relpath(out_f.name))
#===========================================================================
# ADD PUNC
#===========================================================================
out_f.write('''./PUNC
?/PUNC
“/PUNC
"/PUNC
''/PUNC
'/PUNC
,/PUNC
…/PUNC
//PUNC
--/PUNC
``/PUNC
:/PUNC
;/PUNC
«/PUNC
»/PUNC
-/PUNC\n''')
for i, inst in enumerate(xc):
if i % 25 == 0:
TAGLOG.info('Processing instance %d' % i)
# If we are doing classification
if method == classification:
inst.classify_gloss_pos(kwargs.get('classifier'), posdict=kwargs.get('posdict'))
inst.project_gloss_to_lang()
# If we are doing normal projection via the gloss line
elif method in normal_proj:
try:
inst.project_trans_to_gloss()
except ProjectionTransGlossException as ptge:
TAGLOG.warn(ptge)
continue
inst.project_gloss_to_lang()
# Otherwise, we are looking at doing the direct translation
# to language based approach.
elif method == giza_direct:
inst.project_trans_to_lang()
# Raise an exception if we somehow got a different method.
else:
raise TagProductionException('Method "%s" is not defined for producing taggers.' % method)
# Whichever method, get the gloss line tags:
sequence = inst.get_lang_sequence()
# If we get a "skip" and "UNK" appears in the sequence...
if kwargs.get('skip') and len(sequence) != len([i for i in sequence if i.label != UNK]):
corp_length -= 1
continue
else:
# Replace the "UNK" with "NOUN"
for i, pos_token in enumerate(sequence):
if pos_token.label == 'UNK' and kwargs.get('unk_nouns'):
pos_token.label = "NOUN"
elif pos_token.label == 'UNK' and kwargs.get('unk_classify'):
classifier = kwargs.get('classifier')
kwargs['prev_gram'] = ''
kwargs['next_gram'] = ''
if i > 0:
kwargs['prev_gram'] = inst.gloss[i-1].get_content()
if i < len(inst.gloss)-1:
kwargs['next_gram'] = inst.gloss[i+1].get_content()
pos_token.label = classifier.classify_string(inst.gloss[i].get_content(), **kwargs).largest()[0]
out_f.write('%s/%s ' % (pos_token.seq, pos_token.label))
out_f.write('\n')
out_f.flush()
out_f.close()
return corp_length
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('-i', '--input', required=True, type=existsfile, help='Existing xigt xml file.')
p.add_argument('-o', '--output', required=True, type=writefile, help='Output slashtag format file.')
p.add_argument('-m', '--method', choices=['classification','heur-proj','giza-proj', 'giza-proj-direct'])
p.add_argument('-s', '--skip', action='store_true', help='Whether to skip incomplete projections or not.')
p.add_argument('-l', '--limit', type=int, default=0, help='limit the number of sentences used in the resulting file')
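	# Illustrative invocation (the paths are placeholders, not files shipped with the project):
	#   python produce_tagger.py -i corpus.xigt.xml -o corpus.tagged.txt -m heur-proj --skip --limit 100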
args = p.parse_args()
ap = ArgPasser(vars(args))
del ap['method']
del ap['output']
classifier = None
tagger = None
if args.method not in projection:
ap['classifier'] = MalletMaxent(c['classifier_model'])
	produce_tagger(args.input, args.output, args.method, **ap)
| 28.73822 | 118 | 0.626344 |
4a23d538b493659a448d941bcc3801fc2abd5b82 | 14,858 | py | Python | src/blib2to3/pgen2/parse.py | cbows/black | 0f26a0369efc7305a1a0120355f78d85b3030e56 | ["MIT"] | null | null | null | src/blib2to3/pgen2/parse.py | cbows/black | 0f26a0369efc7305a1a0120355f78d85b3030e56 | ["MIT"] | 14 | 2021-12-15T13:35:48.000Z | 2022-03-28T13:37:04.000Z | src/blib2to3/pgen2/parse.py | cbows/black | 0f26a0369efc7305a1a0120355f78d85b3030e56 | ["MIT"] | null | null | null |
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser engine for the grammar tables generated by pgen.
The grammar table must be loaded first.
See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.
"""
import copy
from contextlib import contextmanager
# Local imports
from . import grammar, token, tokenize
from typing import (
cast,
Any,
Optional,
Text,
Union,
Tuple,
Dict,
List,
Iterator,
Callable,
Set,
TYPE_CHECKING,
)
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node
if TYPE_CHECKING:
from blib2to3.driver import TokenProxy
Results = Dict[Text, NL]
Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
DFA = List[List[Tuple[int, int]]]
DFAS = Tuple[DFA, Dict[int, int]]
def lam_sub(grammar: Grammar, node: RawNode) -> NL:
assert node[3] is not None
return Node(type=node[0], children=node[3], context=node[2])
# A placeholder node, used when parser is backtracking.
DUMMY_NODE = (-1, None, None, None)
def stack_copy(
stack: List[Tuple[DFAS, int, RawNode]]
) -> List[Tuple[DFAS, int, RawNode]]:
"""Nodeless stack copy."""
return [(copy.deepcopy(dfa), label, DUMMY_NODE) for dfa, label, _ in stack]
class Recorder:
def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None:
self.parser = parser
self._ilabels = ilabels
self.context = context # not really matter
self._dead_ilabels: Set[int] = set()
self._start_point = self.parser.stack
self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}
@property
def ilabels(self) -> Set[int]:
return self._dead_ilabels.symmetric_difference(self._ilabels)
@contextmanager
def switch_to(self, ilabel: int) -> Iterator[None]:
with self.backtrack():
self.parser.stack = self._points[ilabel]
try:
yield
except ParseError:
self._dead_ilabels.add(ilabel)
finally:
self.parser.stack = self._start_point
@contextmanager
def backtrack(self) -> Iterator[None]:
"""
Use the node-level invariant ones for basic parsing operations (push/pop/shift).
These still will operate on the stack; but they won't create any new nodes, or
modify the contents of any other existing nodes.
This saves us a ton of time when we are backtracking, since we
        want to restore to the initial state as quickly as possible, which
        can only be done by having as few mutations as possible.
"""
is_backtracking = self.parser.is_backtracking
try:
self.parser.is_backtracking = True
yield
finally:
self.parser.is_backtracking = is_backtracking
def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
func: Callable[..., Any]
if raw:
func = self.parser._addtoken
else:
func = self.parser.addtoken
for ilabel in self.ilabels:
with self.switch_to(ilabel):
args = [tok_type, tok_val, self.context]
if raw:
args.insert(0, ilabel)
func(*args)
def determine_route(self, value: Text = None, force: bool = False) -> Optional[int]:
alive_ilabels = self.ilabels
if len(alive_ilabels) == 0:
*_, most_successful_ilabel = self._dead_ilabels
raise ParseError("bad input", most_successful_ilabel, value, self.context)
ilabel, *rest = alive_ilabels
if force or not rest:
return ilabel
else:
return None
class ParseError(Exception):
"""Exception to signal the parser is stuck."""
def __init__(
self, msg: Text, type: Optional[int], value: Optional[Text], context: Context
) -> None:
Exception.__init__(
self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)
)
self.msg = msg
self.type = type
self.value = value
self.context = context
class Parser(object):
"""Parser engine.
The proper usage sequence is:
p = Parser(grammar, [converter]) # create instance
p.setup([start]) # prepare for parsing
<for each input token>:
if p.addtoken(...): # parse a token; may raise ParseError
break
root = p.rootnode # root of abstract syntax tree
A Parser instance may be reused by calling setup() repeatedly.
A Parser instance contains state pertaining to the current token
sequence, and should not be used concurrently by different threads
to parse separate token sequences.
See driver.py for how to get input tokens by tokenizing a file or
string.
Parsing is complete when addtoken() returns True; the root of the
abstract syntax tree can then be retrieved from the rootnode
instance variable. When a syntax error occurs, addtoken() raises
the ParseError exception. There is no error recovery; the parser
cannot be used after a syntax error was reported (but it can be
reinitialized by calling setup()).
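    An illustrative driver loop (a sketch added for clarity, not original to
    this docstring; `grammar`, `tokens`, and `proxy` are assumed to be supplied
    by the caller, e.g. by the pgen2 driver):

        p = Parser(grammar)
        p.setup(proxy)
        for type_, value, context in tokens:
            if p.addtoken(type_, value, context):
                break
        tree = p.rootnode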
"""
def __init__(self, grammar: Grammar, convert: Optional[Convert] = None) -> None:
"""Constructor.
The grammar argument is a grammar.Grammar instance; see the
grammar module for more information.
The parser is not ready yet for parsing; you must call the
setup() method to get it started.
The optional convert argument is a function mapping concrete
syntax tree nodes to abstract syntax tree nodes. If not
given, no conversion is done and the syntax tree produced is
the concrete syntax tree. If given, it must be a function of
two arguments, the first being the grammar (a grammar.Grammar
instance), and the second being the concrete syntax tree node
to be converted. The syntax tree is converted from the bottom
up.
**post-note: the convert argument is ignored since for Black's
usage, convert will always be blib2to3.pytree.convert. Allowing
this to be dynamic hurts mypyc's ability to use early binding.
These docs are left for historical and informational value.
A concrete syntax tree node is a (type, value, context, nodes)
tuple, where type is the node type (a token or symbol number),
value is None for symbols and a string for tokens, context is
None or an opaque value used for error reporting (typically a
(lineno, offset) pair), and nodes is a list of children for
symbols, and None for tokens.
An abstract syntax tree node may be anything; this is entirely
up to the converter function.
"""
self.grammar = grammar
# See note in docstring above. TL;DR this is ignored.
self.convert = convert or lam_sub
self.is_backtracking = False
def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
"""Prepare for parsing.
This *must* be called before starting to parse.
The optional argument is an alternative start symbol; it
defaults to the grammar's start symbol.
You can use a Parser instance to parse any number of programs;
each time you call setup() the parser is reset to an initial
state determined by the (implicit or explicit) start symbol.
"""
if start is None:
start = self.grammar.start
# Each stack entry is a tuple: (dfa, state, node).
# A node is a tuple: (type, value, context, children),
# where children is a list of nodes or None, and context may be None.
newnode: RawNode = (start, None, None, [])
stackentry = (self.grammar.dfas[start], 0, newnode)
self.stack: List[Tuple[DFAS, int, RawNode]] = [stackentry]
self.rootnode: Optional[NL] = None
self.used_names: Set[str] = set()
self.proxy = proxy
def addtoken(self, type: int, value: Text, context: Context) -> bool:
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabels = self.classify(type, value, context)
assert len(ilabels) >= 1
# If we have only one state to advance, we'll directly
# take it as is.
if len(ilabels) == 1:
[ilabel] = ilabels
return self._addtoken(ilabel, type, value, context)
# If there are multiple states which we can advance (only
# happen under soft-keywords), then we will try all of them
# in parallel and as soon as one state can reach further than
# the rest, we'll choose that one. This is a pretty hacky
# and hopefully temporary algorithm.
#
# For a more detailed explanation, check out this post:
# https://tree.science/what-the-backtracking.html
with self.proxy.release() as proxy:
counter, force = 0, False
recorder = Recorder(self, ilabels, context)
recorder.add_token(type, value, raw=True)
next_token_value = value
while recorder.determine_route(next_token_value) is None:
if not proxy.can_advance(counter):
force = True
break
next_token_type, next_token_value, *_ = proxy.eat(counter)
if next_token_type in (tokenize.COMMENT, tokenize.NL):
counter += 1
continue
if next_token_type == tokenize.OP:
next_token_type = grammar.opmap[next_token_value]
recorder.add_token(next_token_type, next_token_value)
counter += 1
ilabel = cast(int, recorder.determine_route(next_token_value, force=force))
assert ilabel is not None
return self._addtoken(ilabel, type, value, context)
def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
states, first = dfa
arcs = states[state]
# Look for a state with this label
for i, newstate in arcs:
t = self.grammar.labels[i][0]
if t >= 256:
# See if it's a symbol and if we're in its first set
itsdfa = self.grammar.dfas[t]
itsstates, itsfirst = itsdfa
if ilabel in itsfirst:
# Push a symbol
self.push(t, itsdfa, newstate, context)
break # To continue the outer while loop
elif ilabel == i:
# Look it up in the list of labels
# Shift a token; we're done with it
self.shift(type, value, newstate, context)
# Pop while we are in an accept-only state
state = newstate
while states[state] == [(0, state)]:
self.pop()
if not self.stack:
# Done parsing!
return True
dfa, state, node = self.stack[-1]
states, first = dfa
# Done with this token
return False
else:
if (0, state) in arcs:
# An accepting state, pop it and try something else
self.pop()
if not self.stack:
# Done parsing, but another token is input
raise ParseError("too much input", type, value, context)
else:
# No success finding a transition
raise ParseError("bad input", type, value, context)
def classify(self, type: int, value: Text, context: Context) -> List[int]:
"""Turn a token into a label. (Internal)
Depending on whether the value is a soft-keyword or not,
this function may return multiple labels to choose from."""
if type == token.NAME:
# Keep a listing of all used names
self.used_names.add(value)
# Check for reserved words
if value in self.grammar.keywords:
return [self.grammar.keywords[value]]
elif value in self.grammar.soft_keywords:
assert type in self.grammar.tokens
return [
self.grammar.soft_keywords[value],
self.grammar.tokens[type],
]
ilabel = self.grammar.tokens.get(type)
if ilabel is None:
raise ParseError("bad token", type, value, context)
return [ilabel]
def shift(self, type: int, value: Text, newstate: int, context: Context) -> None:
"""Shift a token. (Internal)"""
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
self.stack[-1] = (dfa, newstate, DUMMY_NODE)
else:
dfa, state, node = self.stack[-1]
rawnode: RawNode = (type, value, context, None)
newnode = convert(self.grammar, rawnode)
assert node[-1] is not None
node[-1].append(newnode)
self.stack[-1] = (dfa, newstate, node)
def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None:
"""Push a nonterminal. (Internal)"""
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
self.stack[-1] = (dfa, newstate, DUMMY_NODE)
self.stack.append((newdfa, 0, DUMMY_NODE))
else:
dfa, state, node = self.stack[-1]
newnode: RawNode = (type, None, context, [])
self.stack[-1] = (dfa, newstate, node)
self.stack.append((newdfa, 0, newnode))
def pop(self) -> None:
"""Pop a nonterminal. (Internal)"""
if self.is_backtracking:
self.stack.pop()
else:
popdfa, popstate, popnode = self.stack.pop()
newnode = convert(self.grammar, popnode)
if self.stack:
dfa, state, node = self.stack[-1]
assert node[-1] is not None
node[-1].append(newnode)
else:
self.rootnode = newnode
self.rootnode.used_names = self.used_names
| 37.71066 | 88 | 0.591802 |
4a23d56ec8a2b6f3f1d48e125424f202cfecbd2f | 1,142 | py | Python | bitmovin_api_sdk/models/progressive_webm_muxing_information.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | ["MIT"] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/models/progressive_webm_muxing_information.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | ["MIT"] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/models/progressive_webm_muxing_information.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | ["MIT"] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z |
# coding: utf-8
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.progressive_muxing_information import ProgressiveMuxingInformation
import pprint
class ProgressiveWebmMuxingInformation(ProgressiveMuxingInformation):
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
if hasattr(super(ProgressiveWebmMuxingInformation, self), "to_dict"):
result = super(ProgressiveWebmMuxingInformation, self).to_dict()
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProgressiveWebmMuxingInformation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.052632 | 95 | 0.688266 |
4a23d5c17e6a9f2369faf6ea31f734f613ec9291 | 2,082 | py | Python | deicode/scripts/_logratio.py | justinshaffer/DEICODE | b7cd4da09c993bdd9ab536b1a5919dbc28d2b9ca | ["BSD-3-Clause"] | null | null | null | deicode/scripts/_logratio.py | justinshaffer/DEICODE | b7cd4da09c993bdd9ab536b1a5919dbc28d2b9ca | ["BSD-3-Clause"] | null | null | null | deicode/scripts/_logratio.py | justinshaffer/DEICODE | b7cd4da09c993bdd9ab536b1a5919dbc28d2b9ca | ["BSD-3-Clause"] | null | null | null |
from biom import load_table
import pandas as pd
import numpy as np
import skbio
import os
from deicode.ratios import log_ratios
from deicode.preprocessing import rclr
import click
from gneiss.util import match
from skbio.stats.ordination import OrdinationResults
@click.command()
@click.option('--in_biom', help='Input table in biom format. (optional taxa in observation)')
@click.option('--in_ord', help='RPCA output RPCA_Ordination.txt')
@click.option('--output_dir', help='Location of output files.')
@click.option('--axis', help='Axis of both ordinations to use default=0',default=0)
@click.option('--n_lr', help='Number of log-ratios to compute default=10',default=10)
@click.option('--tax_level', help='If taxa included - choose level default=lowest',default=10)
def logratio(in_biom: str, in_ord:str,
output_dir: str, axis:int,
n_lr: int, tax_level: str) -> None:
""" Runs log ratios on import features from RPCA output"""
table = load_table(in_biom)
tabledf = table.to_dataframe().T.drop_duplicates()
# get loadings from ordination files
sample_loading=OrdinationResults.read(in_ord).samples
feature_loading=OrdinationResults.read(in_ord).features
# match tables
tabledf,feature_loading=match(tabledf.T,feature_loading)
tabledf,sample_loading=match(tabledf.T,sample_loading)
try:
        # try to include taxa if they are there
taxonomy=table.metadata_to_dataframe('observation')
logdf=log_ratios(tabledf, feature_loading,
sample_loading, taxa_tmp=taxonomy,
axis_sort=axis,N_show=n_lr,
level=tax_level)
except:
# if not then just run with OTU ids
logdf=log_ratios(tabledf, feature_loading,
sample_loading,
axis_sort=axis,N_show=n_lr,
level=tax_level)
logdf.to_csv(os.path.join(output_dir,'Log_Ratios.txt'), sep='\t')
return
if __name__ == '__main__':
logratio()
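# Illustrative invocation (the paths are placeholders):
#   python _logratio.py --in_biom table.biom --in_ord RPCA_Ordination.txt \
#       --output_dir results/ --axis 0 --n_lr 10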
| 38.555556 | 94 | 0.664745 |
4a23d6114366e20f319b6f975e0d4c37d26a37b2 | 53,137 | py | Python | os_win/tests/unit/utils/network/test_networkutils.py | mail2nsrajesh/os-win | b5ee321a097ddc96ea9c7a652a19d88215eab996 | ["Apache-2.0"] | null | null | null | os_win/tests/unit/utils/network/test_networkutils.py | mail2nsrajesh/os-win | b5ee321a097ddc96ea9c7a652a19d88215eab996 | ["Apache-2.0"] | null | null | null | os_win/tests/unit/utils/network/test_networkutils.py | mail2nsrajesh/os-win | b5ee321a097ddc96ea9c7a652a19d88215eab996 | ["Apache-2.0"] | null | null | null |
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils import units
from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils import _wqlutils
from os_win.utils.network import networkutils
@ddt.ddt
class NetworkUtilsTestCase(test_base.OsWinBaseTestCase):
"""Unit tests for the Hyper-V NetworkUtils class."""
_FAKE_VSWITCH_NAME = "fake_vswitch_name"
_FAKE_PORT_NAME = "fake_port_name"
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_RET_VAL = 0
_FAKE_RES_PATH = "fake_res_path"
_FAKE_VSWITCH = "fake_vswitch"
_FAKE_VLAN_ID = "fake_vlan_id"
_FAKE_CLASS_NAME = "fake_class_name"
_FAKE_ELEMENT_NAME = "fake_element_name"
_FAKE_HYPERV_VM_STATE = 'fake_hyperv_state'
_FAKE_ACL_ACT = 'fake_acl_action'
_FAKE_ACL_DIR = 'fake_acl_dir'
_FAKE_ACL_TYPE = 'fake_acl_type'
_FAKE_LOCAL_PORT = 'fake_local_port'
_FAKE_PROTOCOL = 'fake_port_protocol'
_FAKE_REMOTE_ADDR = '0.0.0.0/0'
_FAKE_WEIGHT = 'fake_weight'
_FAKE_BAD_INSTANCE_ID = 'bad_instance_id'
_FAKE_INSTANCE_ID = (
r"Microsoft:609CBAAD-BC13-4A65-AADE-AD95861FE394\\55349F56-72AB-4FA3-"
"B5FE-6A30A511A419\\C\\776E0BA7-94A1-41C8-8F28-951F524251B5\\77A43184-"
"5444-49BF-ABE0-2210B72ABA73")
_MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch'
def setUp(self):
super(NetworkUtilsTestCase, self).setUp()
self.netutils = networkutils.NetworkUtils()
self.netutils._conn_attr = mock.MagicMock()
self.netutils._jobutils = mock.MagicMock()
def test_init_caches_disabled(self):
self.netutils._enable_cache = False
self.netutils._switches = {}
self.netutils.init_caches()
self.netutils._conn.Msvm_VirtualEthernetSwitch.assert_not_called()
self.assertEqual({}, self.netutils._switches)
def test_init_caches(self):
self.netutils._switches = {}
self.netutils._switch_ports = {}
self.netutils._vlan_sds = {}
self.netutils._profile_sds = {}
self.netutils._vsid_sds = {}
self.netutils._bandwidth_sds = {}
conn = self.netutils._conn
mock_vswitch = mock.MagicMock(ElementName=mock.sentinel.vswitch_name)
conn.Msvm_VirtualEthernetSwitch.return_value = [mock_vswitch]
mock_port = mock.MagicMock(ElementName=mock.sentinel.port_name)
conn.Msvm_EthernetPortAllocationSettingData.return_value = [
mock_port]
mock_sd = mock.MagicMock(InstanceID=self._FAKE_INSTANCE_ID)
mock_bad_sd = mock.MagicMock(InstanceID=self._FAKE_BAD_INSTANCE_ID)
conn.Msvm_EthernetSwitchPortProfileSettingData.return_value = [
mock_bad_sd, mock_sd]
conn.Msvm_EthernetSwitchPortVlanSettingData.return_value = [
mock_bad_sd, mock_sd]
conn.Msvm_EthernetSwitchPortSecuritySettingData.return_value = [
mock_bad_sd, mock_sd]
conn.Msvm_EthernetSwitchPortBandwidthSettingData.return_value = [
mock_bad_sd, mock_sd]
self.netutils.init_caches()
self.assertEqual({mock.sentinel.vswitch_name: mock_vswitch},
self.netutils._switches)
self.assertEqual({mock.sentinel.port_name: mock_port},
self.netutils._switch_ports)
self.assertEqual([mock_sd], list(self.netutils._profile_sds.values()))
self.assertEqual([mock_sd], list(self.netutils._vlan_sds.values()))
self.assertEqual([mock_sd], list(self.netutils._vsid_sds.values()))
self.assertEqual([mock_sd],
list(self.netutils._bandwidth_sds.values()))
def test_update_cache_disabled(self):
self.netutils._enable_cache = False
self.netutils._switch_ports = {}
self.netutils.update_cache()
conn = self.netutils._conn
conn.Msvm_EthernetPortAllocationSettingData.assert_not_called()
self.assertEqual({}, self.netutils._switch_ports)
def test_update_cache(self):
self.netutils._switch_ports[mock.sentinel.other] = mock.sentinel.port
conn = self.netutils._conn
mock_port = mock.MagicMock(ElementName=mock.sentinel.port_name)
conn.Msvm_EthernetPortAllocationSettingData.return_value = [
mock_port]
self.netutils.update_cache()
self.assertEqual({mock.sentinel.port_name: mock_port},
self.netutils._switch_ports)
# assert that other networkutils have the same cache.
netutils = networkutils.NetworkUtils()
self.assertEqual({mock.sentinel.port_name: mock_port},
netutils._switch_ports)
def test_clear_port_sg_acls_cache(self):
self.netutils._sg_acl_sds[mock.sentinel.port_id] = [mock.sentinel.acl]
self.netutils.clear_port_sg_acls_cache(mock.sentinel.port_id)
self.assertNotIn(mock.sentinel.acl, self.netutils._sg_acl_sds)
@mock.patch.object(networkutils.NetworkUtils, '_get_vswitch_external_port')
def test_get_vswitch_external_network_name(self, mock_get_vswitch_port):
mock_get_vswitch_port.return_value.ElementName = (
mock.sentinel.network_name)
result = self.netutils.get_vswitch_external_network_name(
mock.sentinel.vswitch_name)
self.assertEqual(mock.sentinel.network_name, result)
def test_get_vswitch_external_port(self):
vswitch = mock.MagicMock(Name=mock.sentinel.vswitch_name)
self.netutils._conn.Msvm_VirtualEthernetSwitch.return_value = [vswitch]
conn = self.netutils._conn
ext_port = mock.MagicMock()
lan_endpoint_assoc1 = mock.MagicMock()
lan_endpoint_assoc2 = mock.Mock(SystemName=mock.sentinel.vswitch_name)
self.netutils._conn.Msvm_ExternalEthernetPort.return_value = [ext_port]
conn.Msvm_EthernetDeviceSAPImplementation.return_value = [
lan_endpoint_assoc1]
conn.Msvm_ActiveConnection.return_value = [
mock.Mock(Antecedent=lan_endpoint_assoc2)]
result = self.netutils._get_vswitch_external_port(mock.sentinel.name)
self.assertEqual(ext_port, result)
conn.Msvm_EthernetDeviceSAPImplementation.assert_called_once_with(
Antecedent=ext_port.path_.return_value)
conn.Msvm_ActiveConnection.assert_called_once_with(
Dependent=lan_endpoint_assoc1.Dependent.path_.return_value)
def test_vswitch_port_needed(self):
self.assertFalse(self.netutils.vswitch_port_needed())
@mock.patch.object(networkutils.NetworkUtils, '_get_vnic_settings')
def test_get_vnic_mac_address(self, mock_get_vnic_settings):
mock_vnic = mock.MagicMock(Address=mock.sentinel.mac_address)
mock_get_vnic_settings.return_value = mock_vnic
actual_mac_address = self.netutils.get_vnic_mac_address(
mock.sentinel.switch_port_name)
self.assertEqual(mock.sentinel.mac_address, actual_mac_address)
@ddt.data([], [mock.sentinel.nic_sd])
def test_get_vnic_settings(self, nic_sds):
mock_nic_sd = self.netutils._conn.Msvm_SyntheticEthernetPortSettingData
mock_nic_sd.return_value = nic_sds
if not nic_sds:
self.assertRaises(exceptions.HyperVvNicNotFound,
self.netutils._get_vnic_settings,
mock.sentinel.vnic_name)
else:
nic_sd = self.netutils._get_vnic_settings(mock.sentinel.vnic_name)
self.assertEqual(mock.sentinel.nic_sd, nic_sd)
mock_nic_sd.assert_called_once_with(
ElementName=mock.sentinel.vnic_name)
@mock.patch.object(networkutils, 'patcher')
@mock.patch.object(networkutils.tpool, 'execute')
@mock.patch.object(networkutils.NetworkUtils, '_get_event_wql_query')
def test_get_vnic_event_listener(self, mock_get_event_query,
mock_execute, mock_patcher):
event = mock.MagicMock()
port_class = self.netutils._conn.Msvm_SyntheticEthernetPortSettingData
wmi_event_listener = port_class.watch_for.return_value
mock_execute.side_effect = [exceptions.x_wmi_timed_out, event]
# callback will raise an exception in order to stop iteration in the
# listener.
callback = mock.MagicMock(side_effect=TypeError)
returned_listener = self.netutils.get_vnic_event_listener(
self.netutils.EVENT_TYPE_CREATE)
self.assertRaises(TypeError, returned_listener, callback)
mock_get_event_query.assert_called_once_with(
cls=self.netutils._VNIC_SET_DATA,
event_type=self.netutils.EVENT_TYPE_CREATE,
timeframe=2)
port_class.watch_for.assert_called_once_with(
mock_get_event_query.return_value)
mock_execute.assert_has_calls(
[mock.call(wmi_event_listener,
self.netutils._VNIC_LISTENER_TIMEOUT_MS)] * 2)
callback.assert_called_once_with(event.ElementName)
def test_get_event_wql_query(self):
expected = ("SELECT * FROM %(event_type)s WITHIN %(timeframe)s "
"WHERE TargetInstance ISA '%(class)s' AND "
"%(like)s" % {
'class': "FakeClass",
'event_type': self.netutils.EVENT_TYPE_CREATE,
'like': "TargetInstance.foo LIKE 'bar%'",
'timeframe': 2})
query = self.netutils._get_event_wql_query(
"FakeClass", self.netutils.EVENT_TYPE_CREATE, like=dict(foo="bar"))
self.assertEqual(expected, query)
def test_connect_vnic_to_vswitch_found(self):
self._test_connect_vnic_to_vswitch(True)
def test_connect_vnic_to_vswitch_not_found(self):
self._test_connect_vnic_to_vswitch(False)
def _test_connect_vnic_to_vswitch(self, found):
self.netutils._get_vnic_settings = mock.MagicMock()
if not found:
mock_vm = mock.MagicMock()
self.netutils._get_vm_from_res_setting_data = mock.MagicMock(
return_value=mock_vm)
self.netutils._add_virt_resource = mock.MagicMock()
else:
self.netutils._modify_virt_resource = mock.MagicMock()
self.netutils._get_vswitch = mock.MagicMock()
mock_port = self._mock_get_switch_port_alloc(found=found)
mock_port.HostResource = []
self.netutils.connect_vnic_to_vswitch(self._FAKE_VSWITCH_NAME,
self._FAKE_PORT_NAME)
if not found:
mock_add_resource = self.netutils._jobutils.add_virt_resource
mock_add_resource.assert_called_once_with(mock_port, mock_vm)
else:
mock_modify_resource = self.netutils._jobutils.modify_virt_resource
mock_modify_resource.assert_called_once_with(mock_port)
def test_connect_vnic_to_vswitch_already_connected(self):
mock_port = self._mock_get_switch_port_alloc()
mock_port.HostResource = [mock.sentinel.vswitch_path]
self.netutils.connect_vnic_to_vswitch(mock.sentinel.switch_name,
mock.sentinel.port_name)
self.assertFalse(self.netutils._jobutils.modify_virt_resource.called)
def _mock_get_switch_port_alloc(self, found=True):
mock_port = mock.MagicMock()
patched = mock.patch.object(
self.netutils, '_get_switch_port_allocation',
return_value=(mock_port, found))
patched.start()
self.addCleanup(patched.stop)
return mock_port
def test_get_vm_from_res_setting_data(self):
fake_res_set_instance_id = "Microsoft:GUID\\SpecificData"
fake_vm_set_instance_id = "Microsoft:GUID"
res_setting_data = mock.Mock(InstanceID=fake_res_set_instance_id)
conn = self.netutils._conn
mock_setting_data = conn.Msvm_VirtualSystemSettingData.return_value
resulted_vm = self.netutils._get_vm_from_res_setting_data(
res_setting_data)
conn.Msvm_VirtualSystemSettingData.assert_called_once_with(
InstanceID=fake_vm_set_instance_id)
conn.Msvm_ComputerSystem.assert_called_once_with(
Name=mock_setting_data[0].ConfigurationID)
expected_result = conn.Msvm_ComputerSystem.return_value[0]
self.assertEqual(expected_result, resulted_vm)
def test_remove_switch_port(self):
mock_sw_port = self._mock_get_switch_port_alloc()
self.netutils._switch_ports[self._FAKE_PORT_NAME] = mock_sw_port
self.netutils._vlan_sds[mock_sw_port.InstanceID] = mock.MagicMock()
self.netutils._jobutils.remove_virt_resource.side_effect = (
exceptions.x_wmi)
self.netutils.remove_switch_port(self._FAKE_PORT_NAME, False)
self.netutils._jobutils.remove_virt_resource.assert_called_once_with(
mock_sw_port)
self.assertNotIn(self._FAKE_PORT_NAME, self.netutils._switch_ports)
self.assertNotIn(mock_sw_port.InstanceID, self.netutils._vlan_sds)
@ddt.data(True, False)
def test_get_vswitch(self, enable_cache):
self.netutils._enable_cache = enable_cache
self.netutils._switches = {}
self.netutils._conn.Msvm_VirtualEthernetSwitch.return_value = [
self._FAKE_VSWITCH]
vswitch = self.netutils._get_vswitch(self._FAKE_VSWITCH_NAME)
expected_cache = ({self._FAKE_VSWITCH_NAME: self._FAKE_VSWITCH} if
enable_cache else {})
self.assertEqual(expected_cache, self.netutils._switches)
self.assertEqual(self._FAKE_VSWITCH, vswitch)
def test_get_vswitch_cache(self):
self.netutils._switches = {
self._FAKE_VSWITCH_NAME: mock.sentinel.vswitch}
vswitch = self.netutils._get_vswitch(self._FAKE_VSWITCH_NAME)
self.assertEqual(mock.sentinel.vswitch, vswitch)
def test_get_vswitch_not_found(self):
self.netutils._switches = {}
self.netutils._conn.Msvm_VirtualEthernetSwitch.return_value = []
self.assertRaises(exceptions.HyperVException,
self.netutils._get_vswitch,
self._FAKE_VSWITCH_NAME)
@mock.patch.object(networkutils.NetworkUtils,
'_prepare_profile_sd')
@mock.patch.object(networkutils.NetworkUtils,
'_get_profile_setting_data_from_port_alloc')
def _test_set_vswitch_port_profile_id(
self, mock_get_profile_setting_data_from_port_alloc,
mock_prepare_profile_sd, found, side_effect=None):
mock_port_profile = mock.MagicMock()
mock_new_port_profile = mock.MagicMock()
mock_port_alloc = self._mock_get_switch_port_alloc()
mock_add_feature = self.netutils._jobutils.add_virt_feature
mock_remove_feature = self.netutils._jobutils.remove_virt_feature
mock_get_profile_setting_data_from_port_alloc.return_value = (
mock_port_profile if found else None
)
mock_prepare_profile_sd.return_value = mock_new_port_profile
mock_add_feature.side_effect = side_effect
fake_params = {
"switch_port_name": self._FAKE_PORT_NAME,
"profile_id": mock.sentinel.profile_id,
"profile_data": mock.sentinel.profile_data,
"profile_name": mock.sentinel.profile_name,
"net_cfg_instance_id": None,
"cdn_label_id": None,
"cdn_label_string": None,
"vendor_id": None,
"vendor_name": mock.sentinel.vendor_name,
}
if side_effect:
self.assertRaises(
exceptions.HyperVException,
self.netutils.set_vswitch_port_profile_id,
**fake_params)
else:
self.netutils.set_vswitch_port_profile_id(**fake_params)
fake_params.pop("switch_port_name")
mock_prepare_profile_sd.assert_called_once_with(**fake_params)
if found:
mock_remove_feature.assert_called_once_with(mock_port_profile)
self.assertNotIn(self._FAKE_INSTANCE_ID,
self.netutils._profile_sds)
mock_get_profile_setting_data_from_port_alloc.assert_called_with(
mock_port_alloc)
self.assertNotIn(mock_port_alloc, self.netutils._profile_sds)
mock_add_feature.assert_called_once_with(mock_new_port_profile,
mock_port_alloc)
def test_set_vswitch_port_profile_id(self):
self._test_set_vswitch_port_profile_id(found=True)
def test_set_vswitch_port_profile_id_not_found(self):
self._test_set_vswitch_port_profile_id(found=False)
def test_set_vswitch_port_profile_id_failed(self):
self._test_set_vswitch_port_profile_id(found=False,
side_effect=Exception)
def test_set_vswitch_port_vlan_id_invalid_mode(self):
self.assertRaises(
AttributeError, self.netutils.set_vswitch_port_vlan_id,
mock.sentinel.vlan_id, mock.sentinel.switch_port_name,
operation_mode=mock.sentinel.invalid_mode)
def test_set_vswitch_port_vlan_id_access_mode_trunked(self):
self.assertRaises(
AttributeError, self.netutils.set_vswitch_port_vlan_id,
mock.sentinel.vlan_id, mock.sentinel.switch_port_name,
trunk_vlans=[mock.sentinel.vlan_id])
@mock.patch.object(networkutils.NetworkUtils,
'_prepare_vlan_sd_trunk_mode')
@mock.patch.object(networkutils.NetworkUtils,
'_prepare_vlan_sd_access_mode')
def _check_set_vswitch_port_vlan_id(self, mock_prepare_vlan_sd_access,
mock_prepare_vlan_sd_trunk,
op_mode=constants.VLAN_MODE_ACCESS,
missing_vlan=False):
mock_port = self._mock_get_switch_port_alloc(found=True)
old_vlan_settings = mock.MagicMock()
if missing_vlan:
side_effect = [old_vlan_settings, None]
else:
side_effect = [old_vlan_settings, old_vlan_settings]
self.netutils._get_vlan_setting_data_from_port_alloc = mock.MagicMock(
side_effect=side_effect)
mock_vlan_settings = mock.MagicMock()
mock_prepare_vlan_sd_access.return_value = mock_vlan_settings
mock_prepare_vlan_sd_trunk.return_value = mock_vlan_settings
if missing_vlan:
self.assertRaises(exceptions.HyperVException,
self.netutils.set_vswitch_port_vlan_id,
self._FAKE_VLAN_ID, self._FAKE_PORT_NAME,
operation_mode=op_mode)
else:
self.netutils.set_vswitch_port_vlan_id(
self._FAKE_VLAN_ID, self._FAKE_PORT_NAME,
operation_mode=op_mode)
if op_mode == constants.VLAN_MODE_ACCESS:
mock_prepare_vlan_sd_access.assert_called_once_with(
old_vlan_settings, self._FAKE_VLAN_ID)
else:
mock_prepare_vlan_sd_trunk.assert_called_once_with(
old_vlan_settings, self._FAKE_VLAN_ID, None)
mock_remove_feature = self.netutils._jobutils.remove_virt_feature
mock_remove_feature.assert_called_once_with(old_vlan_settings)
mock_add_feature = self.netutils._jobutils.add_virt_feature
mock_add_feature.assert_called_once_with(mock_vlan_settings, mock_port)
def test_set_vswitch_port_vlan_id_access(self):
self._check_set_vswitch_port_vlan_id()
def test_set_vswitch_port_vlan_id_trunk(self):
self._check_set_vswitch_port_vlan_id(op_mode=constants.VLAN_MODE_TRUNK)
def test_set_vswitch_port_vlan_id_missing(self):
self._check_set_vswitch_port_vlan_id(missing_vlan=True)
@mock.patch.object(networkutils.NetworkUtils,
'_prepare_vlan_sd_access_mode')
def test_set_vswitch_port_vlan_id_already_set(self, mock_prepare_vlan_sd):
self._mock_get_switch_port_alloc()
mock_prepare_vlan_sd.return_value = None
self.netutils.set_vswitch_port_vlan_id(mock.sentinel.vlan_id,
mock.sentinel.port_name)
mock_remove_feature = self.netutils._jobutils.remove_virt_feature
self.assertFalse(mock_remove_feature.called)
def test_prepare_vlan_sd_access_mode_already_set(self):
mock_vlan_sd = mock.MagicMock(OperationMode=constants.VLAN_MODE_ACCESS,
AccessVlanId=mock.sentinel.vlan_id)
actual_vlan_sd = self.netutils._prepare_vlan_sd_access_mode(
mock_vlan_sd, mock.sentinel.vlan_id)
self.assertIsNone(actual_vlan_sd)
@mock.patch.object(networkutils.NetworkUtils,
'_create_default_setting_data')
def test_prepare_vlan_sd_access_mode(self, mock_create_default_sd):
mock_vlan_sd = mock_create_default_sd.return_value
actual_vlan_sd = self.netutils._prepare_vlan_sd_access_mode(
None, mock.sentinel.vlan_id)
self.assertEqual(mock_vlan_sd, actual_vlan_sd)
self.assertEqual(mock.sentinel.vlan_id, mock_vlan_sd.AccessVlanId)
self.assertEqual(constants.VLAN_MODE_ACCESS,
mock_vlan_sd.OperationMode)
mock_create_default_sd.assert_called_once_with(
self.netutils._PORT_VLAN_SET_DATA)
def test_prepare_vlan_sd_trunk_mode_already_set(self):
mock_vlan_sd = mock.MagicMock(OperationMode=constants.VLAN_MODE_TRUNK,
NativeVlanId=mock.sentinel.vlan_id,
TrunkVlanIdArray=[100, 99])
actual_vlan_sd = self.netutils._prepare_vlan_sd_trunk_mode(
mock_vlan_sd, None, [99, 100])
self.assertIsNone(actual_vlan_sd)
@mock.patch.object(networkutils.NetworkUtils,
'_create_default_setting_data')
def test_prepare_vlan_sd_trunk_mode(self, mock_create_default_sd):
mock_vlan_sd = mock_create_default_sd.return_value
actual_vlan_sd = self.netutils._prepare_vlan_sd_trunk_mode(
None, mock.sentinel.vlan_id, mock.sentinel.trunk_vlans)
self.assertEqual(mock_vlan_sd, actual_vlan_sd)
self.assertEqual(mock.sentinel.vlan_id, mock_vlan_sd.NativeVlanId)
self.assertEqual(mock.sentinel.trunk_vlans,
mock_vlan_sd.TrunkVlanIdArray)
self.assertEqual(constants.VLAN_MODE_TRUNK, mock_vlan_sd.OperationMode)
mock_create_default_sd.assert_called_once_with(
self.netutils._PORT_VLAN_SET_DATA)
@mock.patch.object(networkutils.NetworkUtils,
'_set_switch_port_security_settings')
def test_set_vswitch_port_vsid(self, mock_set_port_sec_settings):
self.netutils.set_vswitch_port_vsid(mock.sentinel.vsid,
mock.sentinel.switch_port_name)
mock_set_port_sec_settings.assert_called_once_with(
mock.sentinel.switch_port_name, VirtualSubnetId=mock.sentinel.vsid)
@mock.patch.object(networkutils.NetworkUtils,
'_set_switch_port_security_settings')
def test_set_vswitch_port_mac_spoofing(self, mock_set_port_sec_settings):
self.netutils.set_vswitch_port_mac_spoofing(
mock.sentinel.switch_port_name, mock.sentinel.state)
mock_set_port_sec_settings.assert_called_once_with(
mock.sentinel.switch_port_name,
AllowMacSpoofing=mock.sentinel.state)
@mock.patch.object(networkutils.NetworkUtils,
'_get_security_setting_data_from_port_alloc')
@mock.patch.object(networkutils.NetworkUtils,
'_create_default_setting_data')
def _check_set_switch_port_security_settings(self, mock_create_default_sd,
mock_get_security_sd,
missing_sec=False):
mock_port_alloc = self._mock_get_switch_port_alloc()
mock_sec_settings = mock.MagicMock()
mock_get_security_sd.return_value = (
None if missing_sec else mock_sec_settings)
mock_create_default_sd.return_value = mock_sec_settings
if missing_sec:
self.assertRaises(exceptions.HyperVException,
self.netutils._set_switch_port_security_settings,
mock.sentinel.switch_port_name,
VirtualSubnetId=mock.sentinel.vsid)
mock_create_default_sd.assert_called_once_with(
self.netutils._PORT_SECURITY_SET_DATA)
else:
self.netutils._set_switch_port_security_settings(
mock.sentinel.switch_port_name,
VirtualSubnetId=mock.sentinel.vsid)
mock_remove_feature = self.netutils._jobutils.remove_virt_feature
mock_remove_feature.assert_called_once_with(mock_sec_settings)
self.assertEqual(mock.sentinel.vsid,
mock_sec_settings.VirtualSubnetId)
mock_add_feature = self.netutils._jobutils.add_virt_feature
mock_add_feature.assert_called_once_with(mock_sec_settings,
mock_port_alloc)
def test_set_switch_port_security_settings(self):
self._check_set_switch_port_security_settings()
def test_set_switch_port_security_settings_missing(self):
self._check_set_switch_port_security_settings(missing_sec=True)
@mock.patch.object(networkutils.NetworkUtils,
'_get_security_setting_data_from_port_alloc')
def test_set_switch_port_security_settings_already_set(self,
mock_get_sec_sd):
self._mock_get_switch_port_alloc()
mock_sec_sd = mock.MagicMock(VirtualSubnetId=mock.sentinel.vsid,
AllowMacSpoofing=mock.sentinel.state)
mock_get_sec_sd.return_value = mock_sec_sd
self.netutils._set_switch_port_security_settings(
mock.sentinel.switch_port_name,
VirtualSubnetId=mock.sentinel.vsid,
AllowMacSpoofing=mock.sentinel.state)
self.assertFalse(self.netutils._jobutils.remove_virt_feature.called)
self.assertFalse(self.netutils._jobutils.add_virt_feature.called)
@mock.patch.object(_wqlutils, 'get_element_associated_class')
def test_set_vswitch_port_vsid_already_set(self, mock_get_elem_assoc_cls):
self._mock_get_switch_port_alloc()
mock_sec_settings = mock.MagicMock(
AllowMacSpoofing=mock.sentinel.state)
mock_get_elem_assoc_cls.return_value = (mock_sec_settings, True)
self.netutils.set_vswitch_port_mac_spoofing(
mock.sentinel.switch_port_name, mock.sentinel.state)
self.assertFalse(self.netutils._jobutils.add_virt_feature.called)
@mock.patch.object(networkutils.NetworkUtils,
'_get_setting_data_from_port_alloc')
def test_get_profile_setting_data_from_port_alloc(self, mock_get_sd):
result = self.netutils._get_profile_setting_data_from_port_alloc(
mock.sentinel.port)
self.assertEqual(mock_get_sd.return_value, result)
mock_get_sd.assert_called_once_with(
mock.sentinel.port, self.netutils._profile_sds,
self.netutils._PORT_PROFILE_SET_DATA)
@mock.patch.object(networkutils.NetworkUtils,
'_get_setting_data_from_port_alloc')
def test_get_vlan_setting_data_from_port_alloc(self, mock_get_sd):
mock_port = mock.MagicMock()
result = self.netutils._get_vlan_setting_data_from_port_alloc(
mock_port)
self.assertEqual(mock_get_sd.return_value, result)
mock_get_sd.assert_called_once_with(mock_port, self.netutils._vsid_sds,
self.netutils._PORT_VLAN_SET_DATA)
@mock.patch.object(networkutils.NetworkUtils,
'_get_setting_data_from_port_alloc')
def test_get_security_setting_data_from_port_alloc(self, mock_get_sd):
mock_port = mock.MagicMock()
result = self.netutils._get_security_setting_data_from_port_alloc(
mock_port)
self.assertEqual(mock_get_sd.return_value, result)
mock_get_sd.assert_called_once_with(
mock_port, self.netutils._vsid_sds,
self.netutils._PORT_SECURITY_SET_DATA)
@mock.patch.object(networkutils.NetworkUtils,
'_get_setting_data_from_port_alloc')
def test_get_bandwidth_setting_data_from_port_alloc(self, mock_get_sd):
mock_port = mock.MagicMock()
result = self.netutils._get_bandwidth_setting_data_from_port_alloc(
mock_port)
self.assertEqual(mock_get_sd.return_value, result)
mock_get_sd.assert_called_once_with(
mock_port, self.netutils._bandwidth_sds,
self.netutils._PORT_BANDWIDTH_SET_DATA)
def test_get_setting_data_from_port_alloc_cached(self):
mock_port = mock.MagicMock(InstanceID=mock.sentinel.InstanceID)
cache = {mock_port.InstanceID: mock.sentinel.sd_object}
result = self.netutils._get_setting_data_from_port_alloc(
mock_port, cache, mock.sentinel.data_class)
self.assertEqual(mock.sentinel.sd_object, result)
@ddt.data(True, False)
@mock.patch.object(_wqlutils, 'get_element_associated_class')
def test_get_setting_data_from_port_alloc(self, enable_cache,
mock_get_elem_assoc_cls):
self.netutils._enable_cache = enable_cache
sd_object = mock.MagicMock()
mock_port = mock.MagicMock(InstanceID=mock.sentinel.InstanceID)
mock_get_elem_assoc_cls.return_value = [sd_object]
cache = {}
result = self.netutils._get_setting_data_from_port_alloc(
mock_port, cache, mock.sentinel.data_class)
mock_get_elem_assoc_cls.assert_called_once_with(
self.netutils._conn, mock.sentinel.data_class,
element_instance_id=mock.sentinel.InstanceID)
self.assertEqual(sd_object, result)
expected_cache = ({mock.sentinel.InstanceID: sd_object}
if enable_cache else {})
self.assertEqual(expected_cache, cache)
def test_get_switch_port_allocation_cached(self):
self.netutils._switch_ports[mock.sentinel.port_name] = (
mock.sentinel.port)
port, found = self.netutils._get_switch_port_allocation(
mock.sentinel.port_name)
self.assertEqual(mock.sentinel.port, port)
self.assertTrue(found)
@ddt.data(True, False)
@mock.patch.object(networkutils.NetworkUtils, '_get_setting_data')
def test_get_switch_port_allocation(self, enable_cache, mock_get_set_data):
self.netutils._enable_cache = enable_cache
self.netutils._switch_ports = {}
mock_get_set_data.return_value = (mock.sentinel.port, True)
port, found = self.netutils._get_switch_port_allocation(
mock.sentinel.port_name)
self.assertEqual(mock.sentinel.port, port)
self.assertTrue(found)
expected_cache = ({mock.sentinel.port_name: port}
if enable_cache else {})
self.assertEqual(expected_cache, self.netutils._switch_ports)
mock_get_set_data.assert_called_once_with(
self.netutils._PORT_ALLOC_SET_DATA, mock.sentinel.port_name, False)
@mock.patch.object(networkutils.NetworkUtils, '_get_setting_data')
def test_get_switch_port_allocation_expected(self, mock_get_set_data):
self.netutils._switch_ports = {}
mock_get_set_data.return_value = (None, False)
self.assertRaises(exceptions.HyperVPortNotFoundException,
self.netutils._get_switch_port_allocation,
mock.sentinel.port_name, expected=True)
mock_get_set_data.assert_called_once_with(
self.netutils._PORT_ALLOC_SET_DATA, mock.sentinel.port_name, False)
def test_get_setting_data(self):
self.netutils._get_first_item = mock.MagicMock(return_value=None)
mock_data = mock.MagicMock()
self.netutils._get_default_setting_data = mock.MagicMock(
return_value=mock_data)
ret_val = self.netutils._get_setting_data(self._FAKE_CLASS_NAME,
self._FAKE_ELEMENT_NAME,
True)
self.assertEqual(ret_val, (mock_data, False))
def test_create_default_setting_data(self):
result = self.netutils._create_default_setting_data('FakeClass')
fake_class = self.netutils._conn.FakeClass
self.assertEqual(fake_class.new.return_value, result)
fake_class.new.assert_called_once_with()
def test_add_metrics_collection_acls(self):
mock_port = self._mock_get_switch_port_alloc()
mock_acl = mock.MagicMock()
with mock.patch.multiple(
self.netutils,
_create_default_setting_data=mock.Mock(
return_value=mock_acl)):
self.netutils.add_metrics_collection_acls(self._FAKE_PORT_NAME)
mock_add_feature = self.netutils._jobutils.add_virt_feature
actual_calls = len(mock_add_feature.mock_calls)
self.assertEqual(4, actual_calls)
mock_add_feature.assert_called_with(mock_acl, mock_port)
@mock.patch.object(networkutils.NetworkUtils, '_is_port_vm_started')
def test_is_metrics_collection_allowed_true(self, mock_is_started):
mock_acl = mock.MagicMock()
mock_acl.Action = self.netutils._ACL_ACTION_METER
self._test_is_metrics_collection_allowed(
mock_vm_started=mock_is_started,
acls=[mock_acl, mock_acl],
expected_result=True)
@mock.patch.object(networkutils.NetworkUtils, '_is_port_vm_started')
    def test_is_metrics_collection_allowed_false(self, mock_is_started):
self._test_is_metrics_collection_allowed(
mock_vm_started=mock_is_started,
acls=[],
expected_result=False)
@mock.patch.object(_wqlutils, 'get_element_associated_class')
def _test_is_metrics_collection_allowed(self, mock_get_elem_assoc_cls,
mock_vm_started, acls,
expected_result):
mock_port = self._mock_get_switch_port_alloc()
mock_acl = mock.MagicMock()
mock_acl.Action = self.netutils._ACL_ACTION_METER
mock_get_elem_assoc_cls.return_value = acls
mock_vm_started.return_value = True
result = self.netutils.is_metrics_collection_allowed(
self._FAKE_PORT_NAME)
self.assertEqual(expected_result, result)
mock_get_elem_assoc_cls.assert_called_once_with(
self.netutils._conn, self.netutils._PORT_ALLOC_ACL_SET_DATA,
element_instance_id=mock_port.InstanceID)
def test_is_port_vm_started_true(self):
self._test_is_port_vm_started(self.netutils._HYPERV_VM_STATE_ENABLED,
True)
def test_is_port_vm_started_false(self):
self._test_is_port_vm_started(self._FAKE_HYPERV_VM_STATE, False)
def _test_is_port_vm_started(self, vm_state, expected_result):
mock_svc = self.netutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_port = mock.MagicMock()
mock_vmsettings = mock.MagicMock()
mock_summary = mock.MagicMock()
mock_summary.EnabledState = vm_state
mock_vmsettings.path_.return_value = self._FAKE_RES_PATH
self.netutils._conn.Msvm_VirtualSystemSettingData.return_value = [
mock_vmsettings]
mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
[mock_summary])
result = self.netutils._is_port_vm_started(mock_port)
self.assertEqual(expected_result, result)
mock_svc.GetSummaryInformation.assert_called_once_with(
[self.netutils._VM_SUMMARY_ENABLED_STATE],
[self._FAKE_RES_PATH])
@mock.patch.object(_wqlutils, 'get_element_associated_class')
@mock.patch.object(networkutils.NetworkUtils, '_bind_security_rules')
def test_create_security_rules(self, mock_bind, mock_get_elem_assoc_cls):
(m_port, m_acl) = self._setup_security_rule_test(
mock_get_elem_assoc_cls)
fake_rule = mock.MagicMock()
self.netutils.create_security_rules(self._FAKE_PORT_NAME, fake_rule)
mock_bind.assert_called_once_with(m_port, fake_rule)
@mock.patch.object(_wqlutils, 'get_element_associated_class')
@mock.patch.object(networkutils.NetworkUtils, '_create_security_acl')
@mock.patch.object(networkutils.NetworkUtils, '_get_new_weights')
@mock.patch.object(networkutils.NetworkUtils, '_filter_security_acls')
def test_bind_security_rules(self, mock_filtered_acls, mock_get_weights,
mock_create_acl, mock_get_elem_assoc_cls):
m_port = mock.MagicMock()
m_acl = mock.MagicMock()
mock_get_elem_assoc_cls.return_value = [m_acl]
mock_filtered_acls.return_value = []
mock_get_weights.return_value = [mock.sentinel.FAKE_WEIGHT]
mock_create_acl.return_value = m_acl
fake_rule = mock.MagicMock()
self.netutils._bind_security_rules(m_port, [fake_rule])
mock_create_acl.assert_called_once_with(fake_rule,
mock.sentinel.FAKE_WEIGHT)
mock_add_features = self.netutils._jobutils.add_multiple_virt_features
mock_add_features.assert_called_once_with([m_acl], m_port)
mock_get_elem_assoc_cls.assert_called_once_with(
self.netutils._conn, self.netutils._PORT_EXT_ACL_SET_DATA,
element_instance_id=m_port.InstanceID)
@mock.patch.object(_wqlutils, 'get_element_associated_class')
@mock.patch.object(networkutils.NetworkUtils, '_get_new_weights')
@mock.patch.object(networkutils.NetworkUtils, '_filter_security_acls')
def test_bind_security_rules_existent(self, mock_filtered_acls,
mock_get_weights,
mock_get_elem_assoc_cls):
m_port = mock.MagicMock()
m_acl = mock.MagicMock()
mock_get_elem_assoc_cls.return_value = [m_acl]
mock_filtered_acls.return_value = [m_acl]
fake_rule = mock.MagicMock()
self.netutils._bind_security_rules(m_port, [fake_rule])
mock_filtered_acls.assert_called_once_with(fake_rule, [m_acl])
mock_get_weights.assert_called_once_with([fake_rule], [m_acl])
mock_get_elem_assoc_cls.assert_called_once_with(
self.netutils._conn, self.netutils._PORT_EXT_ACL_SET_DATA,
element_instance_id=m_port.InstanceID)
def test_get_port_security_acls_cached(self):
mock_port = mock.MagicMock(ElementName=mock.sentinel.port_name)
self.netutils._sg_acl_sds = {
mock.sentinel.port_name: [mock.sentinel.fake_acl]}
acls = self.netutils._get_port_security_acls(mock_port)
self.assertEqual([mock.sentinel.fake_acl], acls)
@ddt.data(True, False)
@mock.patch.object(_wqlutils, 'get_element_associated_class')
def test_get_port_security_acls(self, enable_cache,
mock_get_elem_assoc_cls):
self.netutils._enable_cache = enable_cache
self.netutils._sg_acl_sds = {}
mock_port = mock.MagicMock()
mock_get_elem_assoc_cls.return_value = [mock.sentinel.fake_acl]
acls = self.netutils._get_port_security_acls(mock_port)
self.assertEqual([mock.sentinel.fake_acl], acls)
expected_cache = ({mock_port.ElementName: [mock.sentinel.fake_acl]}
if enable_cache else {})
self.assertEqual(expected_cache,
self.netutils._sg_acl_sds)
mock_get_elem_assoc_cls.assert_called_once_with(
self.netutils._conn, self.netutils._PORT_EXT_ACL_SET_DATA,
element_instance_id=mock_port.InstanceID)
@mock.patch.object(_wqlutils, 'get_element_associated_class')
@mock.patch.object(networkutils.NetworkUtils, '_filter_security_acls')
def test_remove_security_rules(self, mock_filter, mock_get_elem_assoc_cls):
mock_acl = self._setup_security_rule_test(mock_get_elem_assoc_cls)[1]
fake_rule = mock.MagicMock()
mock_filter.return_value = [mock_acl]
self.netutils.remove_security_rules(self._FAKE_PORT_NAME, [fake_rule])
mock_remove_features = (
self.netutils._jobutils.remove_multiple_virt_features)
mock_remove_features.assert_called_once_with([mock_acl])
@mock.patch.object(_wqlutils, 'get_element_associated_class')
def test_remove_all_security_rules(self, mock_get_elem_assoc_cls):
mock_acl = self._setup_security_rule_test(mock_get_elem_assoc_cls)[1]
self.netutils.remove_all_security_rules(self._FAKE_PORT_NAME)
mock_remove_features = (
self.netutils._jobutils.remove_multiple_virt_features)
mock_remove_features.assert_called_once_with([mock_acl])
@mock.patch.object(networkutils.NetworkUtils,
'_create_default_setting_data')
def test_create_security_acl(self, mock_get_set_data):
mock_acl = mock_get_set_data.return_value
fake_rule = mock.MagicMock()
fake_rule.to_dict.return_value = {"Action": self._FAKE_ACL_ACT}
self.netutils._create_security_acl(fake_rule, self._FAKE_WEIGHT)
mock_acl.set.assert_called_once_with(Action=self._FAKE_ACL_ACT)
def _setup_security_rule_test(self, mock_get_elem_assoc_cls):
mock_port = self._mock_get_switch_port_alloc()
mock_acl = mock.MagicMock()
mock_get_elem_assoc_cls.return_value = [mock_acl]
self.netutils._filter_security_acls = mock.MagicMock(
return_value=[mock_acl])
return (mock_port, mock_acl)
def test_filter_acls(self):
mock_acl = mock.MagicMock()
mock_acl.Action = self._FAKE_ACL_ACT
mock_acl.Applicability = self.netutils._ACL_APPLICABILITY_LOCAL
mock_acl.Direction = self._FAKE_ACL_DIR
mock_acl.AclType = self._FAKE_ACL_TYPE
mock_acl.RemoteAddress = self._FAKE_REMOTE_ADDR
acls = [mock_acl, mock_acl]
good_acls = self.netutils._filter_acls(
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR,
self._FAKE_ACL_TYPE, self._FAKE_REMOTE_ADDR)
bad_acls = self.netutils._filter_acls(
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE)
self.assertEqual(acls, good_acls)
self.assertEqual([], bad_acls)
def test_get_new_weights_allow(self):
actual = self.netutils._get_new_weights([mock.ANY, mock.ANY], mock.ANY)
self.assertEqual([0, 0], actual)
@mock.patch.object(networkutils.NetworkUtils,
'_get_bandwidth_setting_data_from_port_alloc')
@mock.patch.object(networkutils.NetworkUtils,
'_get_default_setting_data')
def test_set_port_qos_rule_hyperv_exc(self, mock_get_default_sd,
mock_get_bandwidth_sd):
mock_port_alloc = self._mock_get_switch_port_alloc()
self.netutils._bandwidth_sds = {
mock_port_alloc.InstanceID: mock.sentinel.InstanceID}
mock_remove_feature = self.netutils._jobutils.remove_virt_feature
mock_add_feature = self.netutils._jobutils.add_virt_feature
mock_add_feature.side_effect = exceptions.HyperVException
qos_rule = dict(min_kbps=20000, max_kbps=30000,
max_burst_kbps=40000, max_burst_size_kb=50000)
self.assertRaises(exceptions.HyperVException,
self.netutils.set_port_qos_rule,
mock.sentinel.port_id, qos_rule)
mock_get_bandwidth_sd.assert_called_once_with(mock_port_alloc)
mock_get_default_sd.assert_called_once_with(
self.netutils._PORT_BANDWIDTH_SET_DATA)
mock_remove_feature.assert_called_once_with(
mock_get_bandwidth_sd.return_value)
mock_add_feature.assert_called_once_with(
mock_get_default_sd.return_value, mock_port_alloc)
bw = mock_get_default_sd.return_value
self.assertEqual(qos_rule['min_kbps'] * units.Ki,
bw.Reservation)
self.assertEqual(qos_rule['max_kbps'] * units.Ki,
bw.Limit)
self.assertEqual(qos_rule['max_burst_kbps'] * units.Ki,
bw.BurstLimit)
self.assertEqual(qos_rule['max_burst_size_kb'] * units.Ki,
bw.BurstSize)
self.assertNotIn(mock_port_alloc.InstanceID,
self.netutils._bandwidth_sds)
@ddt.data({'min_kbps': 100},
{'min_kbps': 10 * units.Ki, 'max_kbps': 100},
{'max_kbps': 10 * units.Ki, 'max_burst_kbps': 100})
def test_set_port_qos_rule_invalid_params_exception(self, qos_rule):
self.assertRaises(exceptions.InvalidParameterValue,
self.netutils.set_port_qos_rule,
mock.sentinel.port_id,
qos_rule)
@mock.patch.object(networkutils.NetworkUtils,
'_get_bandwidth_setting_data_from_port_alloc')
@mock.patch.object(networkutils.NetworkUtils,
'_get_default_setting_data')
def test_set_port_qos_rule_invalid_qos_rule_exc(self, mock_get_default_sd,
mock_get_bandwidth_sd):
self._mock_get_switch_port_alloc()
mock_add_feature = self.netutils._jobutils.add_virt_feature
mock_add_feature.side_effect = exceptions.InvalidParameterValue(
'0x80070057')
qos_rule = dict(min_kbps=20000, max_kbps=30000,
max_burst_kbps=40000, max_burst_size_kb=50000)
self.assertRaises(exceptions.InvalidParameterValue,
self.netutils.set_port_qos_rule,
mock.sentinel.port_id, qos_rule)
def test_set_empty_port_qos_rule(self):
self._mock_get_switch_port_alloc()
self.netutils.set_port_qos_rule(mock.sentinel.port_id, {})
self.assertFalse(self.netutils._get_switch_port_allocation.called)
@mock.patch.object(networkutils.NetworkUtils,
'_get_bandwidth_setting_data_from_port_alloc')
def test_remove_port_qos_rule(self, mock_get_bandwidth_sd):
mock_port_alloc = self._mock_get_switch_port_alloc()
mock_bandwidth_settings = mock_get_bandwidth_sd.return_value
self.netutils.remove_port_qos_rule(mock.sentinel.port_id)
mock_get_bandwidth_sd.assert_called_once_with(mock_port_alloc)
mock_remove_feature = self.netutils._jobutils.remove_virt_feature
mock_remove_feature.assert_called_once_with(
mock_bandwidth_settings)
@mock.patch.object(networkutils.NetworkUtils,
'_create_default_setting_data')
def test_prepare_profile_sd(self, mock_create_default_sd):
mock_profile_sd = mock_create_default_sd.return_value
actual_profile_sd = self.netutils._prepare_profile_sd(
profile_id=mock.sentinel.profile_id,
profile_data=mock.sentinel.profile_data,
profile_name=mock.sentinel.profile_name,
net_cfg_instance_id=mock.sentinel.net_cfg_instance_id,
cdn_label_id=mock.sentinel.cdn_label_id,
cdn_label_string=mock.sentinel.cdn_label_string,
vendor_id=mock.sentinel.vendor_id,
vendor_name=mock.sentinel.vendor_name)
self.assertEqual(mock_profile_sd, actual_profile_sd)
self.assertEqual(mock.sentinel.profile_id,
mock_profile_sd.ProfileId)
self.assertEqual(mock.sentinel.profile_data,
mock_profile_sd.ProfileData)
self.assertEqual(mock.sentinel.profile_name,
mock_profile_sd.ProfileName)
self.assertEqual(mock.sentinel.net_cfg_instance_id,
mock_profile_sd.NetCfgInstanceId)
self.assertEqual(mock.sentinel.cdn_label_id,
mock_profile_sd.CdnLabelId)
self.assertEqual(mock.sentinel.cdn_label_string,
mock_profile_sd.CdnLabelString)
self.assertEqual(mock.sentinel.vendor_id,
mock_profile_sd.VendorId)
self.assertEqual(mock.sentinel.vendor_name,
mock_profile_sd.VendorName)
mock_create_default_sd.assert_called_once_with(
self.netutils._PORT_PROFILE_SET_DATA)
@mock.patch.object(networkutils.NetworkUtils,
'_create_default_setting_data')
def test_prepare_profile_sd_failed(self, mock_create_default_sd):
self.assertRaises(TypeError, self.netutils._prepare_profile_sd,
invalid_argument=mock.sentinel.invalid_argument)
class TestNetworkUtilsR2(test_base.OsWinBaseTestCase):
def setUp(self):
super(TestNetworkUtilsR2, self).setUp()
self.netutils = networkutils.NetworkUtilsR2()
self.netutils._conn_attr = mock.MagicMock()
@mock.patch.object(networkutils.NetworkUtilsR2,
'_create_default_setting_data')
def test_create_security_acl(self, mock_create_default_setting_data):
sg_rule = mock.MagicMock()
sg_rule.to_dict.return_value = {}
acl = self.netutils._create_security_acl(sg_rule, mock.sentinel.weight)
self.assertEqual(mock.sentinel.weight, acl.Weight)
def test_get_new_weights_no_acls_deny(self):
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY)
actual = self.netutils._get_new_weights([mock_rule], [])
self.assertEqual([1], actual)
def test_get_new_weights_no_acls_allow(self):
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW)
actual = self.netutils._get_new_weights([mock_rule, mock_rule], [])
expected = [self.netutils._MAX_WEIGHT - 1,
self.netutils._MAX_WEIGHT - 2]
self.assertEqual(expected, actual)
def test_get_new_weights_deny(self):
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY)
mockacl1 = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY,
Weight=1)
mockacl2 = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY,
Weight=3)
actual = self.netutils._get_new_weights([mock_rule, mock_rule],
[mockacl1, mockacl2])
self.assertEqual([2, 4], actual)
def test_get_new_weights_allow(self):
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW)
mockacl = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW,
Weight=self.netutils._MAX_WEIGHT - 3)
actual = self.netutils._get_new_weights([mock_rule, mock_rule],
[mockacl])
expected = [self.netutils._MAX_WEIGHT - 4,
self.netutils._MAX_WEIGHT - 5]
self.assertEqual(expected, actual)
def test_get_new_weights_search_available(self):
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW)
mockacl1 = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW,
Weight=self.netutils._REJECT_ACLS_COUNT + 1)
mockacl2 = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW,
Weight=self.netutils._MAX_WEIGHT - 1)
actual = self.netutils._get_new_weights([mock_rule],
[mockacl1, mockacl2])
self.assertEqual([self.netutils._MAX_WEIGHT - 2], actual)
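# --- Editor's note (illustrative, not part of the original test module). ---
# The _get_new_weights tests above pin down the R2 weight-allocation scheme:
# DENY ACLs are numbered upwards from just above the highest existing DENY
# weight, while ALLOW ACLs count downwards from _MAX_WEIGHT, skipping weights
# that are already taken. A sketch of the expected behaviour (values only for
# illustration, using the class constant referenced above):
#   _get_new_weights([deny_rule], [])              -> [1]
#   _get_new_weights([allow_rule, allow_rule], []) -> [_MAX_WEIGHT - 1, _MAX_WEIGHT - 2]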
| 45.338737 | 79 | 0.684476 |
4a23d644a6962d6ea7eadcf81a11f2f15a39da40 | 3,714 | py | Python | api/cases/libraries/advice.py | uktrade/lite-ap | 4e1a57956bd921992b4a6e2b8fbacbba5720960d | ["MIT"] | 3 | 2019-05-15T09:30:39.000Z | 2020-04-22T16:14:23.000Z | api/cases/libraries/advice.py | uktrade/lite-ap | 4e1a57956bd921992b4a6e2b8fbacbba5720960d | ["MIT"] | 85 | 2019-04-24T10:39:35.000Z | 2022-03-21T14:52:12.000Z | api/cases/libraries/advice.py | uktrade/lite-ap | 4e1a57956bd921992b4a6e2b8fbacbba5720960d | ["MIT"] | 1 | 2021-01-17T11:12:19.000Z | 2021-01-17T11:12:19.000Z |
from collections import defaultdict
from api.cases.enums import AdviceType
from api.cases.models import Advice
from api.goods.enums import PvGrading
def group_advice(case, advice, user, new_level):
advice_entities = {entity_field: defaultdict(list) for entity_field in Advice.ENTITY_FIELDS}
    for advice_item in advice:
        advice_entities[advice_item.entity_field][advice_item.entity].append(advice_item)
for entity_field in Advice.ENTITY_FIELDS:
collate_advice(entity_field, new_level, advice_entities[entity_field], case, user)
def collate_advice(entity_field, new_level, collection, case, user):
for key, advice_list in collection.items():
denial_reasons = []
advice = construct_coalesced_advice_values(
deduplicate_advice(advice_list), case, user.govuser, denial_reasons=denial_reasons
)
# Set outside the constructor so it can apply only when necessary
advice.team = user.govuser.team
advice.level = new_level
setattr(advice, entity_field, key)
advice.save()
advice.denial_reasons.set(denial_reasons)
def deduplicate_advice(advice_list):
"""
This examines each piece of data in a set of advice for an object
and if there are any exact duplicates it only returns one of them.
"""
deduplicated = []
    for advice in advice_list:
        # Compare the new piece of advice against every piece already kept and
        # only keep it if it duplicates none of them
        matches = any(advice.equals(item) for item in deduplicated)
        if not matches:
            deduplicated.append(advice)
return deduplicated
def construct_coalesced_advice_values(
deduplicated_advice, case, user, denial_reasons, advice_type=None,
):
fields = {
"text": set(),
"note": set(),
"pv_grading": set(),
"proviso": set(),
"collated_pv_grading": set(),
"footnote": set(),
}
break_text = "\n-------\n"
for advice in deduplicated_advice:
for denial_reason in advice.denial_reasons.values_list("id", flat=True):
denial_reasons.append(denial_reason)
if advice_type:
if advice_type != advice.type:
if {advice_type, advice.type} == {AdviceType.APPROVE, AdviceType.PROVISO}:
advice_type = AdviceType.PROVISO
else:
advice_type = AdviceType.CONFLICTING
else:
advice_type = advice.type
for field in fields:
if getattr(advice, field):
fields[field].add(getattr(advice, field))
advice_types = set([a.type for a in deduplicated_advice])
if len(advice_types) == 1:
advice_type = deduplicated_advice[0].type
elif advice_types == {AdviceType.NO_LICENCE_REQUIRED, AdviceType.APPROVE}:
advice_type = AdviceType.NO_LICENCE_REQUIRED
elif advice_types == {AdviceType.PROVISO, AdviceType.APPROVE}:
advice_type = AdviceType.PROVISO
else:
advice_type = AdviceType.CONFLICTING
pv_grading = (
break_text.join([PvGrading.to_str(pv_grading) for pv_grading in fields["pv_grading"]])
if fields["pv_grading"]
else list(fields["collated_pv_grading"])[0]
if fields["collated_pv_grading"]
else None
)
return Advice(
text=break_text.join(fields["text"]),
case=case,
note=break_text.join(fields["note"]),
proviso=break_text.join(fields["proviso"]),
user=user,
type=advice_type,
collated_pv_grading=pv_grading,
footnote=break_text.join(fields["footnote"]),
footnote_required=len(fields["footnote"]) > 0,
)
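# --- Editor's note (illustrative, not part of the original module). ---
# How construct_coalesced_advice_values resolves the final advice type from the
# set of deduplicated advice types:
#   {single type}                       -> that type
#   {NO_LICENCE_REQUIRED, APPROVE}      -> NO_LICENCE_REQUIRED
#   {PROVISO, APPROVE}                  -> PROVISO
#   any other mixture                   -> CONFLICTING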
| 34.073394 | 98 | 0.655358 |
4a23d6d7c47dc749445a0953aab2753edece7db2 | 7,343 | py | Python | pytvision/datasets/datasets.py | HelenGuohx/cv-ferattn-code | faa9b7850fe2a0f8c08193bb129b5fec4639d616 | ["MIT"] | 1 | 2020-12-22T02:51:37.000Z | 2020-12-22T02:51:37.000Z | pytvision/datasets/datasets.py | HelenGuohx/cv-ferattn-code | faa9b7850fe2a0f8c08193bb129b5fec4639d616 | ["MIT"] | 1 | 2021-05-18T11:46:25.000Z | 2021-05-18T11:46:25.000Z | pytvision/datasets/datasets.py | HelenGuohx/cv-ferattn-code | faa9b7850fe2a0f8c08193bb129b5fec4639d616 | ["MIT"] | null | null | null |
import numpy as np
import random
import torch
from . import utility
from ..transforms.aumentation import ObjectImageAndLabelTransform
import warnings
warnings.filterwarnings("ignore")
class Dataset( object ):
"""
Generic dataset
"""
def __init__(self,
data,
num_channels=1,
count=None,
transform=None
):
"""
Initialization
Args:
            @data: data provider class
            @num_channels: number of image channels each sample is converted to
            @transform: transform applied to each (image, label) pair
"""
if count is None: count = len(data)
self.count = count
self.data = data
self.num_channels=num_channels
self.transform = transform
self.labels = data.labels
self.classes = np.unique(self.labels)
self.numclass = len(self.classes)
def __len__(self):
return self.count
def __getitem__(self, idx):
idx = idx % len(self.data)
image, label = self.data[idx]
image = np.array(image)
image = utility.to_channels(image, self.num_channels)
label = utility.to_one_hot(label, self.numclass)
obj = ObjectImageAndLabelTransform( image, label )
if self.transform:
obj = self.transform( obj )
return obj.to_dict()
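# --- Illustrative usage sketch added by the editor; not part of the original file. ---
# `provider` is a hypothetical object exposing __len__, __getitem__ -> (image, label)
# and a `labels` array, which is the contract Dataset expects from `data`.
def _example_dataset_usage(provider, transform=None):
    """Build a Dataset and fetch one sample as the dict produced by the transform object."""
    dataset = Dataset(provider, num_channels=3, transform=transform)
    sample = dataset[0]  # e.g. {'image': ..., 'label': one-hot vector}
    return len(dataset), sample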
class ResampleDataset( object ):
r"""Resample data for generic dataset
Args:
data : dataloader class
num_channels : number of the channels
count : size of dataset
        transform : transform applied to each sample
"""
def __init__(self,
data,
num_channels=1,
count=200,
transform=None
):
self.num_channels=num_channels
self.data = data
self.transform = transform
self.labels = data.labels
self.count=count
#self.classes = np.unique(self.labels)
self.classes, self.frecs = np.unique(self.labels, return_counts=True)
self.numclass = len(self.classes)
#self.weights = 1-(self.frecs/np.sum(self.frecs))
self.weights = np.ones( (self.numclass,1) )
self.reset(self.weights)
self.labels_index = list()
for cl in range( self.numclass ):
indx = np.where(self.labels==cl)[0]
self.labels_index.append(indx)
def reset(self, weights):
self.dist_of_classes = np.array(random.choices(self.classes, weights=weights, k=self.count ))
def __len__(self):
return self.count
def __getitem__(self, idx):
idx = self.dist_of_classes[ idx ]
class_index = self.labels_index[idx]
n = len(class_index)
idx = class_index[ random.randint(0,n-1) ]
image, label = self.data[idx]
image = np.array(image)
image = utility.to_channels(image, self.num_channels)
label = utility.to_one_hot(label, self.numclass)
obj = ObjectImageAndLabelTransform( image, label )
if self.transform:
obj = self.transform( obj )
return obj.to_dict()
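# --- Illustrative usage sketch added by the editor; not part of the original file. ---
# ResampleDataset pre-draws `count` class labels (see reset) and, per __getitem__,
# returns a random sample of the drawn class, so class balance can be changed by
# passing new per-class weights. The weight values below are editor's examples.
def _example_resample_usage(provider):
    """Oversample class 1 twice as heavily as class 0 in a two-class provider."""
    resampled = ResampleDataset(provider, num_channels=3, count=100)
    resampled.reset(weights=[1.0, 2.0])  # one weight per class, in class order
    return resampled[0]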
class ODDataset( object ):
r"""Abstract generator class for object detection.
Args:
batch_size : The size of the batches to generate.
shuffle_groups : If True, shuffles the groups each epoch.
image_min_side : After resizing the minimum side of an image is equal to image_min_side.
image_max_side : If after resizing the maximum side is larger than image_max_side, scales down further so that the max side is equal to image_max_side.
transform_parameters : The transform parameters used for data augmentation.
compute_anchor_targets : Function handler for computing the targets of anchors for an image and its annotations.
compute_shapes : Function handler for computing the shapes of the pyramid for a given input.
"""
def __init__(
self,
batch_size=1,
shuffle_groups=True,
image_min_side=800,
image_max_side=1333,
transform_parameters=None,
compute_anchor_targets=None,
compute_shapes=None,
):
self.batch_size = int(batch_size)
self.shuffle_groups = shuffle_groups
self.image_min_side = image_min_side
self.image_max_side = image_max_side
self.compute_anchor_targets = compute_anchor_targets
self.compute_shapes = compute_shapes
self.index = 0
def __len__(self):
return self.size()
def size(self):
""" Size of the dataset.
"""
raise NotImplementedError('size method not implemented')
def num_classes(self):
""" Number of classes in the dataset.
"""
raise NotImplementedError('num_classes method not implemented')
def name_to_label(self, name):
""" Map name to label.
"""
raise NotImplementedError('name_to_label method not implemented')
def label_to_name(self, label):
""" Map label to name.
"""
raise NotImplementedError('label_to_name method not implemented')
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
"""
raise NotImplementedError('image_aspect_ratio method not implemented')
def load_image(self, image_index):
""" Load an image at the image_index.
"""
raise NotImplementedError('load_image method not implemented')
def load_annotations(self, image_index):
""" Load annotations for an image_index.
"""
raise NotImplementedError('load_annotations method not implemented')
def filter_boxes(self, image_group, boxs_group ):
""" Filter boxes by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
boxs_group_filter = []
for index, (image, boxes) in enumerate(zip(image_group, boxs_group)):
            assert(isinstance(boxes, torch.Tensor)), '\'load_annotations\' should return a torch.Tensor, received: {}'.format(type(boxes))
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
invalid_indices = (
(boxes[:, 2] <= boxes[:, 0]) |
(boxes[:, 3] <= boxes[:, 1]) |
(boxes[:, 0] < 0) |
(boxes[:, 1] < 0) |
(boxes[:, 2] > image.shape[1]) |
(boxes[:, 3] > image.shape[0])
)
            # keep only the boxes that pass the validity checks above
            boxes = boxes[~invalid_indices]
boxs_group_filter.append(boxes)
return boxs_group_filter
def compute_targets(self, image, annotations):
""" Compute target outputs for the network using images and their annotations.
"""
labels=[]
boxs=[]
for ann in annotations:
x1,y1,x2,y2,c = ann
boxs.append([float(x1), float(y1), float(x2), float(y2)])
labels.append(int(c))
return np.stack( boxs, 0 ), np.stack( labels, 0 )
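# --- Editor's note (illustrative, not part of the original file). ---
# compute_targets expects each annotation as [x1, y1, x2, y2, class_id] and returns
# an (N, 4) box array plus an (N,) label array, e.g.:
#   boxes, labels = od_dataset.compute_targets(image, [[10, 20, 50, 80, 3]])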
| 31.650862 | 167 | 0.580281 |
4a23d722a94fdc5ff543da80fe3d399f8ce36080 | 96 | py | Python | venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | ["MIT"] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | null | null | null | /home/runner/.cache/pip/pool/a9/78/b5/9c4abbac9a099ac7d70a15c83f46865e955a50f31806d48310ea258758 | 96 | 96 | 0.895833 |
4a23d7c89f4604668dfd771408b1d0e8f5fa7dbe | 8,473 | py | Python | packages/python/plotly/plotly/graph_objs/bar/marker/colorbar/_tickfont.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | ["MIT"] | null | null | null | packages/python/plotly/plotly/graph_objs/bar/marker/colorbar/_tickfont.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | ["MIT"] | null | null | null | packages/python/plotly/plotly/graph_objs/bar/marker/colorbar/_tickfont.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | ["MIT"] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "bar.marker.colorbar"
_path_str = "bar.marker.colorbar.tickfont"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
        it is available on the system on which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
            apply a font if it is available on the system on which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.bar.marker.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
            apply a font if it is available on the system on which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.bar.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.marker.colorbar.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
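# Usage sketch: a minimal, illustrative example of wiring this Tickfont into a
# bar trace's colorbar via plotly.graph_objects. The data values and font
# settings below are made up for illustration only.
if __name__ == "__main__":
    import plotly.graph_objects as go
    fig = go.Figure(
        go.Bar(
            x=["a", "b", "c"],
            y=[1, 3, 2],
            marker=dict(
                color=[1, 3, 2],  # numeric colors so a colorbar is drawn
                colorbar=dict(
                    # The class defined above styles the colorbar tick labels.
                    tickfont=Tickfont(family="Courier New", size=14, color="crimson")
                ),
            ),
        )
    )
    fig.show()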
| 37.162281 | 82 | 0.567214 |
4a23d7fe42f43c8fde334d6f7ef7956c653adfb0 | 12,202 | py | Python | workers/mns_py/src/common/core_network_mock.py | ATownsend/mzbench | 96733b62ee466c25cfe6d308a63b851b4938b4f9 | ["BSD-3-Clause"] | 2 | 2020-01-28T20:55:13.000Z | 2020-10-28T15:24:25.000Z | workers/mns_py/src/common/core_network_mock.py | ATownsend/mzbench | 96733b62ee466c25cfe6d308a63b851b4938b4f9 | ["BSD-3-Clause"] | null | null | null | workers/mns_py/src/common/core_network_mock.py | ATownsend/mzbench | 96733b62ee466c25cfe6d308a63b851b4938b4f9 | ["BSD-3-Clause"] | 1 | 2020-06-04T13:49:14.000Z | 2020-06-04T13:49:14.000Z |
####################################
# Imports
####################################
import requests
import random
import uuid
import time
import sys
import json
import paho.mqtt.client as mqtt
import gevent
#from .MqttClient import MqttClient
from .MacAddress import MacAddress
class CoreNetworkSimple:
def __init__(
self,
mac=None,
gk_url=None,
mqtt_on_connect=lambda *args: None,
mqtt_on_disconnect=lambda *args: None
):
if gk_url is None:
raise Exception('full gk_url must be provided')
self.mqtt_on_connect = mqtt_on_connect
self.mqtt_on_disconnect = mqtt_on_disconnect
self.macAddress = MacAddress(mac=mac if mac is not None else random.randint(0, 10000000000))
self.location_id = "location-id-%s" % (uuid.uuid4())
self.network = self._core_create_dummy_network_model()
self.gk_url = gk_url
@staticmethod
def nothing():
return True
def populate_network(self, mqtt_status = True, mqtt_history = True, retry = 2):
runtime_start = time.time()
# Register
results = self._gatekeeper_register_network(retry = retry)
runtime_gatekeeper_registration = time.time()
self.network_id = self.guardian_mqtt['network_id']
self.guardian_type = self.guardian_mqtt["mqType"]
# Initialize history
if mqtt_status or mqtt_history:
self._mqtt_connect()
guardianReport = self._create_guardian_status_report()
motionReport = self._create_motionmatrix_report()
if mqtt_status:
self._mqtt_publish("guardian-status", guardianReport)
if mqtt_history:
self._mqtt_publish("motion-matrix", motionReport)
# Return stats
runtime_end_mqtt = time.time()
runtimes = {
"total": runtime_end_mqtt - runtime_start,
"gatekeeper": runtime_gatekeeper_registration - runtime_start,
"mqtt": runtime_end_mqtt - runtime_gatekeeper_registration,
}
return {'results': results, 'runtimes': runtimes}
    def send_guardian_status_report(self, timestamp=None):
        # The timestamp defaults at call time (inside the report builders);
        # a `time.time()` default argument would be evaluated only once, at
        # class-definition time.
        guardianReport = self._create_guardian_status_report(timestamp)
        self._mqtt_publish("guardian-status", guardianReport)
    def send_heartbeat(self, timestamp=None):
        guardianReport = self._create_guardian_status_report(timestamp, heartbeat=True)
        self._mqtt_publish("guardian-status", guardianReport)
    def send_motion(self, timestamp=None):
        motionReport = self._create_motionmatrix_report(timestamp=timestamp)
        self._mqtt_publish("motion-matrix", motionReport)
#--- Internal API past this point
def __del__(self):
if hasattr(self, "mqtt_connection"):
self._mqtt_disconnect()
@property
def guardian_mqtt(self):
return self.network['nodes'][0]['gk_reply']['local_config']['guardian_mqtt']
def _mqtt_connect(self, username = 'device'):
self.mqtt_connection = mqtt.Client(client_id=self.location_id)
self.mqtt_connection.username_pw_set(username=username, password=self.guardian_mqtt['mqToken'])
self.mqtt_connection.connect(self.guardian_mqtt['mqServer'], port=self.guardian_mqtt['mqPort'])
        self.mqtt_connection.on_connect = self.mqtt_on_connect
self.mqtt_connection.on_disconnect = self.mqtt_on_disconnect
self.glet = gevent.spawn(self.mqtt_connection.loop_forever)
def _mqtt_publish(self, event, data):
# Blocking call to send a report to an MQTT client
topic = "iot-2/type/%s/id/%s/evt/%s/fmt/json" % (self.guardian_type, self.location_id, event)
check = 0
msg_info = self.mqtt_connection.publish(topic, json.dumps(data), qos=1)
if not msg_info.is_published():
while not msg_info.is_published():
gevent.sleep(0.1)
check += 1
if check > 300:
print("Failed to publish to MQTT")
return False
def _mqtt_disconnect(self):
print("MQTT disconnect in progress", self.location_id)
self.mqtt_connection.disconnect()
gevent.joinall([self.glet])
delattr(self, "mqtt_connection")
def _core_create_dummy_network_model(self):
mac_address = self.macAddress
# Define a 3-node mesh network, where one acts as the gateway.
network = {
# External IP assigned to the master wan0 ethernet
# interface.
"ip": "10.0.0.0",
# Gateway mac and IP address
"gateway": {"mac": "ff:00:00:00:00:00", "ip": "10.0.0.0"},
"nodes": [
{
# Master node
"role": "master",
"mesh_mac": mac_address.address(),
"eth_mac": mac_address.offset(1),
"wlan_2ghz_mac": mac_address.offset(2),
"wlan_5ghz_mac": mac_address.offset(3),
"peers": [1, 2, 3],
},
{
# Peer node 1
"role": "peer",
"mesh_mac": mac_address.offset("10"),
"eth_mac": mac_address.offset("11"),
"wlan_2ghz_mac": mac_address.offset("12"),
"wlan_5ghz_mac": mac_address.offset("13"),
"peers": [0, 2, 4],
},
{
# Peer node 2
"role": "peer",
"mesh_mac": mac_address.offset("20"),
"eth_mac": mac_address.offset("21"),
"wlan_2ghz_mac": mac_address.offset("22"),
"wlan_5ghz_mac": mac_address.offset("23"),
"peers": [0, 1, 5],
},
{
# Leaf node 1 (connected to Master)
"role": "leaf",
"mesh_mac": mac_address.offset("30"),
"peers": [0],
},
{
# Leaf node 2 (connected to Peer node 1)
"role": "leaf",
"mesh_mac": mac_address.offset("40"),
"peers": [1],
},
{
# Leaf node 3 (connected to Peer node 2)
"role": "leaf",
"mesh_mac": mac_address.offset("50"),
"peers": [2],
},
],
}
return network
def _gatekeeper_register_network(self, retry = 2):
# Register a new (or existing) network by publishing radar status
# reports to gatekeeper.
# print("Registering network with gatekeeper @ %s..." % gatekeeper_url)
results = []
for node in self.network["nodes"]:
if node["role"] not in ["master", "peer"]:
continue
status = self._create_radar_status_report(node)
payload = {
"radar_status": status,
"factory_reset": False,
"master_failed": False,
"location_id": self.location_id,
}
            while True:
                root = requests.post(self.gk_url, json=payload)
                if root.status_code == 200:
                    # Check for success before giving up, so a successful
                    # final attempt is not discarded.
                    node['gk_reply'] = root.json()
                    results.append(root.status_code)
                    break
                elif retry == 0:
                    print(root.__dict__)
                    raise Exception("Failed to register %s with gatekeeper, after 3 tries." % node)
                else:
                    rand_number = random.randint(10, 30)
                    print("Retrying in %i" % rand_number)
                    time.sleep(rand_number)
                    retry -= 1
return results
    def _create_motionmatrix_report(self, timestamp=None, interval=500, count=1, report_type="matrix"):
# Create a dummy motion matrix report
def mac_to_linkstr(mac):
return mac.replace(":", "")[-6:]
data_key = "data" if report_type == "matrix" else "motion"
report = {
"ts": timestamp,
"interval": interval,
"count": count,
data_key: {"mkai": [], "throughput": []},
"links": [],
}
# Generate link list combinations (using the mesh macs in the
# network).
for i in range(len(self.network["nodes"]) - 1):
for j in range(i + 1, len(self.network["nodes"])):
src_mac = self.network["nodes"][i]["mesh_mac"]
dest_mac = self.network["nodes"][j]["mesh_mac"]
report["links"].append( mac_to_linkstr(src_mac) + "-" + mac_to_linkstr(dest_mac) )
for l in range(len(report["links"])):
# Omit the outer arrays when count=1
if count == 1:
mkai = random.random()
throughput = 1.0
else:
mkai = [random.random() for x in range(report["count"])]
throughput = [1.0]*report["count"]
report[data_key]["mkai"].append(mkai)
report[data_key]["throughput"].append(throughput)
return report
    def _create_guardian_status_report(self, timestamp=None, heartbeat=False):
report = {
"ts": timestamp if timestamp is not None else time.time(),
"guardian_id": self.location_id,
"network_id": self.network_id,
"last_motion": time.time(),
"motion_enabled": 1,
"motion_tripped": 0
}
if heartbeat == False:
radar_reports = {}
for node in self.network["nodes"]:
if node["role"] not in ["master", "peer"]:
continue
radar_reports["test-" + node["eth_mac"].replace(":", "")] = self._create_radar_status_report(node)
report["radars"] = radar_reports
return report
    def _create_radar_status_report(self, node, timestamp=None):
# Create a dummy-status block for a given network node,
# such that we can get a valid response from gatekeeper
# with it.
# Master node must be first one
master_node = self.network["nodes"][0]
        timestamp = timestamp if timestamp is not None else time.time()
# Create empty status report
status = {
"location_id": self.location_id,
"deviceId": "test-" + node["eth_mac"].replace(":", ""),
"ts": timestamp,
"interfaces": [],
"links": [],
"ap_bssid_2ghz": node["wlan_2ghz_mac"],
"ap_bssid_5ghz": node["wlan_5ghz_mac"],
"mesh_bssid": node["mesh_mac"],
"gateway_bssid": master_node["mesh_mac"],
"root_mode": 1,
}
# Override gateway bssid for master node:
if node == master_node:
status["gateway_bssid"] = self.network["gateway"]["mac"]
status["root_mode"] = 2
# Add wan0 ethernet interface with default gateway,
# but only set its' type to ETHERNET if this is the master.
if node == master_node:
if_type = "ETHERNET"
else:
if_type = "BRIDGE"
interface = {
"name": "wan0",
"type": if_type,
"mac": node["eth_mac"],
"ip": "10.22.22.1",
"routes": [{"dst": "0.0.0.0"}],
}
status["interfaces"].append(interface)
# Populate link list for all local peers
# This is what is actually used to form the network.
for peer_id in node["peers"]:
peer_node = self.network["nodes"][peer_id]
link_entry = {"mac": peer_node["mesh_mac"], "peer_type": "7"}
if peer_node["role"] == "leaf":
link_entry["peer_type"] = "2"
status["links"].append(link_entry)
return status
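# Usage sketch: the gatekeeper URL and MAC below are placeholders; a real run
# also needs the MQTT broker credentials that gatekeeper returns. Building the
# dummy topology and a local motion report requires no network access.
if __name__ == "__main__":
    sim = CoreNetworkSimple(mac=1234, gk_url="http://gatekeeper.example/register")
    matrix = sim._create_motionmatrix_report(timestamp=time.time())
    print(sim.location_id, "->", len(matrix["links"]), "mesh links")
    # Against live services one would instead call:
    #   sim.populate_network()  # register over HTTP, then publish over MQTT
    #   sim.send_heartbeat()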
| 37.088146 | 116 | 0.536469 |
4a23d96721de1171a6f90cd0e547a8add0ce2011 | 86,628 | py | Python | tensorflow/python/ops/variable_scope.py | arnaldog12/tensorflow | 5a4261d2015cf241ca0b6e3e5a023c4957def3bf | ["Apache-2.0"] | 3 | 2018-04-23T19:34:31.000Z | 2018-09-13T08:41:34.000Z | tensorflow/python/ops/variable_scope.py | arnaldog12/tensorflow | 5a4261d2015cf241ca0b6e3e5a023c4957def3bf | ["Apache-2.0"] | null | null | null | tensorflow/python/ops/variable_scope.py | arnaldog12/tensorflow | 5a4261d2015cf241ca0b6e3e5a023c4957def3bf | ["Apache-2.0"] | 13 | 2018-02-22T21:04:13.000Z | 2020-11-17T11:38:36.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import traceback
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
__all__ = ["AUTO_REUSE", "VariableScope", "get_variable_scope",
"get_variable", "get_local_variable", "variable_scope",
"variable_op_scope", "no_regularizer"]
class _PartitionInfo(object):
"""Holds partition info used by initializer functions.
"""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape
of the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(
shape), self.full_shape, len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
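# A small worked example of the two helpers above (shapes and offsets chosen
# purely for illustration): for a [10, 20] variable split along axis 0, the
# shard that starts at row 5 would see
#
#   info = _PartitionInfo(full_shape=[10, 20], var_offset=[5, 0])
#   info.single_slice_dim([5, 20])  # -> 0, the partitioned axis
#   info.single_offset([5, 20])     # -> 5, this shard's offset on that axis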
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
AUTO_REUSE = _ReuseMode.AUTO_REUSE
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self.variable_scopes_count = {} # Count re-used variable scopes.
self._store_eager_variables = False
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in self.variable_scopes_count:
if not scope_name or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable(self, name, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
partitioner=None, validate_shape=True, use_resource=None,
custom_getter=None, constraint=None):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True).
When eager execution is enabled this argument is always forced to be
true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError(
"Passed a custom_getter which is not callable: %s" % custom_getter)
if context.in_eager_mode():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
use_resource = True
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter(name, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
partitioner=None, validate_shape=True, use_resource=None,
constraint=None):
is_scalar = (shape is not None
and isinstance(shape, collections_lib.Sequence)
and not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError(
"Partitioner must be callable, but received: %s" % partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, reuse=reuse,
trainable=trainable, collections=collections,
caching_device=caching_device, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
}
# `fn_args` can handle functions, `functools.partial`, `lambda`.
if "constraint" in estimator_util.fn_args(custom_getter):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
reuse=reuse, trainable=trainable, collections=collections,
caching_device=caching_device, partitioner=partitioner,
validate_shape=validate_shape, use_resource=use_resource,
constraint=constraint)
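  # A hedged sketch of a non-trivial custom getter, beyond the identity getter
  # shown in the docstring above: store variables in float32 but hand callers a
  # float16 view, a common mixed-precision pattern (names are illustrative).
  #
  #   def float16_getter(getter, name, shape=None, dtype=None, *args, **kwargs):
  #     storage_dtype = tf.float32 if dtype == tf.float16 else dtype
  #     var = getter(name, shape, dtype=storage_dtype, *args, **kwargs)
  #     return tf.cast(var, dtype) if dtype == tf.float16 else var
  #
  #   with tf.variable_scope("model", custom_getter=float16_getter):
  #     w = tf.get_variable("w", [3, 3], dtype=tf.float16)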
def _get_partitioned_variable(
self, name, partitioner, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
validate_shape=True, use_resource=None, constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable
(defaults to `DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
if context.in_eager_mode():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
reuse_without_partition = reuse and not partitioner
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
if not reuse_without_partition:
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable (%s) must be "
"fully defined, but instead was %s." % (name, shape))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
partitions = partitioner(shape=shape, dtype=dtype)
if not isinstance(partitions, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s"
% partitions)
if len(partitions) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (partitions, shape))
if any([p < 1 for p in partitions]):
raise ValueError(
"Partitioner returned zero partitions for some axes: %s" %
partitions)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (not reuse_without_partition and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, slice_shape = _compute_slice_dim_and_shape(
shape.as_list(), partitions)
vs = []
num_slices = partitions[slice_dim]
num_slices_with_excess = shape[slice_dim].value % num_slices
slice_offset = [0] * shape.ndims
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not."
% (num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d."
% (num_slices, name, name, num_slices))
for i in xrange(num_slices):
var_shape = slice_shape[:]
var_offset = slice_offset[:]
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
if i < num_slices_with_excess:
var_shape[slice_dim] += 1
slice_offset[slice_dim] += var_shape[slice_dim]
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# pylint: disable=protected-access
var._set_save_slice_info(variables.Variable.SaveSliceInfo(
name, shape.as_list(), var_offset, var_shape))
vs.append(var)
# pylint: enable=protected-access
# pylint: disable=protected-access
partitioned_var = variables.PartitionedVariable(name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
# pylint: enable=protected-access
self._partitioned_vars[name] = partitioned_var
return partitioned_var
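  # Illustrative use of the partitioning path above through the public API
  # (shard count and shape are arbitrary, chosen only for illustration):
  #
  #   partitioner = tf.fixed_size_partitioner(num_shards=4)
  #   with tf.variable_scope("embeddings", partitioner=partitioner):
  #     emb = tf.get_variable("emb", shape=[1000, 64])
  #   # `emb` is a PartitionedVariable with 4 shards along axis 0; accessing it
  #   # as a Tensor concatenates the shards along the partition axis.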
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
tb = self._vars[name].op.traceback[::-1]
# Throw away internal tf entries and only take a few lines.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
raise ValueError("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope? "
"Originally defined at:\n\n%s" % (
name, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
if not shape.is_fully_defined() and not initializing_from_value:
raise ValueError("Shape of a new variable (%s) must be fully defined, "
"but instead was %s." % (name, shape))
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Clear control dependencies while creating the initializer.
with ops.control_dependencies(None):
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
variable_dtype = dtype.base_dtype
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = False
if use_resource:
v = resource_variable_ops.ResourceVariable(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint)
else:
v = variables.Variable(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint)
if context.in_graph_mode() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
loss = regularizer(v)
if loss is not None:
if context.in_graph_mode():
v_name = v.name
loss_name = loss.name
else:
v_name = "v_%s" % type(v)
loss_name = "loss_%s" % type(loss)
logging.vlog(1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v_name, loss_name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = init_ops.zeros_initializer()
initializing_from_value = False
    # NOTE: do we also need to support DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required"
% (name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
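# A hedged sketch of how no_regularizer is typically used: a variable scope can
# set a default regularizer for everything it creates, and a single variable
# can opt out by passing no_regularizer explicitly (names are illustrative).
#
#   with tf.variable_scope(
#       "layer", regularizer=tf.contrib.layers.l2_regularizer(1e-4)):
#     w = tf.get_variable("w", [10, 10])  # picks up the scope's regularizer
#     b = tf.get_variable("b", [10], regularizer=tf.no_regularizer)  # opts out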
# TODO(alive): support caching devices and partitioned variables in Eager mode.
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults
to False (will later change to True). When eager execution is enabled
this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.in_eager_mode():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
if self._partitioner is not None:
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.in_eager_mode() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.in_eager_mode():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
if partitioner and context.in_eager_mode():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.in_graph_mode():
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
else:
reuse = False
use_resource = True
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=reuse, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, custom_getter=custom_getter,
constraint=constraint)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets an existing variable with this name or create a new one."""
if context.in_eager_mode():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=self.reuse, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPE_KEY = ("__varscope",)
def get_variable_scope():
"""Returns the current variable scope."""
scope = ops.get_collection(_VARSCOPE_KEY)
if scope: # This collection has at most 1 element, the default scope at [0].
return scope[0]
scope = VariableScope(False)
ops.add_to_collection(_VARSCOPE_KEY, scope)
return scope
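# Usage sketch: the returned scope object is commonly used to switch on reuse
# for the remainder of the current scope, e.g. for weight sharing:
#
#   with tf.variable_scope("shared"):
#     w1 = tf.get_variable("w", [4, 4])
#     tf.get_variable_scope().reuse_variables()
#     w2 = tf.get_variable("w", [4, 4])  # returns the variable created above
#   assert w1 is w2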
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.layers.dense(input, name="l1")
print(container.variables) # Should print the variables used in the layer.
```
"""
def __init__(self):
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x._trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x._trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None):
return get_variable_scope().get_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, custom_getter=custom_getter,
constraint=constraint)
get_variable_or_local_docstring = (
"""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
@{$variables$Variable Scope How To}
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
@{tf.GraphKeys.REGULARIZATION_LOSSES} and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
@functools.wraps(get_variable)
def get_local_variable(*args, **kwargs):
kwargs["trainable"] = False
if "collections" in kwargs:
kwargs["collections"] += [ops.GraphKeys.LOCAL_VARIABLES]
else:
kwargs["collections"] = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(*args, **kwargs)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n",
"",
"GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
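# Hedged sketch of the public route to a partitioned variable: `get_variable`
# with a `partitioner`. The name, shape and shard count are illustrative
# assumptions and the helper is never called in this file.
def _partitioned_variable_sketch():
  # Splits a [10, 4] variable into 2 shards of shape [5, 4] along axis 0; the
  # partitioner receives the fully defined shape and dtype as keyword args.
  return get_variable(
      "sharded_w_demo",
      shape=[10, 4],
      partitioner=lambda shape, dtype: [2, 1])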
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.AUTO_REUSE; if `None`, we inherit the parent
scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
        If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
get_variable_scope() # Ensure that a default exists, then get a pointer.
# Get the reference to the collection as we want to modify it in place.
self._default_varscope = ops.get_collection_ref(_VARSCOPE_KEY)
self._var_store = _get_default_variable_store()
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(
self._custom_getter, self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._default_varscope[0]
if isinstance(self._name_or_scope, VariableScope):
self._var_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(self._var_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" + self._name_or_scope if self._old.name
else self._name_or_scope)
self._reuse = (self._reuse
or self._old.reuse) # Re-using is inherited by sub-scopes.
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=self._old_name_scope or self._name_or_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_store.open_variable_scope(self._new_name)
self._default_varscope[0] = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_store.variable_scopes_count = self._old_subscopes
else:
self._var_store.close_variable_subscopes(self._new_name)
self._default_varscope[0] = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
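# Hedged sketch of chained custom getters (illustrative names, never called in
# this file): the inner scope's getter runs first and delegates to the outer
# scope's getter via _maybe_wrap_custom_getter, which finally calls the true
# getter.
def _chained_custom_getter_sketch():
  def outer_getter(getter, name, *args, **kwargs):
    return getter(name + "_outer", *args, **kwargs)
  def inner_getter(getter, name, *args, **kwargs):
    return getter(name + "_inner", *args, **kwargs)
  with variable_scope("demo", custom_getter=outer_getter):
    with variable_scope("nested", custom_getter=inner_getter):
      # "_inner" is appended first, then "_outer", so the variable is created
      # under the name "demo/nested/v_inner_outer".
      return get_variable("v", shape=[1])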
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_store = _get_default_variable_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
class variable_scope(object): # pylint: disable=invalid-name
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `scope` is None, then
`default_name` is used. In that case, if the same name has been previously
used in the same scope, it will be made unique by appending `_N` to it.
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the @{$variables$Variable Scope How To}, here we present only a few basic
examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
      # Raises ValueError("... v does not exist ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as mult. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`, this name will be uniquified. If name_or_scope is provided it
won't be used and therefore it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode
for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create
variables if they do not exist, and return them otherwise; if None, we
inherit the parent scope's reuse flag. When eager execution is enabled,
this argument is always forced to be tf.AUTO_REUSE.
dtype: type of variables created in this scope (defaults to the type
in the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.in_eager_mode()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous __enter__, so we avoid some overhead by
# re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope:
self._current_name_scope = ops.name_scope(name_scope)
current_name_scope_name = self._current_name_scope.__enter__()
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
self._cached_pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
return self._cached_pure_variable_scope.__enter__()
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
self._cached_pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
return self._cached_pure_variable_scope.__enter__()
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
self._current_name_scope = ops.name_scope(self._default_name)
current_name_scope_name = self._current_name_scope.__enter__()
unique_default_name = _get_unique_variable_scope(self._default_name)
self._cached_pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
return self._cached_pure_variable_scope.__enter__()
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(
type_arg, value_arg, traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _compute_slice_dim_and_shape(full_shape, slicing):
"""Computes which dimension is being sliced and the typical slice shape."""
slice_shape = [0] * len(full_shape)
slice_dim = None
for dim, num_slices in enumerate(slicing):
dim_size = full_shape[dim]
if num_slices <= 0 or dim_size < num_slices:
raise ValueError("Cannot create %d slices for size %d. shape: %s, "
"slicing: %s" %
(num_slices, full_shape[dim], full_shape, slicing))
if num_slices == 1:
# Not slicing in this dimension.
slice_shape[dim] = dim_size
elif slice_dim is not None:
# We only support slicing along one of the dimensions.
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, slicing: %s" % (full_shape, slicing))
else:
# Note: We will add any extras onto the last slice, later.
slice_dim = dim
slice_shape[dim] = dim_size // num_slices
# Degenerate case: If "slicing" was all ones, pretend we are slicing along
# the first dimension.
if slice_dim is None:
slice_dim = 0
return slice_dim, slice_shape
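# Worked example with illustrative numbers:
#   _compute_slice_dim_and_shape([4, 6], [1, 3]) returns (1, [4, 2]),
# i.e. only dimension 1 is split, into 3 slices of 2 columns each; the caller
# folds any remainder elements into the last slice.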
def variable(initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
use_resource=None):
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource or (use_resource is None and context.in_eager_mode()):
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype)
elif not use_resource and context.in_eager_mode():
raise RuntimeError(
"VariableScope should use resource variable when eager execution is"
" enabled, but use_resource is False."
)
else:
return variables.Variable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype)
| 43.034277 | 103 | 0.668664 |
4a23d9f4eeb714e7969cbfaa2510bcacb90cad6a | 342 | py | Python | appointment/admin.py | md-arru/Heart-Care-System | b7b8472f7b8ea289c8bea5fe00c2856af159f46b | [
"MIT"
] | 20 | 2019-11-26T12:33:25.000Z | 2021-09-02T16:03:59.000Z | appointment/admin.py | md-arru/Heart-Care-System | b7b8472f7b8ea289c8bea5fe00c2856af159f46b | [
"MIT"
] | 14 | 2019-08-23T15:12:34.000Z | 2022-03-11T23:46:34.000Z | appointment/admin.py | md-arru/Heart-Care-System | b7b8472f7b8ea289c8bea5fe00c2856af159f46b | [
"MIT"
] | 23 | 2019-08-19T01:12:25.000Z | 2022-03-08T12:53:44.000Z | from django.contrib import admin
from .models import Appointment
@admin.register(Appointment)
class AppoinmentAdmin(admin.ModelAdmin):
list_display = ['name', 'email', 'phone', 'doctor', 'date', 'time']
date_hierarchy = ('date')
list_filter = ['date', 'doctor', ]
list_per_page = 20
search_fields = ['doctor', 'name', ]
| 28.5 | 71 | 0.675439 |
4a23db137cd92761e02b70dbcc37ee509fb85218 | 7,695 | py | Python | src/mlp_morgan_out.py | yliuhz/PMAW | 23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4 | [
"Apache-2.0"
] | 8 | 2021-12-02T02:25:55.000Z | 2022-03-18T23:41:42.000Z | src/mlp_morgan_out.py | yliuhz/PMAW | 23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4 | [
"Apache-2.0"
] | null | null | null | src/mlp_morgan_out.py | yliuhz/PMAW | 23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4 | [
"Apache-2.0"
] | null | null | null | # MLP
import csv
from itertools import islice
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold, train_test_split
import pandas as pd
from sklearn.utils import shuffle
from time import sleep
from base import bit2attr
import tensorflow as tf
# def bit2attr(bitstr) -> list:
# attr_vec = list()
# for i in range(len(bitstr)):
# attr_vec.append(int(bitstr[i]))
# return attr_vec
def mean_relative_error(y_pred, y_test):
assert len(y_pred) == len(y_test)
mre = 0.0
for i in range(len(y_pred)):
mre = mre + abs((y_pred[i] - y_test[i]) / y_test[i])
mre = mre * 100/ len(y_pred)
return mre
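# Worked example with illustrative values: y_pred = [110, 90], y_test = [100, 100]
# gives (|10/100| + |10/100|) * 100 / 2 = 10.0, i.e. a mean relative error of 10%.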
Large_MRE_points = pd.DataFrame()
Large_MRE_X = []
Large_MRE_y_test = []
Large_MRE_y_pred = []
Large_MRE = []
'''
1) 数据预处理
'''
# filepath = 'data/fp/sjn/R+B+Cmorgan_fp1202.csv'
NUM_ATTR = 1024
def read_bit(filepath):
data = list()
# data_y = pd.DataFrame(columns=['y'])
with open(filepath, 'r', encoding='gbk') as f:
reader = csv.reader(f)
num_attr = int()
        for row in islice(reader, 1, None):  # skip the header row
if len(row) == 0:
continue
num_attr = len(row[1])
assert num_attr == NUM_ATTR
num_attr = len(row[2])
assert num_attr == NUM_ATTR
# data_x.append(bit2attr(row[0]), ignore_index=True)
# data_y.append([int(row[1])], ignore_index=True)
temp = bit2attr(row[1])
temp = temp + bit2attr(row[2])
temp.append(float(row[0]))
data.append(temp)
    # random.shuffle(data)  # do not shuffle the data here
data = np.array(data)
# data_x_df = pd.DataFrame(data[:, 0:2*NUM_ATTR])
# data_y_df = pd.DataFrame(data[:, 2*NUM_ATTR])
# return [data_x_df, data_y_df]
data = pd.DataFrame(data)
return data
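# Assumed CSV layout, inferred from read_bit above: column 0 holds the target
# value (e.g. the wavelength plotted later), columns 1 and 2 each hold a
# 1024-bit Morgan fingerprint string; the two fingerprints are concatenated
# into a 2048-dim feature vector and the target is appended as the last column.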
# filepath = 'data/fp/sjn/R+B+Cmorgan_fp1202.csv'
filepath = 'data/database/22-01-29-morgan-train.csv'
# data_x = pd.DataFrame(columns=[str(i) for i in range(NUM_ATTR)])
test_filepath = "data/database/22-01-29-morgan-test-level-1.csv"
# [data_x_df, data_y_df] = read_bit(filepath)
data = read_bit(filepath)
data = shuffle(data)
data_x_df = pd.DataFrame(data.iloc[:, :-1])
data_y_df = pd.DataFrame(data.iloc[:, -1])
# Normalization: min-max scale features and targets to [0, 1]
min_max_scaler_X = MinMaxScaler()
min_max_scaler_X.fit(data_x_df)
x_trans1 = min_max_scaler_X.transform(data_x_df)
min_max_scaler_y = MinMaxScaler()
min_max_scaler_y.fit(data_y_df)
y_trans1 = min_max_scaler_y.transform(data_y_df)
# [test_data_x_df, test_data_y_df] = read_bit(test_filepath)
test_data = read_bit(test_filepath)
test_data_x_df = pd.DataFrame(test_data.iloc[:, :-1])
test_data_y_df = pd.DataFrame(test_data.iloc[:, -1])
x_trans1_test = min_max_scaler_X.transform(test_data_x_df)
y_trans1_test = min_max_scaler_y.transform(test_data_y_df)
print(data_x_df.shape, data_y_df.shape)
print(test_data_x_df.shape, test_data_y_df.shape)
sleep(5)
'''
3) Build the model
'''
from keras.layers import MaxPooling1D, Conv1D, Dense, Flatten, Dropout
from keras import models
from keras.optimizers import Adam, RMSprop, SGD
def buildModel():
model = models.Sequential()
l5 = Dense(512, activation='relu')
l6 = Dropout(rate=0.2)
l7 = Dense(128, activation='relu')
l8 = Dense(30, activation='relu')
l9 = Dense(1)
layers = [l5, l6, l7, l8, l9]
for i in range(len(layers)):
model.add(layers[i])
adam = Adam(lr=1e-3)
model.compile(optimizer=adam, loss='logcosh', metrics=['mae'])
    # Note: this scikit-learn MLPRegressor is constructed but never used;
    # buildModel compiles and returns the Keras model defined above.
    model_mlp = MLPRegressor(
hidden_layer_sizes=(512, 128, 32), activation='relu', solver='lbfgs', alpha=0.0001,
max_iter=5000,
random_state=1, tol=0.0001, verbose=False, warm_start=False)
return model
def scheduler(epoch, lr):
if epoch > 0 and epoch % 500 == 0:
return lr * 0.1
else:
return lr
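# Effect of the schedule above, assuming the initial Adam learning rate of 1e-3
# set in buildModel: 1e-3 for epochs 0-499, 1e-4 from epoch 500, 1e-5 from
# epoch 1000, and 1e-6 from epoch 1500 over the 2000-epoch fit below.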
'''
4) Train the model
'''
from sklearn import metrics
# n_split = 10
mlp_scores = []
MAEs = []
out_MAEs = []
in_y_test = []
in_y_pred = []
out_y_test = []
out_y_pred = []
X_train = x_trans1
y_train = y_trans1
callback = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
model_mlp = buildModel()
model_mlp.fit(X_train, y_train, epochs=2000, verbose=1, callbacks=[callback])
# External validation
X_test = x_trans1_test
result = model_mlp.predict(x_trans1_test)
y_trans1_test = np.reshape(y_trans1_test, (-1, 1))
y_test = min_max_scaler_y.inverse_transform(y_trans1_test)
result = result.reshape(-1, 1)
result = min_max_scaler_y.inverse_transform(result)
mae = mean_relative_error(y_test, result)
out_MAEs.append(mae)
Large_MRE_X = [] ## Type of X_test??
Large_MRE_y_test = []
Large_MRE_y_pred = []
Large_MRE = []
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1]))
X_test = min_max_scaler_X.inverse_transform(X_test)
for idx in range(len(y_test)):
Large_MRE.append(mean_relative_error([result[idx]], [y_test[idx]])[0])
Large_MRE_y_test = list(np.reshape(y_test, (-1,)))
Large_MRE_y_pred = list(np.reshape(result, (-1,)))
temp = pd.DataFrame(X_test)
temp = pd.concat([temp, pd.DataFrame({'Real Value': Large_MRE_y_test}), pd.DataFrame({'Predicted Value': Large_MRE_y_pred}),
pd.DataFrame({'MRE': Large_MRE})], axis=1)
# temp = temp.sort_values(by='MRE', ascending=False)
temp.to_csv('Out/Large_MRE_out_points.csv', encoding='gb18030', index=False)
out_y_test.append(y_test)
out_y_pred.append(result)
## Solid colormap from white through purple to black
from pylab import *
from matplotlib.colors import ListedColormap,LinearSegmentedColormap
clist = ['white', 'purple', 'black']
newcmp = LinearSegmentedColormap.from_list('chaos',clist)
# External validation plot
## Solid colormap from white through purple to black (defined above as newcmp)
out_y_pred = np.reshape(out_y_pred, (-1,))
out_y_test = np.reshape(out_y_test, (-1,))
xmin = out_y_test.min()
# xmin = min(xmin, out_y_pred.min())
xmax = out_y_test.max()
# xmax = max(xmax, out_y_pred.max())
fig = plt.figure(figsize=(14, 10))
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
# plt.grid(linestyle="--")
plt.xlabel('Real values for lambda(mm)', fontsize=20)
plt.ylabel('Predicted values for lambda(mm)', fontsize=20)
plt.yticks(size=16)
plt.xticks(size=16)
plt.plot([xmin, xmax], [xmin, xmax], ':', linewidth=1.5, color='gray')
print('MRE', out_MAEs)
print('avg MRE', sum(out_MAEs) / len(out_MAEs))
print('max MRE', max(out_MAEs))
print('min MRE', min(out_MAEs))
errstr = 'MRE=%.2f%%' % (sum(out_MAEs) / len(out_MAEs))
plt.text(xmin + 50, xmax - 130, errstr, fontsize=20, weight='bold')
# for i in range(len(in_y_pred)):
# plt.scatter(in_y_test[i], in_y_pred[i], edgecolors='b')
hexf = plt.hexbin(out_y_test, out_y_pred, gridsize=20, extent=[xmin, xmax, xmin, xmax],
cmap=newcmp)
# xmin = np.array(in_y_test).min()
# xmax = np.array(in_y_test).max()
# ymin = np.array(in_y_pred).min()
# ymax = np.array(in_y_pred).max()
plt.axis([xmin, xmax, xmin, xmax])
ax = plt.gca()
ax.tick_params(top=True, right=True)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=16)
plt.savefig('pics/morgan-fig-out-mlp.png')
plt.show()
# plt.figure(figsize=(10, 6))
# plt.xlabel('ground truth')
# plt.ylabel('predicted')
# plt.plot([400, 1100], [400, 1100], 'k--')
# print('MRE', out_MAEs)
# print('avg MRE', sum(out_MAEs) / len(out_MAEs))
# print('max MRE', max(out_MAEs))
# print('min MRE', min(out_MAEs))
# errstr = 'MRE = %.2f%%' % (sum(out_MAEs) / len(out_MAEs))
# plt.text(420, 750, errstr, fontsize=16)
# for i in range(len(out_y_pred)):
# plt.plot(out_y_test[i], out_y_pred[i], 'ro')
# print('mlp_score', mlp_scores)
# plt.savefig('pics/descriptor-fig-out.png')
# plt.show()
| 29.037736 | 124 | 0.683431 |
4a23db7bc1b9d39aa878375dec74d1f334c2e818 | 592 | py | Python | tests/test_tutorial/test_path_operation_advanced_configurations/test_tutorial003.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 53,007 | 2018-12-08T10:05:29.000Z | 2022-03-31T23:30:02.000Z | tests/test_tutorial/test_path_operation_advanced_configurations/test_tutorial003.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 4,155 | 2019-01-05T05:07:49.000Z | 2022-03-31T21:25:38.000Z | tests/test_tutorial/test_path_operation_advanced_configurations/test_tutorial003.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 4,092 | 2018-12-09T16:21:00.000Z | 2022-03-31T07:59:45.000Z | from fastapi.testclient import TestClient
from docs_src.path_operation_advanced_configuration.tutorial003 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_get():
response = client.get("/items/")
assert response.status_code == 200, response.text
assert response.json() == [{"item_id": "Foo"}]
| 24.666667 | 74 | 0.680743 |
4a23dbe58e2787eceab5be26f51d85a6d3bd77bb | 5,091 | py | Python | src/meshcat/animation.py | Arpafaucon/meshcat-python | c3a9ceaa2b82ba1146b174d901a63269a9b5432f | [
"MIT"
] | null | null | null | src/meshcat/animation.py | Arpafaucon/meshcat-python | c3a9ceaa2b82ba1146b174d901a63269a9b5432f | [
"MIT"
] | null | null | null | src/meshcat/animation.py | Arpafaucon/meshcat-python | c3a9ceaa2b82ba1146b174d901a63269a9b5432f | [
"MIT"
] | null | null | null | import tempfile
import tarfile
import os.path
import subprocess
import bisect
from . import transformations as tf
class AnimationTrack(object):
__slots__ = ["name", "jstype", "frames", "values"]
def __init__(self, name, jstype, frames=None, values=None):
self.name = name
self.jstype = jstype
if frames is None:
self.frames = []
else:
self.frames = frames
if values is None:
self.values = []
else:
self.values = values
def set_property(self, frame, value):
i = bisect.bisect(self.frames, frame)
self.frames.insert(i, frame)
self.values.insert(i, value)
def lower(self):
return {
u"name": str("." + self.name),
u"type": str(self.jstype),
u"keys": [{
u"time": self.frames[i],
u"value": self.values[i]
} for i in range(len(self.frames))]
}
class AnimationClip(object):
__slots__ = ["tracks", "fps", "name"]
def __init__(self, tracks=None, fps=30, name=u"default"):
if tracks is None:
self.tracks = {}
else:
self.tracks = tracks
self.fps = fps
self.name = name
def set_property(self, frame, property, jstype, value):
if property not in self.tracks:
self.tracks[property] = AnimationTrack(property, jstype)
track = self.tracks[property]
track.set_property(frame, value)
def lower(self):
return {
u"fps": self.fps,
u"name": str(self.name),
u"tracks": [t.lower() for t in self.tracks.values()]
}
class Animation(object):
__slots__ = ["clips", "default_framerate"]
def __init__(self, clips=None, default_framerate=30):
if clips is None:
self.clips = {}
else:
self.clips = clips
self.default_framerate = default_framerate
def lower(self):
return [{
u"path": path.lower(),
u"clip": clip.lower()
} for (path, clip) in self.clips.items()]
def at_frame(self, visualizer, frame):
return AnimationFrameVisualizer(self, visualizer.path, frame)
def js_position(matrix):
return list(matrix[:3, 3])
def js_quaternion(matrix):
quat = tf.quaternion_from_matrix(matrix)
return [quat[1], quat[2], quat[3], quat[0]]
class AnimationFrameVisualizer(object):
__slots__ = ["animation", "path", "current_frame"]
def __init__(self, animation, path, current_frame):
self.animation = animation
self.path = path
self.current_frame = current_frame
def get_clip(self):
if self.path not in self.animation.clips:
self.animation.clips[self.path] = AnimationClip(fps=self.animation.default_framerate)
return self.animation.clips[self.path]
def set_transform(self, matrix):
clip = self.get_clip()
clip.set_property(self.current_frame, u"position", u"vector3", js_position(matrix))
clip.set_property(self.current_frame, u"quaternion", u"quaternion", js_quaternion(matrix))
def set_property(self, prop, jstype, value):
clip = self.get_clip()
clip.set_property(self.current_frame, prop, jstype, value)
def __getitem__(self, path):
return AnimationFrameVisualizer(self.animation, self.path.append(path), self.current_frame)
def __enter__(self):
return self
def __exit__(self, *arg):
pass
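# Hedged usage sketch (not called anywhere in this module): the node name
# "box" and the transforms are illustrative assumptions.
def _animation_usage_sketch(visualizer):
    # Build a 1-second clip that slides a node along x between frames 0 and 30.
    anim = Animation(default_framerate=30)
    with anim.at_frame(visualizer, 0) as frame:
        frame["box"].set_transform(tf.translation_matrix([0.0, 0.0, 0.0]))
    with anim.at_frame(visualizer, 30) as frame:
        frame["box"].set_transform(tf.translation_matrix([1.0, 0.0, 0.0]))
    # Assumes the live Visualizer exposes set_animation for playback.
    visualizer.set_animation(anim)
    return anim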
def convert_frames_to_video(tar_file_path, output_path="output.mp4", framerate=60, overwrite=False):
"""
Try to convert a tar file containing a sequence of frames saved by the
meshcat viewer into a single video file.
This relies on having `ffmpeg` installed on your system.
"""
output_path = os.path.abspath(output_path)
if os.path.isfile(output_path) and not overwrite:
raise ValueError("The output path {:s} already exists. To overwrite that file, you can pass overwrite=True to this function.".format(output_path))
with tempfile.TemporaryDirectory() as tmp_dir:
with tarfile.open(tar_file_path) as tar:
tar.extractall(tmp_dir)
args = ["ffmpeg",
"-r", str(framerate),
"-i", r"%07d.png",
"-vcodec", "libx264",
"-preset", "slow",
"-pix_fmt", "yuv420p",
"-crf", "18"]
if overwrite:
args.append("-y")
args.append(output_path)
try:
subprocess.check_call(args, cwd=tmp_dir)
except subprocess.CalledProcessError as e:
print("""
Could not call `ffmpeg` to convert your frames into a video.
If you want to convert the frames manually, you can extract the
.tar archive into a directory, cd to that directory, and run:
ffmpeg -r 60 -i %07d.png -vcodec libx264 -preset slow -pix_fmt yuv420p -crf 18 output.mp4
""")
raise
print("Saved output as {:s}".format(output_path))
return output_path
| 30.854545 | 154 | 0.608132 |
4a23dc76db90b6df581fc0a59f9ffc2e68cc6a7d | 31,049 | py | Python | intersight/models/license_account_license_data.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | intersight/models/license_account_license_data.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | intersight/models/license_account_license_data.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class LicenseAccountLicenseData(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoBaseMoRef]',
'create_time': 'datetime',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoBaseMoRef',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'account': 'IamAccountRef',
'account_id': 'str',
'agent_data': 'str',
'auth_expire_time': 'str',
'auth_initial_time': 'str',
'auth_next_time': 'str',
'category': 'str',
'customer_op': 'LicenseCustomerOpRef',
'group': 'str',
'last_sync': 'datetime',
'last_updated_time': 'datetime',
'license_state': 'str',
'license_tech_support_info': 'str',
'licenseinfos': 'list[LicenseLicenseInfoRef]',
'register_expire_time': 'str',
'register_initial_time': 'str',
'register_next_time': 'str',
'registration_status': 'str',
'renew_failure_string': 'str',
'smart_account': 'str',
'smartlicense_token': 'LicenseSmartlicenseTokenRef',
'sync_status': 'str',
'virtual_account': 'str'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'tags': 'Tags',
'version_context': 'VersionContext',
'account': 'Account',
'account_id': 'AccountId',
'agent_data': 'AgentData',
'auth_expire_time': 'AuthExpireTime',
'auth_initial_time': 'AuthInitialTime',
'auth_next_time': 'AuthNextTime',
'category': 'Category',
'customer_op': 'CustomerOp',
'group': 'Group',
'last_sync': 'LastSync',
'last_updated_time': 'LastUpdatedTime',
'license_state': 'LicenseState',
'license_tech_support_info': 'LicenseTechSupportInfo',
'licenseinfos': 'Licenseinfos',
'register_expire_time': 'RegisterExpireTime',
'register_initial_time': 'RegisterInitialTime',
'register_next_time': 'RegisterNextTime',
'registration_status': 'RegistrationStatus',
'renew_failure_string': 'RenewFailureString',
'smart_account': 'SmartAccount',
'smartlicense_token': 'SmartlicenseToken',
'sync_status': 'SyncStatus',
'virtual_account': 'VirtualAccount'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, tags=None, version_context=None, account=None, account_id=None, agent_data=None, auth_expire_time=None, auth_initial_time=None, auth_next_time=None, category=None, customer_op=None, group=None, last_sync=None, last_updated_time=None, license_state=None, license_tech_support_info=None, licenseinfos=None, register_expire_time=None, register_initial_time=None, register_next_time=None, registration_status=None, renew_failure_string=None, smart_account=None, smartlicense_token=None, sync_status=None, virtual_account=None):
"""
LicenseAccountLicenseData - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._tags = None
self._version_context = None
self._account = None
self._account_id = None
self._agent_data = None
self._auth_expire_time = None
self._auth_initial_time = None
self._auth_next_time = None
self._category = None
self._customer_op = None
self._group = None
self._last_sync = None
self._last_updated_time = None
self._license_state = None
self._license_tech_support_info = None
self._licenseinfos = None
self._register_expire_time = None
self._register_initial_time = None
self._register_next_time = None
self._registration_status = None
self._renew_failure_string = None
self._smart_account = None
self._smartlicense_token = None
self._sync_status = None
self._virtual_account = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if account is not None:
self.account = account
if account_id is not None:
self.account_id = account_id
if agent_data is not None:
self.agent_data = agent_data
if auth_expire_time is not None:
self.auth_expire_time = auth_expire_time
if auth_initial_time is not None:
self.auth_initial_time = auth_initial_time
if auth_next_time is not None:
self.auth_next_time = auth_next_time
if category is not None:
self.category = category
if customer_op is not None:
self.customer_op = customer_op
if group is not None:
self.group = group
if last_sync is not None:
self.last_sync = last_sync
if last_updated_time is not None:
self.last_updated_time = last_updated_time
if license_state is not None:
self.license_state = license_state
if license_tech_support_info is not None:
self.license_tech_support_info = license_tech_support_info
if licenseinfos is not None:
self.licenseinfos = licenseinfos
if register_expire_time is not None:
self.register_expire_time = register_expire_time
if register_initial_time is not None:
self.register_initial_time = register_initial_time
if register_next_time is not None:
self.register_next_time = register_next_time
if registration_status is not None:
self.registration_status = registration_status
if renew_failure_string is not None:
self.renew_failure_string = renew_failure_string
if smart_account is not None:
self.smart_account = smart_account
if smartlicense_token is not None:
self.smartlicense_token = smartlicense_token
if sync_status is not None:
self.sync_status = sync_status
if virtual_account is not None:
self.virtual_account = virtual_account
@property
def account_moid(self):
"""
Gets the account_moid of this LicenseAccountLicenseData.
The Account ID for this managed object.
:return: The account_moid of this LicenseAccountLicenseData.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this LicenseAccountLicenseData.
The Account ID for this managed object.
:param account_moid: The account_moid of this LicenseAccountLicenseData.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this LicenseAccountLicenseData.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this LicenseAccountLicenseData.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this LicenseAccountLicenseData.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this LicenseAccountLicenseData.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this LicenseAccountLicenseData.
The time when this managed object was created.
:return: The create_time of this LicenseAccountLicenseData.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this LicenseAccountLicenseData.
The time when this managed object was created.
:param create_time: The create_time of this LicenseAccountLicenseData.
:type: datetime
"""
self._create_time = create_time
@property
def mod_time(self):
"""
Gets the mod_time of this LicenseAccountLicenseData.
The time when this managed object was last modified.
:return: The mod_time of this LicenseAccountLicenseData.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this LicenseAccountLicenseData.
The time when this managed object was last modified.
:param mod_time: The mod_time of this LicenseAccountLicenseData.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this LicenseAccountLicenseData.
A unique identifier of this Managed Object instance.
:return: The moid of this LicenseAccountLicenseData.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this LicenseAccountLicenseData.
A unique identifier of this Managed Object instance.
:param moid: The moid of this LicenseAccountLicenseData.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this LicenseAccountLicenseData.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this LicenseAccountLicenseData.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this LicenseAccountLicenseData.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this LicenseAccountLicenseData.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this LicenseAccountLicenseData.
An array of owners which represent effective ownership of this object.
:return: The owners of this LicenseAccountLicenseData.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this LicenseAccountLicenseData.
An array of owners which represent effective ownership of this object.
:param owners: The owners of this LicenseAccountLicenseData.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this LicenseAccountLicenseData.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this LicenseAccountLicenseData.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this LicenseAccountLicenseData.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this LicenseAccountLicenseData.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def tags(self):
"""
Gets the tags of this LicenseAccountLicenseData.
An array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this LicenseAccountLicenseData.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this LicenseAccountLicenseData.
An array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this LicenseAccountLicenseData.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this LicenseAccountLicenseData.
The versioning info for this managed object
:return: The version_context of this LicenseAccountLicenseData.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this LicenseAccountLicenseData.
The versioning info for this managed object
:param version_context: The version_context of this LicenseAccountLicenseData.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def account(self):
"""
Gets the account of this LicenseAccountLicenseData.
:return: The account of this LicenseAccountLicenseData.
:rtype: IamAccountRef
"""
return self._account
@account.setter
def account(self, account):
"""
Sets the account of this LicenseAccountLicenseData.
:param account: The account of this LicenseAccountLicenseData.
:type: IamAccountRef
"""
self._account = account
@property
def account_id(self):
"""
Gets the account_id of this LicenseAccountLicenseData.
Root user's ID of the account
:return: The account_id of this LicenseAccountLicenseData.
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""
Sets the account_id of this LicenseAccountLicenseData.
Root user's ID of the account
:param account_id: The account_id of this LicenseAccountLicenseData.
:type: str
"""
self._account_id = account_id
@property
def agent_data(self):
"""
Gets the agent_data of this LicenseAccountLicenseData.
Agent trusted store data
:return: The agent_data of this LicenseAccountLicenseData.
:rtype: str
"""
return self._agent_data
@agent_data.setter
def agent_data(self, agent_data):
"""
Sets the agent_data of this LicenseAccountLicenseData.
Agent trusted store data
:param agent_data: The agent_data of this LicenseAccountLicenseData.
:type: str
"""
self._agent_data = agent_data
@property
def auth_expire_time(self):
"""
Gets the auth_expire_time of this LicenseAccountLicenseData.
Authorization expiration time
:return: The auth_expire_time of this LicenseAccountLicenseData.
:rtype: str
"""
return self._auth_expire_time
@auth_expire_time.setter
def auth_expire_time(self, auth_expire_time):
"""
Sets the auth_expire_time of this LicenseAccountLicenseData.
Authorization expiration time
:param auth_expire_time: The auth_expire_time of this LicenseAccountLicenseData.
:type: str
"""
self._auth_expire_time = auth_expire_time
@property
def auth_initial_time(self):
"""
Gets the auth_initial_time of this LicenseAccountLicenseData.
        Initial Authorization time
:return: The auth_initial_time of this LicenseAccountLicenseData.
:rtype: str
"""
return self._auth_initial_time
@auth_initial_time.setter
def auth_initial_time(self, auth_initial_time):
"""
Sets the auth_initial_time of this LicenseAccountLicenseData.
        Initial Authorization time
:param auth_initial_time: The auth_initial_time of this LicenseAccountLicenseData.
:type: str
"""
self._auth_initial_time = auth_initial_time
@property
def auth_next_time(self):
"""
Gets the auth_next_time of this LicenseAccountLicenseData.
Next time for Authorization
:return: The auth_next_time of this LicenseAccountLicenseData.
:rtype: str
"""
return self._auth_next_time
@auth_next_time.setter
def auth_next_time(self, auth_next_time):
"""
Sets the auth_next_time of this LicenseAccountLicenseData.
Next time for Authorization
:param auth_next_time: The auth_next_time of this LicenseAccountLicenseData.
:type: str
"""
self._auth_next_time = auth_next_time
@property
def category(self):
"""
Gets the category of this LicenseAccountLicenseData.
License category
:return: The category of this LicenseAccountLicenseData.
:rtype: str
"""
return self._category
@category.setter
def category(self, category):
"""
Sets the category of this LicenseAccountLicenseData.
License category
:param category: The category of this LicenseAccountLicenseData.
:type: str
"""
self._category = category
@property
def customer_op(self):
"""
Gets the customer_op of this LicenseAccountLicenseData.
:return: The customer_op of this LicenseAccountLicenseData.
:rtype: LicenseCustomerOpRef
"""
return self._customer_op
@customer_op.setter
def customer_op(self, customer_op):
"""
Sets the customer_op of this LicenseAccountLicenseData.
:param customer_op: The customer_op of this LicenseAccountLicenseData.
:type: LicenseCustomerOpRef
"""
self._customer_op = customer_op
@property
def group(self):
"""
Gets the group of this LicenseAccountLicenseData.
Group
:return: The group of this LicenseAccountLicenseData.
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""
Sets the group of this LicenseAccountLicenseData.
Group
:param group: The group of this LicenseAccountLicenseData.
:type: str
"""
self._group = group
@property
def last_sync(self):
"""
Gets the last_sync of this LicenseAccountLicenseData.
Specifies last sync time with SA
:return: The last_sync of this LicenseAccountLicenseData.
:rtype: datetime
"""
return self._last_sync
@last_sync.setter
def last_sync(self, last_sync):
"""
Sets the last_sync of this LicenseAccountLicenseData.
Specifies last sync time with SA
:param last_sync: The last_sync of this LicenseAccountLicenseData.
:type: datetime
"""
self._last_sync = last_sync
@property
def last_updated_time(self):
"""
Gets the last_updated_time of this LicenseAccountLicenseData.
Last updated
:return: The last_updated_time of this LicenseAccountLicenseData.
:rtype: datetime
"""
return self._last_updated_time
@last_updated_time.setter
def last_updated_time(self, last_updated_time):
"""
Sets the last_updated_time of this LicenseAccountLicenseData.
Last updated
:param last_updated_time: The last_updated_time of this LicenseAccountLicenseData.
:type: datetime
"""
self._last_updated_time = last_updated_time
@property
def license_state(self):
"""
Gets the license_state of this LicenseAccountLicenseData.
        Aggregated mode for the agent
:return: The license_state of this LicenseAccountLicenseData.
:rtype: str
"""
return self._license_state
@license_state.setter
def license_state(self, license_state):
"""
Sets the license_state of this LicenseAccountLicenseData.
        Aggregated mode for the agent
:param license_state: The license_state of this LicenseAccountLicenseData.
:type: str
"""
self._license_state = license_state
@property
def license_tech_support_info(self):
"""
Gets the license_tech_support_info of this LicenseAccountLicenseData.
Tech-support info for a smart-agent
:return: The license_tech_support_info of this LicenseAccountLicenseData.
:rtype: str
"""
return self._license_tech_support_info
@license_tech_support_info.setter
def license_tech_support_info(self, license_tech_support_info):
"""
Sets the license_tech_support_info of this LicenseAccountLicenseData.
Tech-support info for a smart-agent
:param license_tech_support_info: The license_tech_support_info of this LicenseAccountLicenseData.
:type: str
"""
self._license_tech_support_info = license_tech_support_info
@property
def licenseinfos(self):
"""
Gets the licenseinfos of this LicenseAccountLicenseData.
:return: The licenseinfos of this LicenseAccountLicenseData.
:rtype: list[LicenseLicenseInfoRef]
"""
return self._licenseinfos
@licenseinfos.setter
def licenseinfos(self, licenseinfos):
"""
Sets the licenseinfos of this LicenseAccountLicenseData.
:param licenseinfos: The licenseinfos of this LicenseAccountLicenseData.
:type: list[LicenseLicenseInfoRef]
"""
self._licenseinfos = licenseinfos
@property
def register_expire_time(self):
"""
Gets the register_expire_time of this LicenseAccountLicenseData.
        Registration expiration time
:return: The register_expire_time of this LicenseAccountLicenseData.
:rtype: str
"""
return self._register_expire_time
@register_expire_time.setter
def register_expire_time(self, register_expire_time):
"""
Sets the register_expire_time of this LicenseAccountLicenseData.
        Registration expiration time
:param register_expire_time: The register_expire_time of this LicenseAccountLicenseData.
:type: str
"""
self._register_expire_time = register_expire_time
@property
def register_initial_time(self):
"""
Gets the register_initial_time of this LicenseAccountLicenseData.
Initial time of registration
:return: The register_initial_time of this LicenseAccountLicenseData.
:rtype: str
"""
return self._register_initial_time
@register_initial_time.setter
def register_initial_time(self, register_initial_time):
"""
Sets the register_initial_time of this LicenseAccountLicenseData.
Initial time of registration
:param register_initial_time: The register_initial_time of this LicenseAccountLicenseData.
:type: str
"""
self._register_initial_time = register_initial_time
@property
def register_next_time(self):
"""
Gets the register_next_time of this LicenseAccountLicenseData.
Next time for registration
:return: The register_next_time of this LicenseAccountLicenseData.
:rtype: str
"""
return self._register_next_time
@register_next_time.setter
def register_next_time(self, register_next_time):
"""
Sets the register_next_time of this LicenseAccountLicenseData.
Next time for registration
:param register_next_time: The register_next_time of this LicenseAccountLicenseData.
:type: str
"""
self._register_next_time = register_next_time
@property
def registration_status(self):
"""
Gets the registration_status of this LicenseAccountLicenseData.
Registration status
:return: The registration_status of this LicenseAccountLicenseData.
:rtype: str
"""
return self._registration_status
@registration_status.setter
def registration_status(self, registration_status):
"""
Sets the registration_status of this LicenseAccountLicenseData.
Registration status
:param registration_status: The registration_status of this LicenseAccountLicenseData.
:type: str
"""
self._registration_status = registration_status
@property
def renew_failure_string(self):
"""
Gets the renew_failure_string of this LicenseAccountLicenseData.
Renew failure message
:return: The renew_failure_string of this LicenseAccountLicenseData.
:rtype: str
"""
return self._renew_failure_string
@renew_failure_string.setter
def renew_failure_string(self, renew_failure_string):
"""
Sets the renew_failure_string of this LicenseAccountLicenseData.
Renew failure message
:param renew_failure_string: The renew_failure_string of this LicenseAccountLicenseData.
:type: str
"""
self._renew_failure_string = renew_failure_string
@property
def smart_account(self):
"""
Gets the smart_account of this LicenseAccountLicenseData.
Name of smart account
:return: The smart_account of this LicenseAccountLicenseData.
:rtype: str
"""
return self._smart_account
@smart_account.setter
def smart_account(self, smart_account):
"""
Sets the smart_account of this LicenseAccountLicenseData.
Name of smart account
:param smart_account: The smart_account of this LicenseAccountLicenseData.
:type: str
"""
self._smart_account = smart_account
@property
def smartlicense_token(self):
"""
Gets the smartlicense_token of this LicenseAccountLicenseData.
:return: The smartlicense_token of this LicenseAccountLicenseData.
:rtype: LicenseSmartlicenseTokenRef
"""
return self._smartlicense_token
@smartlicense_token.setter
def smartlicense_token(self, smartlicense_token):
"""
Sets the smartlicense_token of this LicenseAccountLicenseData.
:param smartlicense_token: The smartlicense_token of this LicenseAccountLicenseData.
:type: LicenseSmartlicenseTokenRef
"""
self._smartlicense_token = smartlicense_token
@property
def sync_status(self):
"""
Gets the sync_status of this LicenseAccountLicenseData.
Current sync status for the account
:return: The sync_status of this LicenseAccountLicenseData.
:rtype: str
"""
return self._sync_status
@sync_status.setter
def sync_status(self, sync_status):
"""
Sets the sync_status of this LicenseAccountLicenseData.
Current sync status for the account
:param sync_status: The sync_status of this LicenseAccountLicenseData.
:type: str
"""
self._sync_status = sync_status
@property
def virtual_account(self):
"""
Gets the virtual_account of this LicenseAccountLicenseData.
Name of virtual account
:return: The virtual_account of this LicenseAccountLicenseData.
:rtype: str
"""
return self._virtual_account
@virtual_account.setter
def virtual_account(self, virtual_account):
"""
Sets the virtual_account of this LicenseAccountLicenseData.
Name of virtual account
:param virtual_account: The virtual_account of this LicenseAccountLicenseData.
:type: str
"""
self._virtual_account = virtual_account
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, LicenseAccountLicenseData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
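# Editor's note: a minimal usage sketch, not part of the generated model. It
# assumes the swagger-generated __init__ (defined earlier in this file) can be
# called without arguments; the attribute names below are the real properties
# defined above, but the values are made up for illustration.
#
#     data = LicenseAccountLicenseData()
#     data.moid = "5d1b2c3d4e5f"           # hypothetical managed-object id
#     data.license_state = "Compliant"     # aggregated mode for the agent
#     payload = data.to_dict()             # nested models are flattened to dicts
#     print(data)                          # __repr__ delegates to to_str()/pformat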
| 30.620316 | 669 | 0.639248 |
4a23dd3a66065878fc620f59cc5ee4097b78fec8 | 807 | py | Python | setup.py | george-omosun-e/arweave-python-client | 256ecd192ead7e67705fa9be89fd0f94d1523158 | [
"MIT"
] | null | null | null | setup.py | george-omosun-e/arweave-python-client | 256ecd192ead7e67705fa9be89fd0f94d1523158 | [
"MIT"
] | null | null | null | setup.py | george-omosun-e/arweave-python-client | 256ecd192ead7e67705fa9be89fd0f94d1523158 | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name="arweave-client-python",
packages = ['arweave'], # this must be the same as the name above
version="1.0.15.dev0",
description="Client interface for sending transactions on the Arweave permaweb",
author="George Omosun E. Jr.",
    author_email="[email protected]",
url="https://github.com/george-omosun-e/arweave-python-client",
download_url="https://github.com/george-omosun-e/arweave-python-client",
keywords=['arweave', 'crypto', 'python'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'arrow',
'python-jose',
'pynacl',
'pycryptodome',
'cryptography',
'requests',
'psutil'
],
)
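# Editor's note: a hedged usage sketch, not part of the original file. With a
# setup.py like the one above, the package is typically built and installed
# with the standard distutils/pip commands; the exact release workflow for this
# project is an assumption.
#
#     python setup.py sdist    # build a source distribution
#     pip install .            # install from the repository root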
| 28.821429 | 82 | 0.670384 |
4a23dd52ddcdd6cf99d3fd99a67c31ffdae879b3 | 6,451 | py | Python | opsdroid/connector/rocketchat/__init__.py | shendrekbharath/opsdroid | c4bd284a27c9cd98036705199d896ea20430e467 | [
"Apache-2.0"
] | null | null | null | opsdroid/connector/rocketchat/__init__.py | shendrekbharath/opsdroid | c4bd284a27c9cd98036705199d896ea20430e467 | [
"Apache-2.0"
] | null | null | null | opsdroid/connector/rocketchat/__init__.py | shendrekbharath/opsdroid | c4bd284a27c9cd98036705199d896ea20430e467 | [
"Apache-2.0"
] | null | null | null | """A connector for Rocket.Chat."""
import asyncio
import logging
import datetime
import aiohttp
from opsdroid.connector import Connector
from opsdroid.events import Message
_LOGGER = logging.getLogger(__name__)
API_PATH = '/api/v1/'
class RocketChat(Connector):
"""A connector for the chat service Rocket.Chat."""
def __init__(self, config, opsdroid=None):
"""Create the connector.
Sets up logic for the Connector class, gets data from
the config.yaml or adds default values.
Args:
opsdroid (OpsDroid): An instance of opsdroid.core.
config (dict): configuration settings from the
file config.yaml.
"""
super().__init__(config, opsdroid=opsdroid)
self.name = "rocket.chat"
self.config = config
self.default_room = config.get("default-room", "general")
self.group = config.get("group", None)
self.url = config.get("channel-url", "https://open.rocket.chat")
self.update_interval = config.get("update-interval", 1)
self.bot_name = config.get("bot-name", "opsdroid")
self.listening = True
self.latest_update = datetime.datetime.utcnow().isoformat()
try:
self.user_id = config['user-id']
self.token = config['token']
self.headers = {
'X-User-Id': self.user_id,
"X-Auth-Token": self.token,
}
except (KeyError, AttributeError):
_LOGGER.error("Unable to login: Access token is missing. "
"Rocket.Chat connector will not be available.")
def build_url(self, method):
"""Build the url to connect with api.
Helper function to build the url to interact with the
Rocket.Chat REST API. Uses the global variable API_PATH
that points to current api version. (example: /api/v1/)
Args:
method (string): Api call endpoint.
Return:
String that represents full API url.
"""
return "{}{}{}".format(self.url, API_PATH, method)
async def connect(self):
"""Connect to the chat service.
        This method is used to test whether the connection to the
        chat service is successful. If the connection is successful
        the response is a JSON payload containing information
        about the user. Other than the user's username, the
        information is not used.
"""
_LOGGER.info("Connecting to Rocket.Chat")
async with aiohttp.ClientSession() as session:
resp = await session.get(self.build_url('me'),
headers=self.headers)
if resp.status != 200:
_LOGGER.error("Unable to connect.")
_LOGGER.error("Rocket.Chat error %s, %s",
resp.status, resp.text)
else:
json = await resp.json()
_LOGGER.debug("Connected to Rocket.Chat as %s",
json["username"])
async def _parse_message(self, response):
"""Parse the message received.
Args:
response (dict): Response returned by aiohttp.Client.
"""
if response['messages']:
message = Message(
response['messages'][0]['u']['username'],
response['messages'][0]['rid'],
self,
response['messages'][0]['msg'])
_LOGGER.debug("Received message from Rocket.Chat %s",
response['messages'][0]['msg'])
await self.opsdroid.parse(message)
self.latest_update = response['messages'][0]['ts']
async def _get_message(self):
"""Connect to the API and get messages.
This method will only listen to either a channel or a
        private room, called a group by Rocket.Chat. If a group
is specified in the config then it takes priority
over a channel.
"""
if self.group:
url = self.build_url('groups.history?roomName={}'.format(
self.group))
self.default_room = self.group
else:
url = self.build_url('channels.history?roomName={}'.format(
self.default_room))
if self.latest_update:
url += '&oldest={}'.format(self.latest_update)
async with aiohttp.ClientSession() as session:
resp = await session.get(url,
headers=self.headers)
if resp.status != 200:
_LOGGER.error("Rocket.Chat error %s, %s",
resp.status, resp.text)
self.listening = False
else:
json = await resp.json()
await self._parse_message(json)
async def listen(self):
"""Listen for and parse new messages.
The method will sleep asynchronously at the end of
every loop. The time can either be specified in the
config.yaml with the param update-interval - this
defaults to 1 second.
If the channel didn't get any new messages opsdroid
will still call the REST API, but won't do anything.
"""
while self.listening:
await self._get_message()
await asyncio.sleep(self.update_interval)
async def respond(self, message, room=None):
"""Respond with a message.
        The message argument carries both the text to reply with and
        the room to reply to, based on the roomId (rid) obtained in
        the _parse_message method.
Args:
message (object): An instance of Message
room (string, optional): Name of the room to respond to.
"""
_LOGGER.debug("Responding with: %s", message.text)
async with aiohttp.ClientSession() as session:
data = {}
data['channel'] = message.room
data['alias'] = self.bot_name
data['text'] = message.text
data['avatar'] = ''
resp = await session.post(
self.build_url('chat.postMessage'),
headers=self.headers,
data=data)
if resp.status == 200:
_LOGGER.debug('Successfully responded')
else:
_LOGGER.debug("Error - %s: Unable to respond", resp.status)
| 34.682796 | 75 | 0.567044 |
4a23de0e76bc87440ee4c7a78ab85a9f00c22b05 | 1,961 | py | Python | orchestrator/core/orc_server/device/admin.py | oasis-open/openc2-oif | 9227d38cb53204b45641ac55aefd6a13d2aad563 | [
"Apache-2.0"
] | 2 | 2019-07-02T14:06:24.000Z | 2021-07-07T09:45:54.000Z | orchestrator/core/orc_server/device/admin.py | oasis-open/openc2-oif-orchestrator | 9227d38cb53204b45641ac55aefd6a13d2aad563 | [
"Apache-2.0"
] | 22 | 2020-03-24T16:58:17.000Z | 2022-02-27T15:36:57.000Z | orchestrator/core/orc_server/device/admin.py | oasis-open/openc2-oif | 9227d38cb53204b45641ac55aefd6a13d2aad563 | [
"Apache-2.0"
] | 10 | 2019-04-26T12:22:22.000Z | 2021-08-05T09:16:05.000Z | from django.contrib import admin
from polymorphic.admin import PolymorphicParentModelAdmin, PolymorphicChildModelAdmin, PolymorphicChildModelFilter
from .models import Device, DeviceGroup
from .models.transports import (
# Base
Transport,
TransportAuth,
# Transport Specific
TransportHTTP,
TransportHTTPS,
TransportMQTT
)
# Polymorphic Transport Models
class TransportSharedOptions:
# Standard Options
list_display = ('transport_id', 'host', 'port', 'protocol', )
filter_horizontal = ('serialization', )
class TransportChildAdmin(TransportSharedOptions, PolymorphicChildModelAdmin):
""" Base admin class for all child models """
base_model = Transport # Optional, explicitly set here.
@admin.register(TransportAuth)
class TransportAuthAdmin(TransportChildAdmin):
base_model = TransportAuth
@admin.register(TransportHTTP)
class TransportHTTPAdmin(TransportChildAdmin):
base_model = TransportHTTP
@admin.register(TransportHTTPS)
class TransportHTTPSAdmin(TransportChildAdmin):
base_model = TransportHTTPS
@admin.register(TransportMQTT)
class TransportMQTTAdmin(TransportChildAdmin):
base_model = TransportMQTT
@admin.register(Transport)
class TransportParentAdmin(TransportSharedOptions, PolymorphicParentModelAdmin):
"""
Transport model admin
"""
# Polymorphic Options
base_model = Transport # Optional, explicitly set here.
child_models = (Transport, TransportAuth, TransportHTTP, TransportHTTPS, TransportMQTT)
list_filter = (PolymorphicChildModelFilter, )
# Device Models
@admin.register(Device)
class DeviceAdmin(admin.ModelAdmin):
"""
Device model admin
"""
list_display = ('device_id', 'name', )
filter_horizontal = ('transport', )
@admin.register(DeviceGroup)
class DeviceGroupAdmin(admin.ModelAdmin):
"""
Device Group model admin
"""
list_display = ('name', )
filter_horizontal = ('users', 'devices')
| 25.802632 | 114 | 0.750637 |
4a23de41b2a5e766632335272a33d6547ee0213b | 811 | py | Python | migrations/versions/20e031b23d18_.py | paked/WPC-fix | bca46731bce6a76f6b7eb79123b79b9c6377bc52 | [
"MIT"
] | 200 | 2015-01-27T18:26:09.000Z | 2021-12-19T14:38:53.000Z | migrations/versions/20e031b23d18_.py | paked/WPC-fix | bca46731bce6a76f6b7eb79123b79b9c6377bc52 | [
"MIT"
] | 12 | 2015-02-09T10:18:38.000Z | 2021-12-13T19:43:56.000Z | migrations/versions/20e031b23d18_.py | paked/WPC-fix | bca46731bce6a76f6b7eb79123b79b9c6377bc52 | [
"MIT"
] | 23 | 2015-02-09T04:42:48.000Z | 2015-02-20T18:58:56.000Z | """empty message
Revision ID: 20e031b23d18
Revises: 28af272381c0
Create Date: 2015-02-04 16:19:45.930039
"""
# revision identifiers, used by Alembic.
revision = '20e031b23d18'
down_revision = '28af272381c0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('subscription',
sa.Column('stream_id', sa.Integer(), nullable=True),
sa.Column('subscriber_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['stream_id'], ['stream.id'], ),
sa.ForeignKeyConstraint(['subscriber_id'], ['subscriber.id'], )
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('subscription')
### end Alembic commands ###
| 25.34375 | 67 | 0.686806 |
4a23dfad7a1b4f723fdb6848bf914c670e35b79a | 345 | py | Python | nipy/fixes/scipy/setup.py | yarikoptic/NiPy-OLD | 8759b598ac72d3b9df7414642c7a662ad9c55ece | [
"BSD-3-Clause"
] | 1 | 2015-08-22T16:14:45.000Z | 2015-08-22T16:14:45.000Z | nipy/fixes/scipy/setup.py | yarikoptic/NiPy-OLD | 8759b598ac72d3b9df7414642c7a662ad9c55ece | [
"BSD-3-Clause"
] | null | null | null | nipy/fixes/scipy/setup.py | yarikoptic/NiPy-OLD | 8759b598ac72d3b9df7414642c7a662ad9c55ece | [
"BSD-3-Clause"
] | null | null | null | def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('scipy', parent_package, top_path)
config.add_subpackage('stats')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 28.75 | 61 | 0.736232 |
4a23dfb36f38c385a08e2bc4144d2dbc0dd4abe7 | 2,249 | py | Python | parser/config.py | zhancongc/movie_parse | 69d9b31b6b4ff021db9ff1f0c89488f5b6adaf8e | [
"MIT"
] | null | null | null | parser/config.py | zhancongc/movie_parse | 69d9b31b6b4ff021db9ff1f0c89488f5b6adaf8e | [
"MIT"
] | null | null | null | parser/config.py | zhancongc/movie_parse | 69d9b31b6b4ff021db9ff1f0c89488f5b6adaf8e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# alternative user agent
USER_AGENT = [
'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11'
]
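# Editor's note: a hedged usage sketch, not part of the original config. The
# list above is typically consumed by a crawler by picking a random entry per
# request; this exact snippet is an illustration, not code from this project.
#
#     import random, requests
#     headers = {'User-Agent': random.choice(USER_AGENT)}
#     resp = requests.get(BASE_URL, headers=headers)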
# website's base url
BASE_URL = 'http://m.idyjy.com/sub/'
# database URI
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:security@localhost:3306/movie?charset=utf8mb4' | 77.551724 | 222 | 0.666518 |
4a23e0891a0ab2c8eaa83bd8f49ed20012c0b578 | 6,988 | py | Python | nova/console/xvp.py | bopopescu/nova_vmware_compute_driver | 60d3936b68030647b9f11970c9e0d060fc286dd9 | [
"Apache-2.0"
] | null | null | null | nova/console/xvp.py | bopopescu/nova_vmware_compute_driver | 60d3936b68030647b9f11970c9e0d060fc286dd9 | [
"Apache-2.0"
] | null | null | null | nova/console/xvp.py | bopopescu/nova_vmware_compute_driver | 60d3936b68030647b9f11970c9e0d060fc286dd9 | [
"Apache-2.0"
] | 2 | 2019-07-08T22:12:35.000Z | 2020-07-24T08:27:24.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""XVP (Xenserver VNC Proxy) driver."""
import os
import signal
from Cheetah import Template
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
xvp_opts = [
cfg.StrOpt('console_xvp_conf_template',
default='$pybasedir/nova/console/xvp.conf.template',
help='XVP conf template'),
cfg.StrOpt('console_xvp_conf',
default='/etc/xvp.conf',
help='generated XVP conf file'),
cfg.StrOpt('console_xvp_pid',
default='/var/run/xvp.pid',
help='XVP master process pid file'),
cfg.StrOpt('console_xvp_log',
default='/var/log/xvp.log',
help='XVP log file'),
cfg.IntOpt('console_xvp_multiplex_port',
default=5900,
help='port for XVP to multiplex VNC connections on'),
]
CONF = cfg.CONF
CONF.register_opts(xvp_opts)
CONF.import_opt('host', 'nova.config')
CONF.import_opt('pybasedir', 'nova.config')
LOG = logging.getLogger(__name__)
class XVPConsoleProxy(object):
"""Sets up XVP config, and manages XVP daemon."""
def __init__(self):
self.xvpconf_template = open(CONF.console_xvp_conf_template).read()
self.host = CONF.host # default, set by manager.
super(XVPConsoleProxy, self).__init__()
@property
def console_type(self):
return 'vnc+xvp'
def get_port(self, context):
"""Get available port for consoles that need one."""
#TODO(mdragon): implement port selection for non multiplex ports,
# we are not using that, but someone else may want
# it.
return CONF.console_xvp_multiplex_port
def setup_console(self, context, console):
"""Sets up actual proxies."""
self._rebuild_xvp_conf(context.elevated())
def teardown_console(self, context, console):
"""Tears down actual proxies."""
self._rebuild_xvp_conf(context.elevated())
def init_host(self):
"""Start up any config'ed consoles on start."""
ctxt = context.get_admin_context()
self._rebuild_xvp_conf(ctxt)
def fix_pool_password(self, password):
"""Trim password to length, and encode."""
return self._xvp_encrypt(password, is_pool_password=True)
def fix_console_password(self, password):
"""Trim password to length, and encode."""
return self._xvp_encrypt(password)
def _rebuild_xvp_conf(self, context):
LOG.debug(_('Rebuilding xvp conf'))
pools = [pool for pool in
db.console_pool_get_all_by_host_type(context, self.host,
self.console_type)
if pool['consoles']]
if not pools:
LOG.debug('No console pools!')
self._xvp_stop()
return
conf_data = {'multiplex_port': CONF.console_xvp_multiplex_port,
'pools': pools,
'pass_encode': self.fix_console_password}
config = str(Template.Template(self.xvpconf_template,
searchList=[conf_data]))
self._write_conf(config)
self._xvp_restart()
def _write_conf(self, config):
try:
LOG.debug(_('Re-wrote %s') % CONF.console_xvp_conf)
with open(CONF.console_xvp_conf, 'w') as cfile:
cfile.write(config)
except IOError:
LOG.exception(_("Failed to write configuration file"))
raise
def _xvp_stop(self):
LOG.debug(_('Stopping xvp'))
pid = self._xvp_pid()
if not pid:
return
try:
os.kill(pid, signal.SIGTERM)
except OSError:
#if it's already not running, no problem.
pass
def _xvp_start(self):
if self._xvp_check_running():
return
LOG.debug(_('Starting xvp'))
try:
utils.execute('xvp',
'-p', CONF.console_xvp_pid,
'-c', CONF.console_xvp_conf,
'-l', CONF.console_xvp_log)
except exception.ProcessExecutionError, err:
LOG.error(_('Error starting xvp: %s') % err)
def _xvp_restart(self):
LOG.debug(_('Restarting xvp'))
if not self._xvp_check_running():
LOG.debug(_('xvp not running...'))
self._xvp_start()
else:
pid = self._xvp_pid()
os.kill(pid, signal.SIGUSR1)
def _xvp_pid(self):
try:
with open(CONF.console_xvp_pid, 'r') as pidfile:
pid = int(pidfile.read())
except IOError:
return None
except ValueError:
return None
return pid
def _xvp_check_running(self):
pid = self._xvp_pid()
if not pid:
return False
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _xvp_encrypt(self, password, is_pool_password=False):
"""Call xvp to obfuscate passwords for config file.
Args:
- password: the password to encode, max 8 char for vm passwords,
and 16 chars for pool passwords. passwords will
be trimmed to max len before encoding.
            - is_pool_password: True if this is the XenServer api password
False if it's a VM console password
(xvp uses different keys and max lengths for pool passwords)
Note that xvp's obfuscation should not be considered 'real' encryption.
It simply DES encrypts the passwords with static keys plainly viewable
in the xvp source code.
"""
maxlen = 8
flag = '-e'
if is_pool_password:
maxlen = 16
flag = '-x'
#xvp will blow up on passwords that are too long (mdragon)
password = password[:maxlen]
out, err = utils.execute('xvp', flag, process_input=password)
if err:
raise exception.ProcessExecutionError(_("Failed to run xvp."))
return out.strip()
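# Editor's note: a hedged configuration sketch, not part of the original file.
# The options registered in xvp_opts above map to nova.conf entries roughly
# like the following; the values shown are the defaults declared above.
#
#     [DEFAULT]
#     console_xvp_conf_template = $pybasedir/nova/console/xvp.conf.template
#     console_xvp_conf = /etc/xvp.conf
#     console_xvp_pid = /var/run/xvp.pid
#     console_xvp_log = /var/log/xvp.log
#     console_xvp_multiplex_port = 5900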
| 34.254902 | 79 | 0.595306 |
4a23e0ffd22b7a69ec4fa5d517b34a750e69f826 | 2,179 | py | Python | share/qt/extract_strings_qt.py | Shamsdadu/DigitalDenar | 423a782a0e76dcd449d86e59d6a1dd3204ece9d0 | [
"MIT"
] | 8 | 2019-11-14T13:49:50.000Z | 2019-12-28T13:15:06.000Z | share/qt/extract_strings_qt.py | Shamsdadu/DigitalDenar | 423a782a0e76dcd449d86e59d6a1dd3204ece9d0 | [
"MIT"
] | 1 | 2019-12-13T01:10:02.000Z | 2019-12-13T01:10:59.000Z | share/qt/extract_strings_qt.py | Shamsdadu/DigitalDenar | 423a782a0e76dcd449d86e59d6a1dd3204ece9d0 | [
"MIT"
] | 1 | 2019-12-13T01:00:28.000Z | 2019-12-13T01:00:28.000Z | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/digitaldenarstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
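# Editor's note: a hedged illustration, not part of the original script. Given
# xgettext output such as:
#
#     msgid "Hello"
#     "world"
#     msgstr ""
#
# parse_po() returns [(['"Hello"', '"world"'], ['""'])] -- the raw quoted lines
# are kept as-is and later joined when the C array is written below.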
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *digitaldenar_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("digitaldenar-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| 25.940476 | 105 | 0.622304 |
4a23e10da8592da1795502e544741e2f7ca392e0 | 711 | py | Python | src/pyretem/subpackage1/module1.py | lpereira95/python-repo-template | 34a91d0d6d7fc6168e9e08deadb6d8fc7f304b9a | [
"MIT"
] | null | null | null | src/pyretem/subpackage1/module1.py | lpereira95/python-repo-template | 34a91d0d6d7fc6168e9e08deadb6d8fc7f304b9a | [
"MIT"
] | null | null | null | src/pyretem/subpackage1/module1.py | lpereira95/python-repo-template | 34a91d0d6d7fc6168e9e08deadb6d8fc7f304b9a | [
"MIT"
] | null | null | null | """This module contains classes.
"""
class AnObject:
"""Defines an object
Args:
a (bool): This is param `a` description.
b (np.array): This is param `b` description.
Attributes:
a (bool): An attribute.
b (np.array): Another attribute.
Notes:
        There's a little repetition between parameters and attributes.
Nevertheless, the output is quite different (take a look).
"""
def __init__(self, a, b):
pass
    def operate_on_attributes(self, operator):
        """Operates on attributes according to the passed callable.
Args:
operator (callable): A function that receives `a` and `b`.
"""
pass
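# Editor's note: a hedged usage sketch, not part of the original template. The
# class above is a documentation stub (both methods just `pass`), so this only
# illustrates the intended call shape; `np` refers to numpy, which the
# docstrings assume.
#
#     import numpy as np
#     obj = AnObject(a=True, b=np.array([1, 2, 3]))
#     obj.operate_on_attributes(lambda a, b: b.sum() if a else 0)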
| 21.545455 | 71 | 0.59353 |
4a23e16a95b2dca13de6acf98c6fe6eec566a0f1 | 8,973 | py | Python | elasticdl/python/data/reader/odps_reader.py | zuston/elasticdl | 601609fd44f826a2f5ea209443124b2c9a2f9ccb | [
"MIT"
] | null | null | null | elasticdl/python/data/reader/odps_reader.py | zuston/elasticdl | 601609fd44f826a2f5ea209443124b2c9a2f9ccb | [
"MIT"
] | null | null | null | elasticdl/python/data/reader/odps_reader.py | zuston/elasticdl | 601609fd44f826a2f5ea209443124b2c9a2f9ccb | [
"MIT"
] | null | null | null | # Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from odps import ODPS
from elasticdl.python.common.constants import Mode
from elasticdl.python.data.odps_io import ODPSReader
from elasticdl.python.data.reader.data_reader import (
AbstractDataReader,
Metadata,
check_required_kwargs,
)
class ODPSDataReader(AbstractDataReader):
def __init__(self, **kwargs):
AbstractDataReader.__init__(self, **kwargs)
self._kwargs = kwargs
self._metadata = Metadata(column_names=None)
self._table = self._kwargs["table"]
self._columns = self._kwargs.get("columns")
self._init_metadata()
# Initialize an ODPS IO reader for each table with task type
self._table_readers = dict()
def _init_metadata(self):
table_schema = self._get_table_schema()
if self._metadata.column_names is None:
self._metadata.column_names = (
table_schema.names if self._columns is None else self._columns
)
if self._metadata.column_names:
column_dtypes = {
column_name: table_schema[column_name].type
for column_name in self._metadata.column_names
}
self.metadata.column_dtypes = column_dtypes
def read_records(self, task):
task_table_name = self._get_odps_table_name(task.shard.name)
self._init_reader(task_table_name, task.type)
reader = self._table_readers[task_table_name][task.type]
for record in reader.record_generator_with_retry(
start=task.shard.start,
end=task.shard.end,
columns=self._metadata.column_names,
):
yield record
def create_shards(self):
check_required_kwargs(["table", "records_per_task"], self._kwargs)
table_name = self._kwargs["table"]
reader = self.get_odps_reader(table_name)
table_size = reader.get_table_size()
records_per_task = self._kwargs["records_per_task"]
shards = []
num_shards = table_size // records_per_task
start_ind = 0
for shard_id in range(num_shards):
shards.append((table_name, start_ind, records_per_task,))
start_ind += records_per_task
num_records_left = table_size % records_per_task
if num_records_left != 0:
shards.append((table_name, start_ind, num_records_left,))
return shards
@property
def records_output_types(self):
return tf.string
@property
def metadata(self):
return self._metadata
def _init_reader(self, table_name, task_type):
if (
table_name in self._table_readers
and task_type in self._table_readers[table_name]
):
return
self._table_readers.setdefault(table_name, {})
check_required_kwargs(
["project", "access_id", "access_key"], self._kwargs
)
reader = self.get_odps_reader(table_name)
# There may be weird errors if tasks with the same table
# and different type use the same reader.
self._table_readers[table_name][task_type] = reader
def get_odps_reader(self, table_name):
return ODPSReader(
project=self._kwargs["project"],
access_id=self._kwargs["access_id"],
access_key=self._kwargs["access_key"],
table=table_name,
endpoint=self._kwargs.get("endpoint"),
partition=self._kwargs.get("partition", None),
num_processes=self._kwargs.get("num_processes", 1),
options={
"odps.options.tunnel.endpoint": self._kwargs.get(
"tunnel_endpoint", None
)
},
)
def _get_table_schema(self):
odps_client = ODPS(
access_id=self._kwargs["access_id"],
secret_access_key=self._kwargs["access_key"],
project=self._kwargs["project"],
endpoint=self._kwargs.get("endpoint"),
)
odps_table = odps_client.get_table(self._kwargs["table"])
return odps_table.schema
@staticmethod
def _get_odps_table_name(shard_name):
return shard_name.split(":")[0]
def default_feed(self):
check_required_kwargs(["label_col"], self._kwargs)
def feed(dataset, mode, metadata):
def _parse_data(record):
label_col_name = self._kwargs["label_col"]
record = tf.strings.to_number(record, tf.float32)
def _get_features_without_labels(
record, label_col_idx, features_shape
):
features = [
record[:label_col_idx],
record[label_col_idx + 1 :], # noqa: E203
]
features = tf.concat(features, -1)
return tf.reshape(features, features_shape)
features_shape = (len(metadata.column_names) - 1, 1)
labels_shape = (1,)
if mode == Mode.PREDICTION:
if label_col_name in metadata.column_names:
label_col_idx = metadata.column_names.index(
label_col_name
)
return _get_features_without_labels(
record, label_col_idx, features_shape
)
else:
return tf.reshape(record, features_shape)
else:
if label_col_name not in metadata.column_names:
raise ValueError(
"Missing the label column '%s' in the retrieved "
"ODPS table during %s mode."
% (label_col_name, mode)
)
label_col_idx = metadata.column_names.index(label_col_name)
labels = tf.reshape(record[label_col_idx], labels_shape)
return (
_get_features_without_labels(
record, label_col_idx, features_shape
),
labels,
)
dataset = dataset.map(_parse_data)
if mode == Mode.TRAINING:
dataset = dataset.shuffle(buffer_size=200)
return dataset
return feed
class ParallelODPSDataReader(ODPSDataReader):
"""Use multi-process to download records from a MaxCompute table
"""
def __init__(self, parse_fn, **kwargs):
ODPSDataReader.__init__(self, **kwargs)
self.py_parse_data = parse_fn
def parallel_record_records(
self, task, num_processes, shard_size, transform_fn
):
check_required_kwargs(
["project", "access_id", "access_key"], self._kwargs
)
start = task.shard.start
        end = task.shard.end
table = self._get_odps_table_name(task.shard.name)
table = table.split(".")[1]
project = self._kwargs["project"]
access_id = self._kwargs["access_id"]
access_key = self._kwargs["access_key"]
endpoint = self._kwargs.get("endpoint")
partition = self._kwargs.get("partition", None)
columns = self._kwargs.get("columns", None)
pd = ODPSReader(
access_id=access_id,
access_key=access_key,
project=project,
endpoint=endpoint,
table=table,
partition=partition,
num_processes=num_processes,
transform_fn=transform_fn,
columns=columns,
)
pd.reset((start, end - start), shard_size)
shard_count = pd.get_shards_count()
for i in range(shard_count):
records = pd.get_records()
for record in records:
yield record
pd.stop()
def read_records(self, task):
shard_size = (task.shard.end - task.shard.start) // 4
record_gen = self.parallel_record_records(
task=task,
num_processes=4,
shard_size=shard_size,
transform_fn=self.py_parse_data,
)
for record in record_gen:
yield record
@property
def records_output_types(self):
return tf.string
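# Editor's note: a hedged usage sketch, not part of the original module. The
# keyword names come from the check_required_kwargs() calls above; the
# credential and table values are placeholders.
#
#     reader = ODPSDataReader(
#         project="my_project",          # hypothetical MaxCompute project
#         access_id="...",
#         access_key="...",
#         table="iris_train",            # hypothetical table name
#         records_per_task=200,
#         label_col="class",
#     )
#     shards = reader.create_shards()    # [(table, start, record_count), ...]
#     feed_fn = reader.default_feed()    # used to build the tf.data pipeline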
| 36.181452 | 79 | 0.591664 |
4a23e175c59ead5a2f54c4fea0e337ebf8110e27 | 5,399 | py | Python | src/setup.py | MaicoTimmerman/pretix | 35ac2f0d2b24aa3f9e30ea9eadad5cd2ac3b7634 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-04-16T00:29:21.000Z | 2021-04-16T00:29:21.000Z | src/setup.py | MaicoTimmerman/pretix | 35ac2f0d2b24aa3f9e30ea9eadad5cd2ac3b7634 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/setup.py | MaicoTimmerman/pretix | 35ac2f0d2b24aa3f9e30ea9eadad5cd2ac3b7634 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import os
import sys
from codecs import open
from distutils.command.build import build
from os import path
from setuptools import find_packages, setup
from pretix import __version__
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 5)
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write("""
==========================
Unsupported Python version
==========================
This version of pretix requires Python {}.{}, but you're trying to
install it on Python {}.{}.
This may be because you are using a version of pip that doesn't
understand the python_requires classifier. Make sure you
have pip >= 9.0 and setuptools >= 24.2, then try again:
$ python -m pip install --upgrade pip setuptools
$ python -m pip install pretix
This will install the latest version of pretix which works on your
version of Python. If you can't upgrade your pip (or Python), request
an older version of pretix:
$ python -m pip install "pretix<2"
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
try:
with open(path.join(here, '../README.rst'), encoding='utf-8') as f:
long_description = f.read()
except Exception:
long_description = ''
class CustomBuild(build):
def run(self):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pretix.settings")
import django
django.setup()
from django.conf import settings
from django.core import management
settings.COMPRESS_ENABLED = True
settings.COMPRESS_OFFLINE = True
management.call_command('compilemessages', verbosity=1)
management.call_command('compilejsi18n', verbosity=1)
management.call_command('collectstatic', verbosity=1, interactive=False)
management.call_command('compress', verbosity=1)
build.run(self)
cmdclass = {
'build': CustomBuild
}
setup(
name='pretix',
version=__version__,
python_requires='>={}.{}'.format(*REQUIRED_PYTHON),
description='Reinventing presales, one ticket at a time',
long_description=long_description,
url='https://pretix.eu',
author='Raphael Michel',
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Environment :: Web Environment',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Framework :: Django :: 2.2'
],
keywords='tickets web shop ecommerce',
install_requires=[
'Django==2.2.*',
'djangorestframework==3.9.*',
'python-dateutil==2.8.*',
'requests==2.21.*',
'pytz',
'django-bootstrap3==11.0.*',
'django-formset-js-improved==0.5.0.2',
'django-compressor==2.2.*',
'django-hierarkey==1.0.*,>=1.0.2',
'django-filter==2.1.*',
'django-scopes==1.1.*',
'reportlab==3.5.*',
'Pillow==5.*',
'PyPDF2==1.26.*',
'django-libsass',
'libsass',
'django-otp==0.5.*',
'webauthn==0.4.*',
'python-u2flib-server==4.*',
'django-formtools==2.1',
'celery==4.3.*',
'kombu==4.5.*',
'django-statici18n==1.8.*',
'inlinestyler==0.2.*',
'BeautifulSoup4==4.7.*',
'slimit',
'lxml',
'static3==0.7.*',
'dj-static',
'csscompressor',
'django-markup',
'markdown<=2.2',
'bleach==3.1.*',
'sentry-sdk==0.7.*',
'babel',
'paypalrestsdk==1.13.*',
'pycparser==2.13',
'django-redis==4.10.*',
'redis==3.2.*',
'stripe==2.32.*',
'chardet<3.1.0,>=3.0.2',
'mt-940==3.2',
'django-i18nfield>=1.4.0',
'django-jsonfallback>=2.1.2',
'psycopg2-binary',
'vobject==0.9.*',
'pycountry',
'django-countries',
'pyuca',
'defusedcsv>=1.1.0',
'vat_moss==0.11.0',
'django-localflavor>=2.2',
'django-localflavor',
'jsonschema',
'django-hijack>=2.1.10,<2.2.0',
'openpyxl',
'django-oauth-toolkit==1.2.*',
'oauthlib==2.1.*',
'urllib3==1.24.*', # required by current requests
'django-phonenumber-field==3.0.*',
'phonenumberslite==8.10.*',
],
extras_require={
'dev': [
'django-debug-toolbar==1.11',
'sqlparse==0.3.*',
'pycodestyle==2.5.*',
'pyflakes==2.1.*',
'flake8==3.7.*',
'pep8-naming',
'coveralls',
'coverage',
'pytest==4.4.*',
'pytest-django',
'pytest-xdist==1.28.*',
'isort',
'pytest-mock==1.10.*',
'pytest-rerunfailures==7.*',
'responses',
'potypo',
'freezegun',
],
'memcached': ['pylibmc'],
'mysql': ['mysqlclient'],
},
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
cmdclass=cmdclass,
)
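# Editor's note: a hedged usage note, not part of the original file. Because
# cmdclass above swaps in CustomBuild, a plain build also compiles translations
# and assets:
#
#     python setup.py build    # runs compilemessages, compilejsi18n,
#                              # collectstatic and compress before the
#                              # standard distutils build steps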
| 29.994444 | 80 | 0.562141 |
4a23e239f2ed0a1c056c08e43e7fb0732ed296ad | 3,508 | py | Python | test/unit/git_class/gitmerge_git_pu.py | deepcoder42/git-lib | 7f5736ea71d6592390222a214b0e51cd3c3151f8 | [
"MIT"
] | null | null | null | test/unit/git_class/gitmerge_git_pu.py | deepcoder42/git-lib | 7f5736ea71d6592390222a214b0e51cd3c3151f8 | [
"MIT"
] | null | null | null | test/unit/git_class/gitmerge_git_pu.py | deepcoder42/git-lib | 7f5736ea71d6592390222a214b0e51cd3c3151f8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Classification (U)
"""Program: gitmerge_git_pu.py
Description: Unit testing of gitmerge.git_pu in git_class.py.
Usage:
test/unit/git_class/gitmerge_git_pu.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import collections
import git
# Local
sys.path.append(os.getcwd())
import git_class
import version
__version__ = version.__version__
def push3(option):
"""Function: push3
Description: Method stub holder for git.Repo.git.push().
Arguments:
"""
status = 2
if option:
raise git.exc.GitCommandError("git", status, "stderr")
else:
raise git.exc.GitCommandError("git", 2, "stderr")
def push2(option):
"""Function: push2
Description: Method stub holder for git.Repo.git.push().
Arguments:
"""
status = 128
if option:
raise git.exc.GitCommandError("git", status, "stderr")
else:
raise git.exc.GitCommandError("git", 128, "stderr")
def push(option):
"""Function: push
Description: Method stub holder for git.Repo.git.push().
Arguments:
"""
if option:
return True
class UnitTest(unittest.TestCase):
"""Class: UnitTest
    Description:  Class which is a representation of the unit tests.
Methods:
setUp
test_git_pu_tags
test_git_pu_2
test_git_pu_128
test_git_pu_true
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.repo_name = "Repo_name"
self.git_dir = "/directory/git"
self.url = "URL"
self.branch = "Remote_branch"
self.mod_branch = "Mod_branch"
self.gitr = git_class.GitMerge(self.repo_name, self.git_dir, self.url,
self.branch, self.mod_branch)
def test_git_pu_tags(self):
"""Function: test_git_pu_tags
Description: Test with passing tags option.
Arguments:
"""
giti = collections.namedtuple('GIT', 'push')
self.gitr.gitcmd = giti(push)
status, msg = self.gitr.git_pu(tags=True)
self.assertEqual((status, msg), (True, {}))
def test_git_pu_2(self):
"""Function: test_git_pu_2
Description: Test with raised exception - 2 status.
Arguments:
"""
giti = collections.namedtuple('GIT', 'push')
self.gitr.gitcmd = giti(push3)
status, msg = self.gitr.git_pu()
self.assertEqual((status, msg["status"]), (False, 2))
def test_git_pu_128(self):
"""Function: test_git_pu_128
Description: Test with raised exception - 128 status.
Arguments:
"""
giti = collections.namedtuple('GIT', 'push')
self.gitr.gitcmd = giti(push2)
status, msg = self.gitr.git_pu()
self.assertEqual((status, msg["status"]), (False, 128))
def test_git_pu_true(self):
"""Function: test_git_pu_true
Description: Test with successful git_pu call.
Arguments:
"""
giti = collections.namedtuple('GIT', 'push')
self.gitr.gitcmd = giti(push)
status, msg = self.gitr.git_pu()
self.assertEqual((status, msg), (True, {}))
if __name__ == "__main__":
unittest.main()
| 18.366492 | 78 | 0.601197 |
4a23e28cd57e9b28e067f80613ee6d59217fe829 | 175 | py | Python | sigma/core/util.py | suzuki-shunsuke/pysigma.core | 89fe0d99e8cba015aad245dfda8465af99d1ab9d | [
"MIT"
] | 1 | 2022-02-09T06:38:52.000Z | 2022-02-09T06:38:52.000Z | sigma/core/util.py | suzuki-shunsuke/pysigma.core | 89fe0d99e8cba015aad245dfda8465af99d1ab9d | [
"MIT"
] | null | null | null | sigma/core/util.py | suzuki-shunsuke/pysigma.core | 89fe0d99e8cba015aad245dfda8465af99d1ab9d | [
"MIT"
] | null | null | null | """
"""
def validate(Model, *args, **kwargs):
return Model(*args, **kwargs)
def asdict(model):
return dict((key, getattr(model, key)) for key in model.__fields__)
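# Editor's note: a hedged usage sketch, not part of the original module. The
# `model.__fields__` attribute accessed in asdict() suggests pydantic-style
# models, but that is an assumption.
#
#     from pydantic import BaseModel
#
#     class User(BaseModel):            # hypothetical model
#         name: str
#
#     user = validate(User, name="alice")
#     asdict(user)                      # {'name': 'alice'}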
| 15.909091 | 71 | 0.64 |
4a23e31a07f934a146f333a8498a8e16a0fb1917 | 29,119 | py | Python | src/transformers/adapters/model_mixin.py | AngadSethi/adapter-transformers | b147bba9107a5a561aca28c99f4e4ec2816a6e4f | [
"Apache-2.0"
] | null | null | null | src/transformers/adapters/model_mixin.py | AngadSethi/adapter-transformers | b147bba9107a5a561aca28c99f4e4ec2816a6e4f | [
"Apache-2.0"
] | null | null | null | src/transformers/adapters/model_mixin.py | AngadSethi/adapter-transformers | b147bba9107a5a561aca28c99f4e4ec2816a6e4f | [
"Apache-2.0"
] | null | null | null | import logging
import warnings
from abc import ABC, abstractmethod
from os.path import join
from typing import List, Mapping, Optional, Union
from torch import nn
from .composition import AdapterCompositionBlock, Fuse, Stack, parse_composition
from .configuration import (
ADAPTERFUSION_CONFIG_MAP,
DEFAULT_ADAPTERFUSION_CONFIG,
AdapterConfig,
AdapterFusionConfig,
ModelAdaptersConfig,
get_adapter_config_hash,
)
from .hub_mixin import PushAdapterToHubMixin
from .loading import AdapterFusionLoader, AdapterLoader, PredictionHeadLoader, WeightsLoader
from .modeling import Adapter, GLOWCouplingBlock, NICECouplingBlock
from .utils import inherit_doc
logger = logging.getLogger(__name__)
class InvertibleAdaptersMixin:
"""Mixin for Transformer models adding invertible adapters."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.invertible_adapters = nn.ModuleDict(dict())
def add_invertible_adapter(self, adapter_name: str):
"""
Adds an invertible adapter module for the adapter with the given name. If the given adapter does not specify an
invertible adapter config, this method does nothing.
Args:
adapter_name (str): The name of the adapter for which to add an invertible adapter module.
"""
if adapter_name in self.invertible_adapters:
raise ValueError(f"Model already contains an adapter module for '{adapter_name}'.")
adapter_config = self.config.adapters.get(adapter_name)
if adapter_config and adapter_config["inv_adapter"]:
if adapter_config["inv_adapter"] == "nice":
inv_adap = NICECouplingBlock(
[[self.config.hidden_size]],
non_linearity=adapter_config["non_linearity"],
reduction_factor=adapter_config["inv_adapter_reduction_factor"],
)
elif adapter_config["inv_adapter"] == "glow":
inv_adap = GLOWCouplingBlock(
[[self.config.hidden_size]],
non_linearity=adapter_config["non_linearity"],
reduction_factor=adapter_config["inv_adapter_reduction_factor"],
)
else:
raise ValueError(f"Invalid invertible adapter type '{adapter_config['inv_adapter']}'.")
self.invertible_adapters[adapter_name] = inv_adap
self.invertible_adapters[adapter_name].apply(Adapter.init_bert_weights)
def delete_invertible_adapter(self, adapter_name: str):
if adapter_name in self.invertible_adapters:
del self.invertible_adapters[adapter_name]
def get_invertible_adapter(self):
# TODO: Currently no fusion over invertible adapters, takes only very first language adapter position
if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:
first_adapter = self.config.adapters.active_setup.first()
if first_adapter in self.invertible_adapters:
return self.invertible_adapters[first_adapter]
return None
def enable_invertible_adapters(self, adapter_names):
for adapter_name in adapter_names:
if adapter_name in self.invertible_adapters:
for param in self.invertible_adapters[adapter_name].parameters():
param.requires_grad = True
def invertible_adapters_forward(self, hidden_states, rev=False):
# TODO: Currently no fusion over invertible adapters, takes only very first language adapter position
if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:
first_adapter = self.config.adapters.active_setup.first()
if first_adapter in self.invertible_adapters:
hidden_states = self.invertible_adapters[first_adapter](hidden_states, rev=rev)
return hidden_states
class ModelConfigAdaptersMixin(ABC):
"""
Mixin for model config classes, adding support for adapters.
Besides adding this mixin to the config class of a model supporting adapters, make sure the following attributes/
properties are present: hidden_dropout_prob, attention_probs_dropout_prob.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# adapter configuration
adapter_config_dict = kwargs.pop("adapters", None)
if adapter_config_dict:
self.adapters = ModelAdaptersConfig(**adapter_config_dict)
else:
self.adapters = ModelAdaptersConfig()
class ModelAdaptersMixin(PushAdapterToHubMixin, ABC):
"""Mixin for transformer models adding support for loading/ saving adapters."""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.model_name = None
# In some cases, the config is not an instance of a directly supported config class such as BertConfig.
# Thus, we check the adapters config here to make sure everything is correct.
if not hasattr(config, "adapters"):
config.adapters = ModelAdaptersConfig()
elif not isinstance(config.adapters, ModelAdaptersConfig):
config.adapters = ModelAdaptersConfig(**config.adapters)
def _init_adapter_modules(self):
"""
This method initializes adapter modules and fusion modules from the model config.
"""
# Initialize adapters from config
for adapter_name in self.config.adapters:
self._add_adapter(adapter_name)
# Initialize fusion from config
if hasattr(self.config, "adapter_fusion_models"):
for fusion_adapter_names in self.config.adapter_fusion_models:
self._add_fusion_layer(fusion_adapter_names)
# These methods have to be implemented by every deriving class:
@abstractmethod
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):
"""Sets the model into mode for training the given adapters."""
pass
def train_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
warnings.warn(
"add_fusion() has been deprecated in favor of add_adapter_fusion(). Please use the newer method instead.",
FutureWarning,
)
self.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
@abstractmethod
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
pass
@abstractmethod
def _add_adapter(self, adapter_name):
pass
@abstractmethod
def _add_fusion_layer(self, adapter_names):
pass
def has_adapters(self):
return len(self.config.adapters.adapters) > 0
@property
def has_parallel_adapters(self) -> bool:
if self.config.adapters.active_setup:
return self.config.adapters.active_setup.parallel_channels > 1
else:
return False
@property
def active_adapters(self) -> AdapterCompositionBlock:
return self.config.adapters.active_setup
@active_adapters.setter
def active_adapters(self, adapter_setup: Union[list, AdapterCompositionBlock]):
self.set_active_adapters(adapter_setup)
def set_active_adapters(
self, adapter_setup: Union[list, AdapterCompositionBlock], skip_layers: Optional[List[int]] = None
):
"""
Sets the adapter modules to be used by default in every forward pass. If no adapter with the given name is
found, no module of the respective type will be activated.
Args:
adapter_setup (list): The list of adapters to be activated by default. Can be a fusion or stacking configuration.
"""
adapter_setup = parse_composition(adapter_setup, model_type=self.config.model_type)
if adapter_setup:
for adapter_name in adapter_setup.flatten():
if adapter_name not in self.config.adapters.adapters:
raise ValueError(
f"No adapter with name '{adapter_name}' found. Please make sure that all specified adapters are correctly loaded."
)
self.config.adapters.active_setup = adapter_setup
self.config.adapters.skip_layers = skip_layers
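    # Illustrative usage (not part of the original file; adapter names are placeholders): after
    # model.add_adapter("a") and model.add_adapter("b"), calling
    # model.set_active_adapters(Stack("a", "b")) routes every forward pass through both adapters,
    # while model.set_active_adapters(None) clears the active setup again.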
def set_adapter_fusion_config(self, adapter_fusion_config, override_kwargs=None):
"""
Sets the adapter fusion configuration.
Args:
adapter_fusion_config (str or dict): adapter fusion configuration, can be either:
- a string identifying a pre-defined adapter fusion configuration
- a dictionary representing the adapter fusion configuration
- the path to a file containing the adapter fusion configuration
"""
if override_kwargs is None:
override_kwargs = {}
if isinstance(adapter_fusion_config, str) and adapter_fusion_config in ADAPTERFUSION_CONFIG_MAP:
self.config.adapter_fusion = AdapterFusionConfig.load(adapter_fusion_config, **override_kwargs)
elif isinstance(adapter_fusion_config, Mapping):
self.config.adapter_fusion = adapter_fusion_config
else:
raise ValueError("Invalid adapter type {}".format(adapter_fusion_config))
def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False):
"""
Adds a new adapter module of the specified type to the model.
Args:
adapter_name (str): The name of the adapter module to be added.
config (str or dict or AdapterConfig, optional): The adapter configuration, can be either:
- the string identifier of a pre-defined configuration dictionary
- a configuration dictionary specifying the full config
- if not given, the default configuration for this adapter type will be used
overwrite_ok (bool, optional): Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.
"""
if isinstance(config, dict):
config = AdapterConfig.from_dict(config) # ensure config is ok and up-to-date
# In case adapter already exists and we allow overwriting, explicitly delete the existing one first
if overwrite_ok and adapter_name in self.config.adapters:
self.delete_adapter(adapter_name)
self.config.adapters.add(adapter_name, config=config)
self.base_model._add_adapter(adapter_name)
def add_fusion(self, adapter_names: Union[Fuse, list], adapter_fusion_config=None, override_kwargs=None):
warnings.warn(
"add_fusion() has been deprecated in favor of add_adapter_fusion(). Please use the newer method instead.",
FutureWarning,
)
self.add_adapter_fusion(adapter_names, adapter_fusion_config, override_kwargs)
def add_adapter_fusion(self, adapter_names: Union[Fuse, list], adapter_fusion_config=None, override_kwargs=None):
"""
        Adds AdapterFusion to the model with all the necessary configurations and weight initializations
Args:
adapter_names: a list of adapter names which should be fused
adapter_fusion_config (str or dict): adapter fusion configuration, can be either:
- a string identifying a pre-defined adapter fusion configuration
- a dictionary representing the adapter fusion configuration
- the path to a file containing the adapter fusion configuration
override_kwargs: dictionary items for values which should be overwritten in the default AdapterFusion configuration
"""
# TODO-V2 Allow nested items or directly pass Fuse block?
if isinstance(adapter_names, Fuse):
adapter_names = adapter_names.children
if not hasattr(self.config, "adapter_fusion"):
if override_kwargs is None:
override_kwargs = {}
if adapter_fusion_config is not None:
self.set_adapter_fusion_config(adapter_fusion_config, **override_kwargs)
else:
self.set_adapter_fusion_config(DEFAULT_ADAPTERFUSION_CONFIG)
elif hasattr(self.config, "adapter_fusion") and adapter_fusion_config is not None:
# This behavior may be a bit unintuitive as the given argument is ignored, but we can't throw an error because of the loader.
logger.warning("An AdapterFusion config has already been set and will NOT be overwritten")
if not hasattr(self.config, "adapter_fusion_models"):
self.config.adapter_fusion_models = []
if isinstance(adapter_names, list):
adapter_fusion_name = ",".join(adapter_names)
else:
adapter_fusion_name = adapter_names
if adapter_fusion_name not in self.config.adapter_fusion_models:
self.config.adapter_fusion_models.append(adapter_fusion_name)
self.base_model._add_fusion_layer(adapter_names)
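    # Illustrative usage (not part of the original file; adapter names are placeholders):
    #   model.add_adapter("a"); model.add_adapter("b")
    #   model.add_adapter_fusion(Fuse("a", "b"))   # or equivalently the list ["a", "b"]
    #   model.set_active_adapters(Fuse("a", "b"))  # activate the fused adapters for the forward pass
    # Training the fusion weights would additionally use model.train_adapter_fusion(Fuse("a", "b")).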
def delete_adapter(self, adapter_name: str):
"""
Deletes the adapter with the specified name from the model.
Args:
adapter_name (str): The name of the adapter.
"""
if adapter_name not in self.config.adapters:
logger.info("No adapter '%s' found for deletion. Skipping.", adapter_name)
return
del self.config.adapters.adapters[adapter_name]
self.base_model._delete_adapter(adapter_name)
# Reset active adapters if this was the only active adapter
if self.active_adapters == Stack(adapter_name):
self.active_adapters = None
def delete_adapter_fusion(self, adapter_names: Union[Fuse, list]):
"""
Deletes the AdapterFusion layer of the specified adapters.
Args:
adapter_names (Union[Fuse, list]): List of adapters for which to delete the AdapterFusion layer.
"""
if isinstance(adapter_names, Fuse):
adapter_fusion_name = ",".join(adapter_names.children)
elif isinstance(adapter_names, list):
adapter_fusion_name = ",".join(adapter_names)
else:
adapter_fusion_name = adapter_names
if (
not hasattr(self.config, "adapter_fusion_models")
or adapter_fusion_name not in self.config.adapter_fusion_models
):
logger.info("No AdapterFusion '%s' found for deletion. Skipping.", adapter_fusion_name)
return
self.config.adapter_fusion_models.remove(adapter_fusion_name)
self.base_model._delete_fusion_layer(adapter_fusion_name)
def save_adapter(
self,
save_directory: str,
adapter_name: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves an adapter and its configuration file to a directory so that it can be shared or reloaded using
`load_adapter()`.
Args:
save_directory (str): Path to a directory where the adapter should be saved.
adapter_name (str): Name of the adapter to be saved.
Raises:
ValueError: If the given adapter name is invalid.
"""
loader = AdapterLoader(self)
loader.save(save_directory, adapter_name, meta_dict)
# save additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.save(save_directory, adapter_name)
def save_adapter_fusion(
self,
save_directory: str,
adapter_names: list,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
        Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded
        using `load_adapter_fusion()`.
        Args:
            save_directory (str): Path to a directory where the AdapterFusion layer should be saved.
            adapter_names (list): Name of the AdapterFusion layer that should be saved.
        Raises:
            ValueError: If the given AdapterFusion name is invalid.
"""
loader = AdapterFusionLoader(self)
loader.save(save_directory, adapter_names)
# save additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.save(save_directory, adapter_names)
def load_adapter(
self,
adapter_name_or_path: str,
config: Union[dict, str] = None,
version: str = None,
model_name: str = None,
load_as: str = None,
source: str = "ah",
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
leave_out: Optional[List[int]] = None,
id2label=None,
**kwargs
) -> str:
"""
Loads a pre-trained pytorch adapter module from the local file system or a remote location.
Args:
adapter_name_or_path (str): can be either:
- the identifier of a pre-trained task adapter to be loaded from Adapter Hub
                - a path to a directory containing adapter weights saved using `model.save_adapter()`
- a URL pointing to a zip folder containing a saved adapter module
config (dict or str, optional): The requested configuration of the adapter.
If not specified, will be either: - the default adapter config for the requested adapter if specified -
the global default adapter config
version (str, optional): The version of the adapter to be loaded.
model_name (str, optional): The string identifier of the pre-trained model.
load_as (str, optional): Load the adapter using this name. By default, the name with which the adapter was
saved will be used.
source (str, optional): Identifier of the source(s) from where to load the adapter. Can be:
- "ah" (default): search on AdapterHub.
- "hf": search on HuggingFace model hub.
- None: only search on local file system
leave_out: Dynamically drop adapter modules in the specified Transformer layers when loading the adapter.
Returns:
str: The name with which the adapter was added to the model.
"""
loader = AdapterLoader(self)
load_dir, load_name = loader.load(
adapter_name_or_path, config, version, model_name, load_as, source=source, leave_out=leave_out, **kwargs
)
# load additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.load(
load_dir,
load_as=load_as,
loading_info=kwargs.get("loading_info", None),
main_load_name=load_name,
id2label=id2label,
)
return load_name
def load_adapter_fusion(
self,
adapter_fusion_name_or_path: str,
load_as: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
**kwargs
) -> str:
"""
        Loads a pre-trained AdapterFusion layer from the local file system or a remote location.
Args:
adapter_fusion_name_or_path (str): can be either:
- the identifier of a pre-trained task adapter fusion module to be loaded from Adapter Hub
                - a path to a directory containing AdapterFusion weights saved using `model.save_adapter_fusion()`
- a URL pointing to a zip folder containing a saved adapter module
config (dict or str, optional): The requested configuration of the adapter fusion.
If not specified, will be either: - the default adapter config for the requested adapter fusion if
specified - the global default adapter fusion config
model_name (str, optional): The string identifier of the pre-trained model.
load_as (str, optional): Load the adapter using this name. By default, the name with which the adapter was
saved will be used.
Returns:
str: The name with which the adapter was added to the model.
"""
loader = AdapterFusionLoader(self)
load_dir, load_name = loader.load(adapter_fusion_name_or_path, load_as)
# load additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.load(
load_dir,
load_as=load_as,
loading_info=kwargs.get("loading_info", None),
main_load_name=load_name,
)
return load_name
def save_all_adapters(
self,
save_directory: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves all adapters of this model together with their configuration to subfolders of the given location.
Args:
save_directory (str): Path to a directory where the adapters should be saved.
"""
for name in self.config.adapters:
adapter_config = self.config.adapters.get(name)
h = get_adapter_config_hash(adapter_config)
save_path = join(save_directory, name)
if meta_dict:
meta_dict.update({"config_id": h})
else:
meta_dict = {"config_id": h}
self.save_adapter(save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders)
def save_all_adapter_fusions(
self,
save_directory: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
        Saves all AdapterFusion layers of this model together with their configuration to subfolders of the given
        location.
        Args:
            save_directory (str): Path to a directory where the AdapterFusion layers should be saved.
"""
if not hasattr(self.config, "adapter_fusion_models"):
return
for name in self.config.adapter_fusion_models:
adapter_fusion_config = self.config.adapter_fusion
h = get_adapter_config_hash(adapter_fusion_config)
save_path = join(save_directory, name)
if meta_dict:
meta_dict.update({"config_id": h})
else:
meta_dict = {"config_id": h}
self.save_adapter_fusion(save_path, name, custom_weights_loaders=custom_weights_loaders)
def freeze_model(self, freeze=True):
"""Freezes all weights of the model."""
# first freeze/ unfreeze all model weights
for param in self.base_model.parameters():
param.requires_grad = not freeze
self.model_freezed = freeze
def pre_transformer_forward(self, **kwargs):
"""
This method should be called by every adapter-implementing model at the very beginning of the forward() method.
"""
# some warnings if we don't use available adapters
active_adapters = self.active_adapters or kwargs.get("adapter_names", None)
if not active_adapters and self.has_adapters():
logger.warning("There are adapters available but none are activated for the forward pass.")
self.config.adapters.is_parallelized = False
@inherit_doc
class ModelWithHeadsAdaptersMixin(ModelAdaptersMixin):
"""Mixin adding support for loading/ saving adapters to transformer models with head(s)."""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self._convert_to_flex_head = False
def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False):
"""
Adds a new adapter module of the specified type to the model.
Args:
adapter_name (str): The name of the adapter module to be added.
config (str or dict, optional): The adapter configuration, can be either:
- the string identifier of a pre-defined configuration dictionary
- a configuration dictionary specifying the full config
- if not given, the default configuration for this adapter type will be used
overwrite_ok (bool, optional): Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.
"""
self.base_model.add_adapter(adapter_name, config, overwrite_ok=overwrite_ok)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):
"""Sets the model into mode for training the given adapters."""
self.base_model.train_adapter(adapter_setup)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
self.base_model.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
def _add_adapter(self, adapter_name):
self.base_model._add_adapter(adapter_name)
def _add_fusion_layer(self, adapter_names):
self.base_model._add_fusion_layer(adapter_names)
def save_head(self, save_directory: str, head_name: str = None):
loader = PredictionHeadLoader(self)
loader.save(save_directory, name=head_name)
def load_head(self, save_directory, load_as=None, id2label=None, **kwargs):
loader = PredictionHeadLoader(self, convert_to_flex_head=self._convert_to_flex_head)
return loader.load(save_directory, load_as=load_as, id2label=id2label, **kwargs)
def save_adapter(
self,
save_directory: str,
adapter_name: str,
with_head: bool = True,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
super().save_adapter(
save_directory,
adapter_name,
meta_dict=meta_dict,
custom_weights_loaders=custom_weights_loaders,
)
def load_adapter(
self,
adapter_name_or_path: str,
config: Union[dict, str] = None,
version: str = None,
model_name: str = None,
load_as: str = None,
source: str = "ah",
with_head: bool = True,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
leave_out: Optional[List[int]] = None,
id2label=None,
**kwargs
) -> str:
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(
PredictionHeadLoader(
self,
error_on_missing=False,
convert_to_flex_head=self._convert_to_flex_head,
)
)
# Support passing a num_labels for compatibility reasons. Convert to label map here.
num_labels = kwargs.pop("num_labels", None)
if num_labels is not None:
id2label = {i: "LABEL_" + str(i) for i in range(num_labels)}
return super().load_adapter(
adapter_name_or_path,
config=config,
version=version,
model_name=model_name,
load_as=load_as,
source=source,
custom_weights_loaders=custom_weights_loaders,
leave_out=leave_out,
id2label=id2label,
**kwargs,
)
def save_all_adapters(
self,
save_directory: str,
with_head: bool = True,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
super().save_all_adapters(
save_directory,
meta_dict=meta_dict,
custom_weights_loaders=custom_weights_loaders,
)
def get_labels(self):
return list(self.config.id2label.values())
def get_labels_dict(self):
return self.config.id2label
def get_adapter(self, name):
return self.base_model.get_adapter(name)
| 43.656672 | 140 | 0.658127 |
4a23e35120671f982b166cc60bfd875f3b41a129 | 1,141 | py | Python | tests/test_templatetags.py | NickolausDS/django-globus-portal-framework | 7d8dc9018d2bfcbd72d62344f0abe221b9259d70 | [
"Apache-2.0"
] | null | null | null | tests/test_templatetags.py | NickolausDS/django-globus-portal-framework | 7d8dc9018d2bfcbd72d62344f0abe221b9259d70 | [
"Apache-2.0"
] | 3 | 2021-10-18T15:05:38.000Z | 2022-02-01T19:52:15.000Z | tests/test_templatetags.py | NickolausDS/django-globus-portal-framework | 7d8dc9018d2bfcbd72d62344f0abe221b9259d70 | [
"Apache-2.0"
] | 2 | 2022-02-07T23:56:06.000Z | 2022-02-07T23:58:55.000Z | import pytest
from django.urls import path
from globus_portal_framework.templatetags.is_active import is_active
def view_simple(request):
pass
def view_complex(request, sun, moon, stars):
pass
urlpatterns = [
path('view-simple', view_simple, name='view-simple'),
path('view-complex/<int:sun>/<int:moon>/<int:stars>/', view_complex,
name='view-complex')
]
@pytest.mark.urls('tests.test_templatetags')
def test_view_simple(rf):
r = rf.get('/view-simple')
assert is_active(r, 'view-simple') == 'active'
@pytest.mark.urls('tests.test_templatetags')
def test_is_active_inactivated(rf):
r = rf.get('/view-simple')
assert is_active(r, 'view-complex') == ''
@pytest.mark.urls('tests.test_templatetags')
def test_is_active_with_kwargs(rf):
r = rf.get('/view-complex/1/2/3/')
assert is_active(r, 'view-complex', sun=1, moon=2, stars=3) == 'active'
@pytest.mark.urls('tests.test_templatetags')
def test_is_active_quietly_raises_error(rf):
"""Valid, but will raise warning"""
r = rf.get('view-complex/1/2/3/')
assert is_active(r, 'view-complex/', sun=1, moon=2, stars=3) == ''
| 25.355556 | 75 | 0.689746 |
4a23e3bf1040864b135a45d0df95875a4a30b2bb | 987 | py | Python | app/search.py | andela-kerinoso/data_structures_algo | ff9fb75f87e5d86050f4f530f72d7bf68253f3c7 | [
"MIT"
] | 1 | 2019-08-14T19:49:36.000Z | 2019-08-14T19:49:36.000Z | app/search.py | andela-kerinoso/data_structures_algo | ff9fb75f87e5d86050f4f530f72d7bf68253f3c7 | [
"MIT"
] | null | null | null | app/search.py | andela-kerinoso/data_structures_algo | ff9fb75f87e5d86050f4f530f72d7bf68253f3c7 | [
"MIT"
] | 2 | 2017-03-23T00:14:38.000Z | 2017-05-19T09:38:29.000Z | def unsorted_sequential_search(haystack, needle):
endpoint = len(haystack)
counter = 0
found = False
while counter < endpoint and not found:
if haystack[counter] == needle:
found = True
else:
counter += 1
return found
def sorted_sequential_search(haystack, needle):
endpoint = len(haystack)
counter = 0
found = stop = False
while counter < endpoint and not (found or stop):
if haystack[counter] == needle:
found = True
elif needle < haystack[counter]:
stop = True
else:
counter += 1
return found
def binary_search(haystack, needle):
if len(haystack) == 0:
return False
midpoint = len(haystack) // 2
if haystack[midpoint] == needle:
return True
if needle > haystack[midpoint]:
return binary_search(haystack[midpoint + 1:], needle)
else:
return binary_search(haystack[:midpoint], needle)
| 23.5 | 61 | 0.600811 |
4a23e49193e984f094b0522bb0024ed1e53bc0d9 | 16,621 | py | Python | kubernetes/client/models/v1_stateful_set_spec.py | sthagen/kubernetes-client-python | 3a183048d7d568ba5ea418bcfb8f61713908d3ea | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_stateful_set_spec.py | sthagen/kubernetes-client-python | 3a183048d7d568ba5ea418bcfb8f61713908d3ea | [
"Apache-2.0"
] | 3 | 2021-11-30T03:11:13.000Z | 2022-02-09T03:39:41.000Z | kubernetes/client/models/v1_stateful_set_spec.py | sthagen/kubernetes-client-python | 3a183048d7d568ba5ea418bcfb8f61713908d3ea | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.24
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1StatefulSetSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'min_ready_seconds': 'int',
'persistent_volume_claim_retention_policy': 'V1StatefulSetPersistentVolumeClaimRetentionPolicy',
'pod_management_policy': 'str',
'replicas': 'int',
'revision_history_limit': 'int',
'selector': 'V1LabelSelector',
'service_name': 'str',
'template': 'V1PodTemplateSpec',
'update_strategy': 'V1StatefulSetUpdateStrategy',
'volume_claim_templates': 'list[V1PersistentVolumeClaim]'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'persistent_volume_claim_retention_policy': 'persistentVolumeClaimRetentionPolicy',
'pod_management_policy': 'podManagementPolicy',
'replicas': 'replicas',
'revision_history_limit': 'revisionHistoryLimit',
'selector': 'selector',
'service_name': 'serviceName',
'template': 'template',
'update_strategy': 'updateStrategy',
'volume_claim_templates': 'volumeClaimTemplates'
}
def __init__(self, min_ready_seconds=None, persistent_volume_claim_retention_policy=None, pod_management_policy=None, replicas=None, revision_history_limit=None, selector=None, service_name=None, template=None, update_strategy=None, volume_claim_templates=None, local_vars_configuration=None): # noqa: E501
"""V1StatefulSetSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._min_ready_seconds = None
self._persistent_volume_claim_retention_policy = None
self._pod_management_policy = None
self._replicas = None
self._revision_history_limit = None
self._selector = None
self._service_name = None
self._template = None
self._update_strategy = None
self._volume_claim_templates = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if persistent_volume_claim_retention_policy is not None:
self.persistent_volume_claim_retention_policy = persistent_volume_claim_retention_policy
if pod_management_policy is not None:
self.pod_management_policy = pod_management_policy
if replicas is not None:
self.replicas = replicas
if revision_history_limit is not None:
self.revision_history_limit = revision_history_limit
self.selector = selector
self.service_name = service_name
self.template = template
if update_strategy is not None:
self.update_strategy = update_strategy
if volume_claim_templates is not None:
self.volume_claim_templates = volume_claim_templates
@property
def min_ready_seconds(self):
"""Gets the min_ready_seconds of this V1StatefulSetSpec. # noqa: E501
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. # noqa: E501
:return: The min_ready_seconds of this V1StatefulSetSpec. # noqa: E501
:rtype: int
"""
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
"""Sets the min_ready_seconds of this V1StatefulSetSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. # noqa: E501
:param min_ready_seconds: The min_ready_seconds of this V1StatefulSetSpec. # noqa: E501
:type: int
"""
self._min_ready_seconds = min_ready_seconds
@property
def persistent_volume_claim_retention_policy(self):
"""Gets the persistent_volume_claim_retention_policy of this V1StatefulSetSpec. # noqa: E501
:return: The persistent_volume_claim_retention_policy of this V1StatefulSetSpec. # noqa: E501
:rtype: V1StatefulSetPersistentVolumeClaimRetentionPolicy
"""
return self._persistent_volume_claim_retention_policy
@persistent_volume_claim_retention_policy.setter
def persistent_volume_claim_retention_policy(self, persistent_volume_claim_retention_policy):
"""Sets the persistent_volume_claim_retention_policy of this V1StatefulSetSpec.
:param persistent_volume_claim_retention_policy: The persistent_volume_claim_retention_policy of this V1StatefulSetSpec. # noqa: E501
:type: V1StatefulSetPersistentVolumeClaimRetentionPolicy
"""
self._persistent_volume_claim_retention_policy = persistent_volume_claim_retention_policy
@property
def pod_management_policy(self):
"""Gets the pod_management_policy of this V1StatefulSetSpec. # noqa: E501
podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once. # noqa: E501
:return: The pod_management_policy of this V1StatefulSetSpec. # noqa: E501
:rtype: str
"""
return self._pod_management_policy
@pod_management_policy.setter
def pod_management_policy(self, pod_management_policy):
"""Sets the pod_management_policy of this V1StatefulSetSpec.
podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once. # noqa: E501
:param pod_management_policy: The pod_management_policy of this V1StatefulSetSpec. # noqa: E501
:type: str
"""
self._pod_management_policy = pod_management_policy
@property
def replicas(self):
"""Gets the replicas of this V1StatefulSetSpec. # noqa: E501
replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1. # noqa: E501
:return: The replicas of this V1StatefulSetSpec. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1StatefulSetSpec.
replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1. # noqa: E501
:param replicas: The replicas of this V1StatefulSetSpec. # noqa: E501
:type: int
"""
self._replicas = replicas
@property
def revision_history_limit(self):
"""Gets the revision_history_limit of this V1StatefulSetSpec. # noqa: E501
revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10. # noqa: E501
:return: The revision_history_limit of this V1StatefulSetSpec. # noqa: E501
:rtype: int
"""
return self._revision_history_limit
@revision_history_limit.setter
def revision_history_limit(self, revision_history_limit):
"""Sets the revision_history_limit of this V1StatefulSetSpec.
revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10. # noqa: E501
:param revision_history_limit: The revision_history_limit of this V1StatefulSetSpec. # noqa: E501
:type: int
"""
self._revision_history_limit = revision_history_limit
@property
def selector(self):
"""Gets the selector of this V1StatefulSetSpec. # noqa: E501
:return: The selector of this V1StatefulSetSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1StatefulSetSpec.
:param selector: The selector of this V1StatefulSetSpec. # noqa: E501
:type: V1LabelSelector
"""
if self.local_vars_configuration.client_side_validation and selector is None: # noqa: E501
raise ValueError("Invalid value for `selector`, must not be `None`") # noqa: E501
self._selector = selector
@property
def service_name(self):
"""Gets the service_name of this V1StatefulSetSpec. # noqa: E501
serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller. # noqa: E501
:return: The service_name of this V1StatefulSetSpec. # noqa: E501
:rtype: str
"""
return self._service_name
@service_name.setter
def service_name(self, service_name):
"""Sets the service_name of this V1StatefulSetSpec.
serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller. # noqa: E501
:param service_name: The service_name of this V1StatefulSetSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and service_name is None: # noqa: E501
raise ValueError("Invalid value for `service_name`, must not be `None`") # noqa: E501
self._service_name = service_name
@property
def template(self):
"""Gets the template of this V1StatefulSetSpec. # noqa: E501
:return: The template of this V1StatefulSetSpec. # noqa: E501
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1StatefulSetSpec.
:param template: The template of this V1StatefulSetSpec. # noqa: E501
:type: V1PodTemplateSpec
"""
if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
self._template = template
@property
def update_strategy(self):
"""Gets the update_strategy of this V1StatefulSetSpec. # noqa: E501
:return: The update_strategy of this V1StatefulSetSpec. # noqa: E501
:rtype: V1StatefulSetUpdateStrategy
"""
return self._update_strategy
@update_strategy.setter
def update_strategy(self, update_strategy):
"""Sets the update_strategy of this V1StatefulSetSpec.
:param update_strategy: The update_strategy of this V1StatefulSetSpec. # noqa: E501
:type: V1StatefulSetUpdateStrategy
"""
self._update_strategy = update_strategy
@property
def volume_claim_templates(self):
"""Gets the volume_claim_templates of this V1StatefulSetSpec. # noqa: E501
volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name. # noqa: E501
:return: The volume_claim_templates of this V1StatefulSetSpec. # noqa: E501
:rtype: list[V1PersistentVolumeClaim]
"""
return self._volume_claim_templates
@volume_claim_templates.setter
def volume_claim_templates(self, volume_claim_templates):
"""Sets the volume_claim_templates of this V1StatefulSetSpec.
volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name. # noqa: E501
:param volume_claim_templates: The volume_claim_templates of this V1StatefulSetSpec. # noqa: E501
:type: list[V1PersistentVolumeClaim]
"""
self._volume_claim_templates = volume_claim_templates
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StatefulSetSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StatefulSetSpec):
return True
return self.to_dict() != other.to_dict()
| 44.921622 | 555 | 0.699416 |
4a23e4a8cb38d07fb13ed7abcd41b4bff00cf532 | 575 | py | Python | Prototypes/Zeta Function/zeta_global.py | jackm245/Visualising-and-Investigating-the-Riemann-Hypothesis | 6eff14b6503cb2faf3bd8b0785239b690bce368a | [
"MIT"
] | 2 | 2021-05-01T19:07:28.000Z | 2021-05-18T07:34:42.000Z | Prototypes/Zeta Function/zeta_global.py | jackm245/Visualising-and-Investigating-the-Riemann-Hypothesis | 6eff14b6503cb2faf3bd8b0785239b690bce368a | [
"MIT"
] | null | null | null | Prototypes/Zeta Function/zeta_global.py | jackm245/Visualising-and-Investigating-the-Riemann-Hypothesis | 6eff14b6503cb2faf3bd8b0785239b690bce368a | [
"MIT"
] | null | null | null | from itertools import count, islice
# binomial coefficient
def binom(n, k):
v = 1
for i in range(k):
v *= (n - i) / (i + 1)
return v
# Global zeta function
def zeta(s, t=100):
if s == 1: return float('inf')
sum1 = 0
for n in range(t):
sum2 = 0
for k in range(n+1):
term1 = (-1)**k * binom(n, k) * (k+1)** (-s)
sum2 += term1
term2 = (1/(2**(n+1)))
sum1 += sum2 * term2
term1 = (1/(1-2**(1-s)))
return sum1 * term1
c_num = 100+100j
print(test_zeta(c_num))
print(zeta(c_num))
| 19.166667 | 56 | 0.497391 |
4a23e8d9179cef98457441580d4aca898be09c7b | 2,358 | py | Python | samples/cli/accelbyte_py_sdk_cli/lobby/_public_get_party_data_v1.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | samples/cli/accelbyte_py_sdk_cli/lobby/_public_get_party_data_v1.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | samples/cli/accelbyte_py_sdk_cli/lobby/_public_get_party_data_v1.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-lobby-server (staging)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.lobby import public_get_party_data_v1 as public_get_party_data_v1_internal
from accelbyte_py_sdk.api.lobby.models import ModelsPartyData
from accelbyte_py_sdk.api.lobby.models import RestapiErrorResponseBody
@click.command()
@click.argument("party_id", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def public_get_party_data_v1(
party_id: str,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(public_get_party_data_v1_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {
"Authorization": login_with_auth
}
else:
login_as_internal(login_as)
result, error = public_get_party_data_v1_internal(
party_id=party_id,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"publicGetPartyDataV1 failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
public_get_party_data_v1.operation_id = "publicGetPartyDataV1"
public_get_party_data_v1.is_deprecated = False
| 32.30137 | 100 | 0.748092 |
4a23e94d1a33f33cdb704bdbbb6731f5f4ae9d70 | 3,137 | py | Python | pylua/test/test_timer_lua.py | malirod/pylua | e6a1718428ab77f87c58890436a89bdd408ff95b | [
"MIT"
] | 1 | 2021-12-31T08:58:14.000Z | 2021-12-31T08:58:14.000Z | pylua/test/test_timer_lua.py | malirod/pylua | e6a1718428ab77f87c58890436a89bdd408ff95b | [
"MIT"
] | 12 | 2016-09-08T06:24:05.000Z | 2016-11-30T06:49:09.000Z | pylua/test/test_timer_lua.py | malirod/pylua | e6a1718428ab77f87c58890436a89bdd408ff95b | [
"MIT"
] | 7 | 2016-09-08T17:27:43.000Z | 2018-12-27T17:32:09.000Z | # -*- coding: utf-8 -*-
import timeit
from pylua.timer import Timer, msec_to_sec
from pylua.test.mixin import EventLoopMixin, LuaRuntimeMixin
class TestTimer(EventLoopMixin, LuaRuntimeMixin):
def setup(self):
EventLoopMixin.setup(self)
LuaRuntimeMixin.setup(self)
self.lua_runtime.globals()['Timer'] = Timer
self.lua_runtime.globals()['event_loop'] = self.loop
self.lua_runtime.globals()['now'] = timeit.default_timer
self.lua_runtime.globals()['msec_to_sec'] = msec_to_sec
def teardown(self):
LuaRuntimeMixin.setup(self)
EventLoopMixin.setup(self)
def test_start_timer(self):
lua_code = '''\
local shot_counter = 0
local interval_ms = 30
local started = now()
local function timer_task()
delay = now() - started
assert(delay >=
msec_to_sec(interval_ms), "Timer doesn't work correctly")
shot_counter = shot_counter + 1
event_loop.stop()
end
local timer = Timer(timer_task, interval_ms)
timer.start()
event_loop.run_forever()
assert(shot_counter == 1, "Wrong number of tasks fired")
'''
self.lua_runtime.execute(lua_code)
def test_start_multiple_timers(self):
lua_code = '''\
local timers_count = 5
local shot_counter = 0
local interval_ms = 30
local started = now()
function timer_task()
delay = now() - started
assert(delay >=
msec_to_sec(interval_ms), "Timer doesn't work correctly")
shot_counter = shot_counter + 1
if shot_counter == timers_count then
event_loop.stop()
end
end
local timers = {}
for i = 1, timers_count do
local timer = Timer(timer_task, interval_ms)
table.insert(timers, timer)
timer.start()
end
event_loop.run_forever()
assert(shot_counter == timers_count, "Wrong number of tasks fired")
'''
self.lua_runtime.execute(lua_code)
def test_multishot_timer(self):
lua_code = '''\
local shots_count = 3
local shot_counter = 0
local interval_ms = 30
local started = now()
function timer_task()
delay = now() - started
assert(delay >=
msec_to_sec(interval_ms), "Timer doesn't work correctly")
shot_counter = shot_counter + 1
started = now()
if shot_counter >= shots_count then
event_loop.stop()
end
end
local timer = Timer(timer_task, interval_ms, true)
timer.start()
event_loop.run_forever()
assert(shot_counter == shots_count, "Wrong number of tasks fired")
'''
self.lua_runtime.execute(lua_code)
| 35.247191 | 79 | 0.542875 |
4a23e9d4a5c11381c684797be8e15e8a7a5aefc9 | 3,197 | py | Python | tests/test_controller.py | raboley/categorize-images | bd6fd3ab11efa5b1a9957f86113f9601e4f8f8a0 | [
"MIT"
] | null | null | null | tests/test_controller.py | raboley/categorize-images | bd6fd3ab11efa5b1a9957f86113f9601e4f8f8a0 | [
"MIT"
] | null | null | null | tests/test_controller.py | raboley/categorize-images | bd6fd3ab11efa5b1a9957f86113f9601e4f8f8a0 | [
"MIT"
] | null | null | null | # from .context import categorize
# import os
# import unittest
# class controller(unittest.TestCase):
# """
# ensure that this will recognize images
# Post the json in a folder
# determine the image type (stat, main, side 1 and 2)
# extract the weapon name from the stat page
# post the weapon name and image group folder date stamp to an archive file
# move and rename the image to staging location for image cropping
# """
# def setUp(self):
# self.source_json_path = './tests/_testArtifacts/test_controller/Ruby_Crystal_Ring_Stats.json'
# self.json = categorize.find_in_json.get_json(self.source_json_path)
# self.weapon_name = categorize.get_weapon_name(self.json)
# self.character_map = './tests/_testArtifacts/test_controller/weapon_character_pairs.json'
# self.weapon_map = './tests/_testArtifacts/test_controller/__temp/folder_weapon_pairs.json'
# self.pair = categorize.ArchivePair(self.weapon_map)
# self.image_path = './tests/_testArtifacts/test_controller/abcdefg_ruby_crystalring_bdc.jpg'
# self.final_path_folder = './tests/_testArtifacts/test_controller/__temp/'
# self.base_path = './tests/_testArtifacts/test_controller/'
# # Json can be gotten
# def test_can_get_json(self):
# self.json = categorize.find_in_json.get_json(self.source_json_path)
# self.assertIsNotNone(self.json)
# # Determine image type based on json
# def test_can_tell_it_is_stats_image(self):
# picture_type = categorize.determine_picture_type(self.json)
# self.assertEqual(picture_type, 'Stat')
# # If it is a stat screen
# # get the weapon name
# def test_can_get_weapon_name(self):
# weapon_name = categorize.get_weapon_name(self.json)
# self.assertEqual(weapon_name, 'Crystaring')
# # figure out the character name based on the weapon name
# def test_get_character_name_from_weapon_name(self):
# character_name = categorize.determine_character(self.weapon_name, self.character_map)
# self.assertEqual(character_name, 'Ruby')
# # write the weapon name to the pair file
# def test_can_write_image_weapon_pair_to_file(self):
# categorize.write_weapon_map_to_file('test_controller', self.weapon_name, self.pair)
# data = [{"key":'test_controller',"value":self.weapon_name}]
# self.assertEqual(data, self.pair.read_pair_file())
# # copy the file to the staging folder with the correct name
# def test_reads_weapon_name_from_file(self):
# couple = self.pair.get_pair('test_controller')
# self.assertEqual(couple['value'], 'Crystaring')
# def test_gets_final_file_name_from_files(self):
# file_object = categorize.FileOs(base_path=self.base_path)
# file_name = categorize.get_image_file_name(file_object=file_object)
# self.assertEquals(file_name, 'Ruby_Crystaring_Stats.jpg')
# def test_moves_the_file_to_staging_with_correct_name(self):
# pass
# def test_z_final_cleanup(self):
# os.remove(self.weapon_map)
# if __name__ == '__main__':
# unittest.main() | 45.671429 | 103 | 0.70441 |
4a23ea0f10d52c2e72e43fd5bb80448fb39a03d1 | 12,341 | py | Python | ml-agents/mlagents/trainers/torch/utils.py | Phong13/ml-agents | 393808b50581e66085578b01d9d907b65a9240f0 | [
"Apache-2.0"
] | null | null | null | ml-agents/mlagents/trainers/torch/utils.py | Phong13/ml-agents | 393808b50581e66085578b01d9d907b65a9240f0 | [
"Apache-2.0"
] | null | null | null | ml-agents/mlagents/trainers/torch/utils.py | Phong13/ml-agents | 393808b50581e66085578b01d9d907b65a9240f0 | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional, Tuple
from mlagents.torch_utils import torch, nn
import numpy as np
from mlagents.trainers.torch.encoders import (
SimpleVisualEncoder,
ResNetVisualEncoder,
NatureVisualEncoder,
SmallVisualEncoder,
VectorInput,
)
from mlagents.trainers.settings import EncoderType, ScheduleType
from mlagents.trainers.exception import UnityTrainerException
from mlagents_envs.base_env import BehaviorSpec
from mlagents.trainers.torch.distributions import DistInstance, DiscreteDistInstance
class ModelUtils:
# Minimum supported side for each encoder type. If refactoring an encoder, please
# adjust these also.
MIN_RESOLUTION_FOR_ENCODER = {
EncoderType.MATCH3: 5,
EncoderType.SIMPLE: 20,
EncoderType.NATURE_CNN: 36,
EncoderType.RESNET: 15,
}
class ActionFlattener:
def __init__(self, behavior_spec: BehaviorSpec):
self._specs = behavior_spec
@property
def flattened_size(self) -> int:
if self._specs.is_action_continuous():
return self._specs.action_size
else:
return sum(self._specs.discrete_action_branches)
def forward(self, action: torch.Tensor) -> torch.Tensor:
if self._specs.is_action_continuous():
return action
else:
return torch.cat(
ModelUtils.actions_to_onehot(
torch.as_tensor(action, dtype=torch.long),
self._specs.discrete_action_branches,
),
dim=1,
)
@staticmethod
def update_learning_rate(optim: torch.optim.Optimizer, lr: float) -> None:
"""
Apply a learning rate to a torch optimizer.
:param optim: Optimizer
:param lr: Learning rate
"""
for param_group in optim.param_groups:
param_group["lr"] = lr
class DecayedValue:
def __init__(
self,
schedule: ScheduleType,
initial_value: float,
min_value: float,
max_step: int,
):
"""
        Object that represents the value of a parameter that should be decayed, assuming it is a function of
global_step.
:param schedule: Type of learning rate schedule.
:param initial_value: Initial value before decay.
:param min_value: Decay value to this value by max_step.
:param max_step: The final step count where the return value should equal min_value.
:param global_step: The current step count.
:return: The value.
"""
self.schedule = schedule
self.initial_value = initial_value
self.min_value = min_value
self.max_step = max_step
def get_value(self, global_step: int) -> float:
"""
Get the value at a given global step.
:param global_step: Step count.
:returns: Decayed value at this global step.
"""
if self.schedule == ScheduleType.CONSTANT:
return self.initial_value
elif self.schedule == ScheduleType.LINEAR:
return ModelUtils.polynomial_decay(
self.initial_value, self.min_value, self.max_step, global_step
)
else:
raise UnityTrainerException(f"The schedule {self.schedule} is invalid.")
@staticmethod
def polynomial_decay(
initial_value: float,
min_value: float,
max_step: int,
global_step: int,
power: float = 1.0,
) -> float:
"""
Get a decayed value based on a polynomial schedule, with respect to the current global step.
:param initial_value: Initial value before decay.
:param min_value: Decay value to this value by max_step.
:param max_step: The final step count where the return value should equal min_value.
:param global_step: The current step count.
:param power: Power of polynomial decay. 1.0 (default) is a linear decay.
:return: The current decayed value.
"""
global_step = min(global_step, max_step)
decayed_value = (initial_value - min_value) * (
1 - float(global_step) / max_step
) ** (power) + min_value
return decayed_value
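    # Worked example (not part of the original file): with power=1.0 the decay is linear, e.g.
    # polynomial_decay(3e-4, 1e-5, max_step=1000, global_step=500) returns
    # (3e-4 - 1e-5) * (1 - 0.5) + 1e-5 = 1.55e-4, and any global_step >= max_step returns min_value.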
@staticmethod
def get_encoder_for_type(encoder_type: EncoderType) -> nn.Module:
ENCODER_FUNCTION_BY_TYPE = {
EncoderType.SIMPLE: SimpleVisualEncoder,
EncoderType.NATURE_CNN: NatureVisualEncoder,
EncoderType.RESNET: ResNetVisualEncoder,
EncoderType.MATCH3: SmallVisualEncoder,
}
return ENCODER_FUNCTION_BY_TYPE.get(encoder_type)
@staticmethod
def _check_resolution_for_encoder(
height: int, width: int, vis_encoder_type: EncoderType
) -> None:
min_res = ModelUtils.MIN_RESOLUTION_FOR_ENCODER[vis_encoder_type]
if height < min_res or width < min_res:
raise UnityTrainerException(
f"Visual observation resolution ({width}x{height}) is too small for"
f"the provided EncoderType ({vis_encoder_type.value}). The min dimension is {min_res}"
)
@staticmethod
def create_input_processors(
observation_shapes: List[Tuple[int, ...]],
h_size: int,
vis_encode_type: EncoderType,
normalize: bool = False,
) -> Tuple[nn.ModuleList, nn.ModuleList, int]:
"""
Creates visual and vector encoders, along with their normalizers.
:param observation_shapes: List of Tuples that represent the action dimensions.
:param action_size: Number of additional un-normalized inputs to each vector encoder. Used for
conditioining network on other values (e.g. actions for a Q function)
:param h_size: Number of hidden units per layer.
:param vis_encode_type: Type of visual encoder to use.
:param unnormalized_inputs: Vector inputs that should not be normalized, and added to the vector
obs.
:param normalize: Normalize all vector inputs.
:return: Tuple of visual encoders and vector encoders each as a list.
"""
visual_encoders: List[nn.Module] = []
vector_encoders: List[nn.Module] = []
visual_encoder_class = ModelUtils.get_encoder_for_type(vis_encode_type)
vector_size = 0
visual_output_size = 0
for i, dimension in enumerate(observation_shapes):
if len(dimension) == 3:
ModelUtils._check_resolution_for_encoder(
dimension[0], dimension[1], vis_encode_type
)
visual_encoders.append(
visual_encoder_class(
dimension[0], dimension[1], dimension[2], h_size
)
)
visual_output_size += h_size
elif len(dimension) == 1:
vector_size += dimension[0]
else:
raise UnityTrainerException(
f"Unsupported shape of {dimension} for observation {i}"
)
if vector_size > 0:
vector_encoders.append(VectorInput(vector_size, normalize))
# Total output size for all inputs + CNNs
total_processed_size = vector_size + visual_output_size
return (
nn.ModuleList(visual_encoders),
nn.ModuleList(vector_encoders),
total_processed_size,
)
@staticmethod
def list_to_tensor(
ndarray_list: List[np.ndarray], dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
"""
Converts a list of numpy arrays into a tensor. MUCH faster than
calling as_tensor on the list directly.
"""
return torch.as_tensor(np.asanyarray(ndarray_list), dtype=dtype)
@staticmethod
def to_numpy(tensor: torch.Tensor) -> np.ndarray:
"""
Converts a Torch Tensor to a numpy array. If the Tensor is on the GPU, it will
be brought to the CPU.
"""
return tensor.detach().cpu().numpy()
@staticmethod
def break_into_branches(
concatenated_logits: torch.Tensor, action_size: List[int]
) -> List[torch.Tensor]:
"""
Takes a concatenated set of logits that represent multiple discrete action branches
and breaks it up into one Tensor per branch.
:param concatenated_logits: Tensor that represents the concatenated action branches
:param action_size: List of ints containing the number of possible actions for each branch.
:return: A List of Tensors containing one tensor per branch.
"""
action_idx = [0] + list(np.cumsum(action_size))
branched_logits = [
concatenated_logits[:, action_idx[i] : action_idx[i + 1]]
for i in range(len(action_size))
]
return branched_logits
@staticmethod
def actions_to_onehot(
discrete_actions: torch.Tensor, action_size: List[int]
) -> List[torch.Tensor]:
"""
Takes a tensor of discrete actions and turns it into a List of onehot encoding for each
action.
:param discrete_actions: Actions in integer form.
:param action_size: List of branch sizes. Should be of same size as discrete_actions'
last dimension.
:return: List of one-hot tensors, one representing each branch.
"""
onehot_branches = [
torch.nn.functional.one_hot(_act.T, action_size[i]).float()
for i, _act in enumerate(discrete_actions.long().T)
]
return onehot_branches
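    # Illustrative example (not part of the original file): with branch sizes [3, 2] and a batch
    # of actions torch.tensor([[2, 0], [1, 1]]), actions_to_onehot returns one tensor per branch:
    #   [tensor([[0., 0., 1.], [0., 1., 0.]]), tensor([[1., 0.], [0., 1.]])]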
@staticmethod
def dynamic_partition(
data: torch.Tensor, partitions: torch.Tensor, num_partitions: int
) -> List[torch.Tensor]:
"""
Torch implementation of dynamic_partition :
https://www.tensorflow.org/api_docs/python/tf/dynamic_partition
Splits the data Tensor input into num_partitions Tensors according to the indices in
partitions.
:param data: The Tensor data that will be split into partitions.
:param partitions: An indices tensor that determines in which partition each element
of data will be in.
:param num_partitions: The number of partitions to output. Corresponds to the
maximum possible index in the partitions argument.
:return: A list of Tensor partitions (Their indices correspond to their partition index).
"""
res: List[torch.Tensor] = []
for i in range(num_partitions):
res += [data[(partitions == i).nonzero().squeeze(1)]]
return res
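    # Illustrative sketch (not part of the original file): mirrors tf.dynamic_partition,
    # e.g. splitting a batch by an index tensor:
    #
    #   data = torch.tensor([[1.0], [2.0], [3.0]])
    #   parts = ModelUtils.dynamic_partition(data, torch.tensor([0, 1, 0]), 2)
    #   # parts[0] holds rows 0 and 2, parts[1] holds row 1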
@staticmethod
def get_probs_and_entropy(
action_list: List[torch.Tensor], dists: List[DistInstance]
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
log_probs_list = []
all_probs_list = []
entropies_list = []
for action, action_dist in zip(action_list, dists):
log_prob = action_dist.log_prob(action)
log_probs_list.append(log_prob)
entropies_list.append(action_dist.entropy())
if isinstance(action_dist, DiscreteDistInstance):
all_probs_list.append(action_dist.all_log_prob())
log_probs = torch.stack(log_probs_list, dim=-1)
entropies = torch.stack(entropies_list, dim=-1)
if not all_probs_list:
log_probs = log_probs.squeeze(-1)
entropies = entropies.squeeze(-1)
all_probs = None
else:
all_probs = torch.cat(all_probs_list, dim=-1)
return log_probs, entropies, all_probs
@staticmethod
def masked_mean(tensor: torch.Tensor, masks: torch.Tensor) -> torch.Tensor:
"""
Returns the mean of the tensor but ignoring the values specified by masks.
Used for masking out loss functions.
:param tensor: Tensor which needs mean computation.
:param masks: Boolean tensor of masks with same dimension as tensor.
"""
return (tensor.T * masks).sum() / torch.clamp(
(torch.ones_like(tensor.T) * masks).float().sum(), min=1.0
)
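    # Illustrative sketch (not part of the original file): averaging a per-step loss
    # while ignoring masked-out steps:
    #
    #   loss = torch.tensor([1.0, 2.0, 3.0])
    #   masks = torch.tensor([True, True, False])
    #   ModelUtils.masked_mean(loss, masks)  # -> 1.5, mean of the unmasked values only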
| 40.595395 | 108 | 0.625962 |
4a23eb2928a35900a10bc0c57fa592f5f5c3897e | 6,642 | py | Python | agentQ/ga.py | johbjo09/nn-agentq | fc1524224804146f8611c72e396daf85d96d5732 | [
"MIT"
] | null | null | null | agentQ/ga.py | johbjo09/nn-agentq | fc1524224804146f8611c72e396daf85d96d5732 | [
"MIT"
] | null | null | null | agentQ/ga.py | johbjo09/nn-agentq | fc1524224804146f8611c72e396daf85d96d5732 | [
"MIT"
] | null | null | null | from time import time
import numpy as np
import math
import random
from abc import ABCMeta, abstractmethod, abstractproperty
from copy import deepcopy
class GeneticThing():
__metaclass__ = ABCMeta
@abstractproperty
def fitness(self):
pass
@abstractmethod
def mutate(self, r_mutate):
pass
@abstractmethod
def crosswith(self, that_thing):
pass
@abstractmethod
def distanceto(self, that_thing):
pass
# These are for computing the mean individual
@abstractmethod
def add(self, that_thing):
pass
@abstractmethod
def subtract(self, that_thing):
pass
@abstractmethod
def divide_by(self, divisor):
pass
class GeneticAlgorithm():
def __init__(self, population_size,
r_mutation=0.04,
apex_stddev = 1):
self.generation = 0
# Steady state population size
self._population_size = population_size
self._r_mutation = r_mutation
self.apex_stddev = apex_stddev
self.population = []
self.apexes = []
@property
def population_size(self):
population_size = 10 * self._population_size * np.exp(-0.5 * self.generation) + self._population_size
return int(population_size)
def _selection_base(self, population_size):
selection_base = self._population_size - len(self.apexes)
if selection_base < 1:
selection_base = 1
return selection_base
p_selection = 0.8 - 0.5 * np.exp(-self.generation / 5.0)
selection_base = int(self.population_size * p_selection)
print("selection_base: " + str(selection_base))
return selection_base
def _mutation_rate(self):
r_mutation = (1.0 - self._r_mutation) * np.exp(-0.005 * self.generation) + self._r_mutation
return r_mutation
def append(self, thing):
self.population.append(thing)
def __iter__(self):
return iter(self.population)
def evolve(self):
population_size = self.population_size
selection_base = self._selection_base(population_size)
r_mutation = self._mutation_rate()
selection_size = int(population_size / 2.0)
apex_maxsize = int(0.2 * selection_base)
if selection_size < 1:
selection_size = 1
if apex_maxsize < 1:
apex_maxsize = 1
self.population.sort(key=lambda s: -s.fitness)
self.population = self.population[0:selection_base]
self.population.extend(self.apexes)
fitness = [ thing.fitness for thing in self.population ]
sum_fitness = sum(fitness)
max_fitness = max(fitness)
mean_fitness = np.mean(fitness)
stddev_fitness = np.sqrt(np.var(fitness))
apex_cutoff = mean_fitness + self.apex_stddev * stddev_fitness
p_fitness = lambda i: fitness[i]/max_fitness
# Distance to mean individual is measure of "distance"
population_mean = deepcopy(self.population[0])
for thing in self.population[1:]:
population_mean.add(thing)
population_mean.divide_by(len(self.population))
distances = [ thing.distanceto(population_mean) for thing in self.population ]
max_distance = max(distances)
p_distance = lambda i: distances[i]/max_distance
# Rank function
f_rank = lambda i: p_fitness(i)* 0.7 + 0.3 * p_distance(i)
if max_distance == 0:
f_rank = lambda i: p_fitness(i)
rankings = [ f_rank(i) for i in range(len(self.population)) ]
i_apex = list(filter(lambda i: fitness[i] > apex_cutoff, range(len(self.population))))
if len(i_apex) > apex_maxsize:
i_apex = range(apex_maxsize)
self.apexes = [ deepcopy(self.population[i]) for i in i_apex ]
print("Generation: {}, mean(fitness): {:.2f}, stddev(fitness): {:.2f}, r_mutation: {:.2f}".format(self.generation,
mean_fitness,
stddev_fitness,
r_mutation))
for i in i_apex:
print(" apex - fitness: {:.2f}, distance: {:.2f}, rank: {:.2f}".format(fitness[i], distances[i], rankings[i]))
next_generation = []
trials = 0
if self.generation < 3:
i_selections = []
i_selections += i_apex
while len(i_selections) < selection_size and (trials < (100 * population_size)):
trials += 1
i = random.randint(0, len(self.population)-1)
if i in i_selections:
continue
p_selection = rankings[i]
if random.random() < p_selection:
i_selections.append(i)
for i1 in i_selections:
ancestor1 = self.population[i1]
fitness1 = p_fitness(i1)
mutant1 = deepcopy(ancestor1)
mutant1.mutate(r_mutation * (1 - 0.5*fitness1))
next_generation.append(ancestor1)
next_generation.append(mutant1)
else:
while len(next_generation) < population_size:
i1 = random.randint(0, len(self.population)-1)
i2 = random.randint(0, len(self.population)-1)
p_selection1 = rankings[i1]
p_selection2 = rankings[i2]
if random.random() < p_selection1 and random.random() < p_selection2:
ancestor1 = self.population[i1]
ancestor2 = self.population[i2]
fitness1 = p_fitness(i1)
fitness2 = p_fitness(i2)
offspring1 = ancestor1.crosswith(ancestor2, fitness2/(fitness1+fitness2))
# offspring2 = ancestor2.crosswith(ancestor1, fitness1/(fitness1+fitness2))
# offspring2.mutate(1 - np.sqrt(fitness1 * fitness2))
next_generation.append(offspring1)
# next_generation.append(offspring2)
self.population = next_generation
self.generation += 1
return sum_fitness
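# Illustrative usage sketch (not part of the original module). `MyThing` is a stand-in
# for any concrete GeneticThing subclass implementing fitness/mutate/crosswith/etc.:
#
#   ga = GeneticAlgorithm(population_size=20, r_mutation=0.04)
#   for _ in range(ga.population_size):
#       ga.append(MyThing())
#   for _ in range(100):
#       # ... evaluate each individual so that `fitness` is up to date ...
#       ga.evolve()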
| 31.932692 | 122 | 0.572719 |
4a23ebd2940dd6045ef0018c7ee6e296745ba8ad | 51,412 | py | Python | google/cloud/compute_v1/services/target_ssl_proxies/client.py | auphofBSF/python-compute | c81bfa752c9db93edd0cd56fec3a79599704d792 | [
"Apache-2.0"
] | null | null | null | google/cloud/compute_v1/services/target_ssl_proxies/client.py | auphofBSF/python-compute | c81bfa752c9db93edd0cd56fec3a79599704d792 | [
"Apache-2.0"
] | null | null | null | google/cloud/compute_v1/services/target_ssl_proxies/client.py | auphofBSF/python-compute | c81bfa752c9db93edd0cd56fec3a79599704d792 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.services.target_ssl_proxies import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import TargetSslProxiesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import TargetSslProxiesRestTransport
class TargetSslProxiesClientMeta(type):
"""Metaclass for the TargetSslProxies client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[TargetSslProxiesTransport]]
_transport_registry["rest"] = TargetSslProxiesRestTransport
def get_transport_class(cls, label: str = None,) -> Type[TargetSslProxiesTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class TargetSslProxiesClient(metaclass=TargetSslProxiesClientMeta):
"""The TargetSslProxies API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "compute.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TargetSslProxiesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TargetSslProxiesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
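    # Illustrative sketch (not part of the generated file; the key path is a placeholder):
    #
    #   client = TargetSslProxiesClient.from_service_account_file("service-account.json")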
@property
def transport(self) -> TargetSslProxiesTransport:
"""Return the transport used by the client instance.
Returns:
TargetSslProxiesTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, TargetSslProxiesTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the target ssl proxies client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, TargetSslProxiesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
client_cert_source_func = (
mtls.default_client_cert_source() if is_mtls else None
)
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, TargetSslProxiesTransport):
# transport is a TargetSslProxiesTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, "
"provide its scopes directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
def delete(
self,
request: compute.DeleteTargetSslProxyRequest = None,
*,
project: str = None,
target_ssl_proxy: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Deletes the specified TargetSslProxy resource.
Args:
request (google.cloud.compute_v1.types.DeleteTargetSslProxyRequest):
The request object. A request message for
TargetSslProxies.Delete. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxy (str):
Name of the TargetSslProxy resource
to delete.
This corresponds to the ``target_ssl_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- [Global](/compute/docs/reference/rest/{$api_version}/globalOperations)
\*
[Regional](/compute/docs/reference/rest/{$api_version}/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)
You can use an operation resource to manage
asynchronous API requests. For more information, read
Handling API responses.
Operations can be global, regional or zonal. - For
global operations, use the globalOperations resource.
- For regional operations, use the regionOperations
resource. - For zonal operations, use the
zonalOperations resource.
For more information, read Global, Regional, and
Zonal Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, target_ssl_proxy])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.DeleteTargetSslProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.DeleteTargetSslProxyRequest):
request = compute.DeleteTargetSslProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_ssl_proxy is not None:
request.target_ssl_proxy = target_ssl_proxy
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get(
self,
request: compute.GetTargetSslProxyRequest = None,
*,
project: str = None,
target_ssl_proxy: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TargetSslProxy:
r"""Returns the specified TargetSslProxy resource. Gets a
list of available target SSL proxies by making a list()
request.
Args:
request (google.cloud.compute_v1.types.GetTargetSslProxyRequest):
The request object. A request message for
TargetSslProxies.Get. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxy (str):
Name of the TargetSslProxy resource
to return.
This corresponds to the ``target_ssl_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.TargetSslProxy:
Represents a Target SSL Proxy resource.
A target SSL proxy is a component of a SSL Proxy load
balancer. Global forwarding rules reference a target
SSL proxy, and the target proxy then references an
external backend service. For more information, read
Using Target Proxies. (== resource_for
{$api_version}.targetSslProxies ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, target_ssl_proxy])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetTargetSslProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetTargetSslProxyRequest):
request = compute.GetTargetSslProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_ssl_proxy is not None:
request.target_ssl_proxy = target_ssl_proxy
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def insert(
self,
request: compute.InsertTargetSslProxyRequest = None,
*,
project: str = None,
target_ssl_proxy_resource: compute.TargetSslProxy = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Creates a TargetSslProxy resource in the specified
project using the data included in the request.
Args:
request (google.cloud.compute_v1.types.InsertTargetSslProxyRequest):
The request object. A request message for
TargetSslProxies.Insert. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxy_resource (google.cloud.compute_v1.types.TargetSslProxy):
The body resource for this request
This corresponds to the ``target_ssl_proxy_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- [Global](/compute/docs/reference/rest/{$api_version}/globalOperations)
\*
[Regional](/compute/docs/reference/rest/{$api_version}/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)
You can use an operation resource to manage
asynchronous API requests. For more information, read
Handling API responses.
Operations can be global, regional or zonal. - For
global operations, use the globalOperations resource.
- For regional operations, use the regionOperations
resource. - For zonal operations, use the
zonalOperations resource.
For more information, read Global, Regional, and
Zonal Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, target_ssl_proxy_resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.InsertTargetSslProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.InsertTargetSslProxyRequest):
request = compute.InsertTargetSslProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_ssl_proxy_resource is not None:
request.target_ssl_proxy_resource = target_ssl_proxy_resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.insert]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list(
self,
request: compute.ListTargetSslProxiesRequest = None,
*,
project: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPager:
r"""Retrieves the list of TargetSslProxy resources
available to the specified project.
Args:
request (google.cloud.compute_v1.types.ListTargetSslProxiesRequest):
The request object. A request message for
TargetSslProxies.List. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.services.target_ssl_proxies.pagers.ListPager:
Contains a list of TargetSslProxy
resources.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.ListTargetSslProxiesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.ListTargetSslProxiesRequest):
request = compute.ListTargetSslProxiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def set_backend_service(
self,
request: compute.SetBackendServiceTargetSslProxyRequest = None,
*,
project: str = None,
target_ssl_proxy: str = None,
target_ssl_proxies_set_backend_service_request_resource: compute.TargetSslProxiesSetBackendServiceRequest = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Changes the BackendService for TargetSslProxy.
Args:
request (google.cloud.compute_v1.types.SetBackendServiceTargetSslProxyRequest):
The request object. A request message for
TargetSslProxies.SetBackendService. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxy (str):
Name of the TargetSslProxy resource
whose BackendService resource is to be
set.
This corresponds to the ``target_ssl_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxies_set_backend_service_request_resource (google.cloud.compute_v1.types.TargetSslProxiesSetBackendServiceRequest):
The body resource for this request
This corresponds to the ``target_ssl_proxies_set_backend_service_request_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- [Global](/compute/docs/reference/rest/{$api_version}/globalOperations)
\*
[Regional](/compute/docs/reference/rest/{$api_version}/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)
You can use an operation resource to manage
asynchronous API requests. For more information, read
Handling API responses.
Operations can be global, regional or zonal. - For
global operations, use the globalOperations resource.
- For regional operations, use the regionOperations
resource. - For zonal operations, use the
zonalOperations resource.
For more information, read Global, Regional, and
Zonal Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[
project,
target_ssl_proxy,
target_ssl_proxies_set_backend_service_request_resource,
]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.SetBackendServiceTargetSslProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.SetBackendServiceTargetSslProxyRequest):
request = compute.SetBackendServiceTargetSslProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_ssl_proxy is not None:
request.target_ssl_proxy = target_ssl_proxy
if target_ssl_proxies_set_backend_service_request_resource is not None:
request.target_ssl_proxies_set_backend_service_request_resource = (
target_ssl_proxies_set_backend_service_request_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_backend_service]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def set_proxy_header(
self,
request: compute.SetProxyHeaderTargetSslProxyRequest = None,
*,
project: str = None,
target_ssl_proxy: str = None,
target_ssl_proxies_set_proxy_header_request_resource: compute.TargetSslProxiesSetProxyHeaderRequest = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Changes the ProxyHeaderType for TargetSslProxy.
Args:
request (google.cloud.compute_v1.types.SetProxyHeaderTargetSslProxyRequest):
The request object. A request message for
TargetSslProxies.SetProxyHeader. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxy (str):
Name of the TargetSslProxy resource
whose ProxyHeader is to be set.
This corresponds to the ``target_ssl_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxies_set_proxy_header_request_resource (google.cloud.compute_v1.types.TargetSslProxiesSetProxyHeaderRequest):
The body resource for this request
This corresponds to the ``target_ssl_proxies_set_proxy_header_request_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- [Global](/compute/docs/reference/rest/{$api_version}/globalOperations)
\*
[Regional](/compute/docs/reference/rest/{$api_version}/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)
You can use an operation resource to manage
asynchronous API requests. For more information, read
Handling API responses.
Operations can be global, regional or zonal. - For
global operations, use the globalOperations resource.
- For regional operations, use the regionOperations
resource. - For zonal operations, use the
zonalOperations resource.
For more information, read Global, Regional, and
Zonal Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[
project,
target_ssl_proxy,
target_ssl_proxies_set_proxy_header_request_resource,
]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.SetProxyHeaderTargetSslProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.SetProxyHeaderTargetSslProxyRequest):
request = compute.SetProxyHeaderTargetSslProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_ssl_proxy is not None:
request.target_ssl_proxy = target_ssl_proxy
if target_ssl_proxies_set_proxy_header_request_resource is not None:
request.target_ssl_proxies_set_proxy_header_request_resource = (
target_ssl_proxies_set_proxy_header_request_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_proxy_header]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def set_ssl_certificates(
self,
request: compute.SetSslCertificatesTargetSslProxyRequest = None,
*,
project: str = None,
target_ssl_proxy: str = None,
target_ssl_proxies_set_ssl_certificates_request_resource: compute.TargetSslProxiesSetSslCertificatesRequest = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Changes SslCertificates for TargetSslProxy.
Args:
request (google.cloud.compute_v1.types.SetSslCertificatesTargetSslProxyRequest):
The request object. A request message for
TargetSslProxies.SetSslCertificates. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxy (str):
Name of the TargetSslProxy resource
whose SslCertificate resource is to be
set.
This corresponds to the ``target_ssl_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxies_set_ssl_certificates_request_resource (google.cloud.compute_v1.types.TargetSslProxiesSetSslCertificatesRequest):
The body resource for this request
This corresponds to the ``target_ssl_proxies_set_ssl_certificates_request_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- [Global](/compute/docs/reference/rest/{$api_version}/globalOperations)
\*
[Regional](/compute/docs/reference/rest/{$api_version}/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)
You can use an operation resource to manage
asynchronous API requests. For more information, read
Handling API responses.
Operations can be global, regional or zonal. - For
global operations, use the globalOperations resource.
- For regional operations, use the regionOperations
resource. - For zonal operations, use the
zonalOperations resource.
For more information, read Global, Regional, and
Zonal Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[
project,
target_ssl_proxy,
target_ssl_proxies_set_ssl_certificates_request_resource,
]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.SetSslCertificatesTargetSslProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.SetSslCertificatesTargetSslProxyRequest):
request = compute.SetSslCertificatesTargetSslProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_ssl_proxy is not None:
request.target_ssl_proxy = target_ssl_proxy
if target_ssl_proxies_set_ssl_certificates_request_resource is not None:
request.target_ssl_proxies_set_ssl_certificates_request_resource = (
target_ssl_proxies_set_ssl_certificates_request_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_ssl_certificates]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def set_ssl_policy(
self,
request: compute.SetSslPolicyTargetSslProxyRequest = None,
*,
project: str = None,
target_ssl_proxy: str = None,
ssl_policy_reference_resource: compute.SslPolicyReference = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Sets the SSL policy for TargetSslProxy. The SSL
policy specifies the server-side support for SSL
features. This affects connections between clients and
the SSL proxy load balancer. They do not affect the
connection between the load balancer and the backends.
Args:
request (google.cloud.compute_v1.types.SetSslPolicyTargetSslProxyRequest):
The request object. A request message for
TargetSslProxies.SetSslPolicy. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_ssl_proxy (str):
Name of the TargetSslProxy resource
whose SSL policy is to be set. The name
must be 1-63 characters long, and comply
with RFC1035.
This corresponds to the ``target_ssl_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
ssl_policy_reference_resource (google.cloud.compute_v1.types.SslPolicyReference):
The body resource for this request
This corresponds to the ``ssl_policy_reference_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- [Global](/compute/docs/reference/rest/{$api_version}/globalOperations)
\*
[Regional](/compute/docs/reference/rest/{$api_version}/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)
You can use an operation resource to manage
asynchronous API requests. For more information, read
Handling API responses.
Operations can be global, regional or zonal. - For
global operations, use the globalOperations resource.
- For regional operations, use the regionOperations
resource. - For zonal operations, use the
zonalOperations resource.
For more information, read Global, Regional, and
Zonal Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[project, target_ssl_proxy, ssl_policy_reference_resource]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.SetSslPolicyTargetSslProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.SetSslPolicyTargetSslProxyRequest):
request = compute.SetSslPolicyTargetSslProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_ssl_proxy is not None:
request.target_ssl_proxy = target_ssl_proxy
if ssl_policy_reference_resource is not None:
request.ssl_policy_reference_resource = ssl_policy_reference_resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_ssl_policy]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TargetSslProxiesClient",)
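# Illustrative usage sketch (not part of the generated file; the project ID is a placeholder):
#
#   client = TargetSslProxiesClient()
#   for proxy in client.list(project="my-project"):
#       print(proxy.name)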
| 45.177504 | 143 | 0.618105 |
4a23ec0a10a019396f00a929660f8fcc7a383c34 | 840 | py | Python | profiles/migrations/0009_auto_20180716_0723.py | doriclazar/peak_30 | a87217e4d0d1f96d39ad214d40a879c7abfaaaee | [
"Apache-2.0"
] | null | null | null | profiles/migrations/0009_auto_20180716_0723.py | doriclazar/peak_30 | a87217e4d0d1f96d39ad214d40a879c7abfaaaee | [
"Apache-2.0"
] | 1 | 2018-07-14T07:35:55.000Z | 2018-07-16T07:40:49.000Z | profiles/migrations/0009_auto_20180716_0723.py | doriclazar/peak_30 | a87217e4d0d1f96d39ad214d40a879c7abfaaaee | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-07-16 07:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0008_auto_20180716_0715'),
]
operations = [
migrations.AlterField(
model_name='botprofile',
name='icon',
field=models.CharField(default='fas fa-android', max_length=64),
),
migrations.AlterField(
model_name='groupprofile',
name='icon',
field=models.CharField(default='fas fa-users', max_length=64),
),
migrations.AlterField(
model_name='userprofile',
name='icon',
field=models.CharField(default='fas fa-user', max_length=64),
),
]
| 27.096774 | 76 | 0.590476 |
4a23ec27db1276404b86c563a4e19aa3ed35d36f | 2,687 | py | Python | app/core/tests/test_modules.py | yahye-farah/recipte-api | b1bc11b99ebf37bb66a8a46bfb5ffe09f14fe361 | [
"MIT"
] | null | null | null | app/core/tests/test_modules.py | yahye-farah/recipte-api | b1bc11b99ebf37bb66a8a46bfb5ffe09f14fe361 | [
"MIT"
] | null | null | null | app/core/tests/test_modules.py | yahye-farah/recipte-api | b1bc11b99ebf37bb66a8a46bfb5ffe09f14fe361 | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email="[email protected]", password="testpass123"):
'''Create a sample user'''
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_success(self):
        '''Test that a user with an email is created successfully'''
email = "[email protected]"
password = "test123"
user = get_user_model().objects.create_user(
email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalizaion(self):
        '''Test that the email is normalized before saving'''
email = "[email protected]"
user = get_user_model().objects.create_user(email, "test")
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
        '''Test that an error is raised when an invalid email is provided'''
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test')
def test_new_super_user_email(self):
'''Test creating super users'''
email = "[email protected]"
password = "test123"
user = get_user_model().objects.create_superuser(
email=email, password=password)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
'''Test tag string representation'''
tag = models.Tag.objects.create(
user=sample_user(),
name="Vegan"
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
'''Test the ingredient string representations'''
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name="Cucumber"
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
'''Test the recipe string representations'''
recipe = models.Recipe.objects.create(
user=sample_user(),
title="Steak and mushroom sauce",
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
'''Test that image is saved in the correct location'''
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
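# Illustrative sketch (not part of the original tests): the mocked-uuid test above
# implies an implementation in core.models roughly like the following (assumed, not
# verified against the actual project code):
#
#   def recipe_image_file_path(instance, filename):
#       ext = filename.split('.')[-1]
#       return os.path.join('uploads/recipe/', f'{uuid.uuid4()}.{ext}')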
| 34.012658 | 70 | 0.646818 |
4a23ecbf679f8fe8afbf82509b58056a6efd74ec | 1,292 | py | Python | crlib.py | Zst/CRWar2Track | 6138898add5e242488ee91670b2e636007d1a2af | [
"MIT"
] | null | null | null | crlib.py | Zst/CRWar2Track | 6138898add5e242488ee91670b2e636007d1a2af | [
"MIT"
] | null | null | null | crlib.py | Zst/CRWar2Track | 6138898add5e242488ee91670b2e636007d1a2af | [
"MIT"
] | null | null | null | # Common utils for accessing Clash Royale API
import requests
import sys
def load_auth():
try:
with open("auth.txt", "r") as f:
rv = "Bearer " + f.read().rstrip()
return rv
except IOError:
sys.exit("Could not read authentication token, make sure one is available in an auth.txt file.")
auth = load_auth()
def get_clan_name(clan_tag):
r = requests.get("https://api.clashroyale.com/v1/clans/%23" + clan_tag,
headers={"Accept": "application/json", "authorization": auth},
params={"limit": 50, "clanTag": clan_tag})
return r.json()["name"]
def get_player_name(player_tag):
r = requests.get("https://api.clashroyale.com/v1/players/%23" + player_tag,
headers={"Accept": "application/json", "authorization": auth},
params={"limit": 50, "playerTag": player_tag})
return r.json()["name"]
def clan_member_tags(ct):
tags = []
r = requests.get("https://api.clashroyale.com/v1/clans/%23" + ct + "/members",
headers={"Accept": "application/json", "authorization": auth},
params={"limit": 50})
members = r.json()["items"]
for m in members:
tags.append(m["tag"][1:])
return tags
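# Illustrative usage sketch (not part of the original module; the clan tag is a placeholder):
#
#   print(get_clan_name("2PP"))
#   for tag in clan_member_tags("2PP"):
#       print(get_player_name(tag))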
| 30.761905 | 104 | 0.580495 |
4a23eccd85192c47f45b02070123288ff6c2d365 | 5,632 | py | Python | dataflow.py | chengsoonong/nbtidy | a3d8efbaab91fbfcaaecfe38e280eb633e645f3e | [
"MIT"
] | null | null | null | dataflow.py | chengsoonong/nbtidy | a3d8efbaab91fbfcaaecfe38e280eb633e645f3e | [
"MIT"
] | 1 | 2015-09-23T06:58:32.000Z | 2015-10-06T02:15:51.000Z | dataflow.py | chengsoonong/nbtidy | a3d8efbaab91fbfcaaecfe38e280eb633e645f3e | [
"MIT"
] | null | null | null | """Extract file read and file write statements from jupyter notebooks.
Try to figure out the data flow from the file names.
"""
import os.path
import re
import nbformat
import argparse
from graphviz import Digraph
# ambiguous = ['\.Open', '\.open']
reader_names = ['\.read_csv', '\.read_hdf', '\.load']
writer_names = ['\.to_csv', '\.savefig', '\.to_hdf', '\.dump']
def find_filenames(fname, verbose):
"""Look inside code cells, extract filenames"""
nb = nbformat.read(fname, as_version=4)
input_files = []
output_files = []
for cell in nb.cells:
if cell.cell_type != 'code':
continue
cell_id = cell.execution_count
if verbose and cell_id is not None:
print('Code cell %d' % cell_id)
code_str = cell.source
if verbose:
print(code_str)
input_files.extend(code2name(code_str, reader_names, verbose))
output_files.extend(code2name(code_str, writer_names, verbose))
return input_files, output_files
def code2name(code_str, keywords, verbose):
"""Extract the file name from a piece of code"""
names = []
for keyword in keywords:
for match in re.finditer(r'(.*)%s(.*)' % keyword, code_str):
if verbose:
print('code2name: %s' % match)
# get rid of the brackets
try:
to_parse = match.group(2).split('(')[1].split(')')[0].split(',')[0]
except IndexError:
print('Could not parse %s' % match)
continue
if '"' in to_parse:
fname = to_parse.split('"')[1]
elif "'" in to_parse:
fname = to_parse.split("'")[1]
else:
fname = to_parse
names.append(fname)
return names
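# Illustrative sketch (not part of the original module): code2name() extracts the first
# string argument of a matching call, e.g.
#
#   code2name("df = pd.read_csv('data.csv')", [r'\.read_csv'], verbose=False)
#   # -> ['data.csv']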
def construct_dict(dir_name, fnames, verbose):
"""Contruct a dictionary, like a dask graph,
from the list of input notebooks.
"""
workflow = {}
for name in fnames:
print('Processing %s' % name)
input_files, output_files = find_filenames(dir_name+'/'+name, verbose)
if verbose:
print('Input files')
print(input_files)
print('Output files')
print(output_files)
workflow[name] = {'input': input_files, 'output': output_files}
return workflow
def data_colour(fname):
"""Colour nodes based on file extension"""
colour = {'.csv': 'palegreen',
'.tsv': 'palegreen',
'.pdf': 'lightblue',
'.pickle': 'yellow',
'.pkl': 'yellow',
'.gz': 'palegreen1',
'.png': 'lightblue1',
'.h5': 'palegreen2',
'.shp': 'palegreen3',
'.dbf': 'palegreen3',
}
extension = os.path.splitext(fname)[1]
try:
c = colour[extension]
except KeyError:
print('Unknown extension for file name')
print(fname)
c = 'tomato'
return c
def to_graphviz(workflow, LR):
"""Convert dictionary to a dot graph."""
g = Digraph(name='dataflow')
if LR:
g.graph_attr['rankdir'] = 'LR'
else:
g.graph_attr['rankdir'] = 'TB'
seen = set()
cache = {}
for nbname, v in workflow.items():
g.node(nbname, shape='box3d')
for fname in v['input']:
if fname not in seen:
seen.add(fname)
g.node(fname, shape='octagon', style='filled',
fillcolor=data_colour(fname))
g.edge(fname, nbname)
for fname in v['output']:
if fname not in seen:
seen.add(fname)
g.node(fname, shape='octagon', style='filled',
fillcolor=data_colour(fname))
g.edge(nbname, fname)
g.node('Generated by dataflow.py from https://github.com/chengsoonong/nbtidy',
shape='none')
return g
def demo():
fnames = []
dir_name = 'examples'
for name in ['toyA', 'toyB', 'toyC']:
fnames.append('%s.ipynb' % name)
    workflow = construct_dict(dir_name, fnames, verbose=False)
print(workflow)
    g = to_graphviz(workflow, LR=True)
data = g.pipe(format='pdf')
with open('toy.pdf', 'wb') as f:
f.write(data)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('targets', help='List of files to analyse',
nargs='*')
parser.add_argument('-d', '--directory', help='Folder name (default is .)',
nargs='?', default='.')
parser.add_argument('-o', '--output', help='PDF file name (default dataflow.pdf)',
nargs='?', default='mydataflow.pdf')
parser.add_argument('-t', '--topdown', help='Draw graph top to bottom (default is left to right)',
dest='LR', action='store_false')
parser.add_argument('-l', '--leftright', help='Draw graph left to right (default)',
dest='LR', action='store_true')
parser.set_defaults(LR=True)
parser.add_argument('-v', '--verbose', help='Verbose output (default=False)',
dest='V', action='store_true')
parser.set_defaults(V=False)
args = parser.parse_args()
targets = [t for t in args.targets]
if len(targets) == 0:
print('---- Nothing to do ----')
parser.print_help()
exit(0)
workflow = construct_dict(args.directory, targets, args.V)
graph = to_graphviz(workflow, args.LR)
data = graph.pipe(format='pdf')
with open(args.output, 'wb') as f:
f.write(data)
| 34.341463 | 102 | 0.553445 |
4a23ed8a4c3c9ccedb269490e54307b693724e35 | 5,694 | py | Python | venv/lib/python2.7/dist-packages/landscape/manager/shutdownmanager.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | null | null | null | venv/lib/python2.7/dist-packages/landscape/manager/shutdownmanager.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | null | null | null | venv/lib/python2.7/dist-packages/landscape/manager/shutdownmanager.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | null | null | null | import logging
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.error import ProcessDone
from landscape.manager.plugin import ManagerPlugin, SUCCEEDED, FAILED
class ShutdownFailedError(Exception):
"""Raised when a call to C{/sbin/shutdown} fails.
@ivar data: The data that the process printed before failing.
"""
def __init__(self, data):
self.data = data
class ShutdownManager(ManagerPlugin):
def __init__(self, process_factory=None):
if process_factory is None:
from twisted.internet import reactor as process_factory
self._process_factory = process_factory
def register(self, registry):
"""Add this plugin to C{registry}.
The shutdown manager handles C{shutdown} activity messages broadcast
from the server.
"""
super(ShutdownManager, self).register(registry)
registry.register_message("shutdown", self.perform_shutdown)
def perform_shutdown(self, message):
"""Request a system restart or shutdown.
If the call to C{/sbin/shutdown} runs without errors the activity
specified in the message will be responded as succeeded. Otherwise,
it will be responded as failed.
"""
operation_id = message["operation-id"]
reboot = message["reboot"]
protocol = ShutdownProcessProtocol()
protocol.set_timeout(self.registry.reactor)
protocol.result.addCallback(self._respond_success, operation_id)
protocol.result.addErrback(self._respond_failure, operation_id)
command, args = self._get_command_and_args(protocol, reboot)
self._process_factory.spawnProcess(protocol, command, args=args)
def _respond_success(self, data, operation_id):
logging.info("Shutdown request succeeded.")
deferred = self._respond(SUCCEEDED, data, operation_id)
# After sending the result to the server, stop accepting messages and
# wait for the reboot/shutdown.
deferred.addCallback(
lambda _: self.registry.broker.stop_exchanger())
return deferred
def _respond_failure(self, failure, operation_id):
logging.info("Shutdown request failed.")
return self._respond(FAILED, failure.value.data, operation_id)
def _respond(self, status, data, operation_id):
message = {"type": "operation-result",
"status": status,
"result-text": data,
"operation-id": operation_id}
return self.registry.broker.send_message(
message, self._session_id, True)
def _get_command_and_args(self, protocol, reboot):
"""
Returns a C{command, args} 2-tuple suitable for use with
L{IReactorProcess.spawnProcess}.
"""
minutes = "+%d" % (protocol.delay // 60,)
if reboot:
args = ["/sbin/shutdown", "-r", minutes,
"Landscape is rebooting the system"]
else:
args = ["/sbin/shutdown", "-h", minutes,
"Landscape is shutting down the system"]
return "/sbin/shutdown", args
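# Added note (not part of the original module): with ShutdownProcessProtocol's default
# delay of 240 seconds the minutes argument becomes "+4", so a reboot request spawns
#   /sbin/shutdown -r +4 "Landscape is rebooting the system"
# and a plain shutdown spawns
#   /sbin/shutdown -h +4 "Landscape is shutting down the system"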
class ShutdownProcessProtocol(ProcessProtocol):
"""A ProcessProtocol for calling C{/sbin/shutdown}.
C{shutdown} doesn't return immediately when a time specification is
provided. Failures are reported immediately after it starts and return a
non-zero exit code. The process protocol calls C{shutdown} and waits for
failures for C{timeout} seconds. If no failures are reported it fires
C{result}'s callback with whatever output was received from the process.
If failures are reported C{result}'s errback is fired.
@ivar result: A L{Deferred} fired when C{shutdown} fails or
succeeds.
@ivar reboot: A flag indicating whether a shutdown or reboot should be
performed. Default is C{False}.
@ivar delay: The time in seconds from now to schedule the shutdown.
Default is 240 seconds. The time will be converted to minutes using
integer division when passed to C{shutdown}.
"""
def __init__(self, reboot=False, delay=240):
self.result = Deferred()
self.reboot = reboot
self.delay = delay
self._data = []
self._waiting = True
def get_data(self):
"""Get the data printed by the subprocess."""
return "".join(self._data)
def set_timeout(self, reactor, timeout=10):
"""
Set the error checking timeout, after which C{result}'s callback will
be fired.
"""
reactor.call_later(timeout, self._succeed)
def childDataReceived(self, fd, data):
"""Some data was received from the child.
Add it to our buffer to pass to C{result} when it's fired.
"""
if self._waiting:
self._data.append(data)
def processEnded(self, reason):
"""Fire back the C{result} L{Deferred}.
C{result}'s callback will be fired with the string of data received
from the subprocess, or if the subprocess failed C{result}'s errback
will be fired with the string of data received from the subprocess.
"""
if self._waiting:
if reason.check(ProcessDone):
self._succeed()
else:
self.result.errback(ShutdownFailedError(self.get_data()))
self._waiting = False
def _succeed(self):
"""Fire C{result}'s callback with data accumulated from the process."""
if self._waiting:
self.result.callback(self.get_data())
self._waiting = False
| 37.460526 | 79 | 0.652266 |
4a23eea056b9feaf93391244a42939f7765f13d4 | 5,692 | py | Python | bindings/python/ensmallen_graph/datasets/networkrepository/maayanfoodweb.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/networkrepository/maayanfoodweb.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/networkrepository/maayanfoodweb.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph maayan-foodweb.
The graph is automatically retrieved from the NetworkRepository repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 08:14:05.406899
The undirected graph maayan-foodweb has 183 nodes and 2452 unweighted edges,
of which 18 are self-loops. The graph is quite dense as it has a density
of 0.14670 and is connected, as it has a single component. The graph median
node degree is 25, the mean node degree is 26.70, and the node degree mode
is 4. The top 5 most central nodes are 70 (degree 106), 65 (degree 99),
74 (degree 89), 76 (degree 85) and 75 (degree 82).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import MaayanFoodweb
# Then load the graph
graph = MaayanFoodweb()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
        train_size=0.8,
        # The random state is used to reproduce the holdout.
        random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def MaayanFoodweb(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the maayan-foodweb graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of maayan-foodweb graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 08:14:05.406899
The undirected graph maayan-foodweb has 183 nodes and 2452 unweighted edges,
of which 18 are self-loops. The graph is quite dense as it has a density
of 0.14670 and is connected, as it has a single component. The graph median
node degree is 25, the mean node degree is 26.70, and the node degree mode
is 4. The top 5 most central nodes are 70 (degree 106), 65 (degree 99),
74 (degree 89), 76 (degree 85) and 75 (degree 82).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import MaayanFoodweb
# Then load the graph
graph = MaayanFoodweb()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
        train_size=0.8,
        # The random state is used to reproduce the holdout.
        random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="MaayanFoodweb",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 31.798883 | 94 | 0.684469 |
4a23ef17f4bcf692ba445f2320565b535956b1b3 | 2,471 | py | Python | mindspore/train/callback/_time_monitor.py | Greatpanc/mindspore_zhb | c2511f7d6815b9232ac4427e27e2c132ed03e0d9 | [
"Apache-2.0"
] | null | null | null | mindspore/train/callback/_time_monitor.py | Greatpanc/mindspore_zhb | c2511f7d6815b9232ac4427e27e2c132ed03e0d9 | [
"Apache-2.0"
] | null | null | null | mindspore/train/callback/_time_monitor.py | Greatpanc/mindspore_zhb | c2511f7d6815b9232ac4427e27e2c132ed03e0d9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TimeMonitor Callback class."""
import time
from ._callback import Callback
class TimeMonitor(Callback):
"""
Monitor the time in training.
Args:
        data_size (int): Number of steps per epoch, used to compute the per-step time.
            If the run context provides `batch_num` during training, `batch_num` takes
            precedence over `data_size`. Default: None.
    Raises:
        ValueError: If the resolved step size is not a positive int.
"""
def __init__(self, data_size=None):
super(TimeMonitor, self).__init__()
self.data_size = data_size
self.epoch_time = time.time()
def epoch_begin(self, run_context):
"""
Record time at the begin of epoch.
Args:
run_context (RunContext): Context of the process running.
"""
self.epoch_time = time.time()
def epoch_end(self, run_context):
"""
Print process cost time at the end of epoch.
Args:
run_context (RunContext): Context of the process running.
"""
epoch_seconds = (time.time() - self.epoch_time) * 1000
step_size = self.data_size
cb_params = run_context.original_args()
if hasattr(cb_params, "batch_num"):
batch_num = cb_params.batch_num
if isinstance(batch_num, int) and batch_num > 0:
step_size = cb_params.batch_num
if not isinstance(step_size, int) or step_size < 1:
raise ValueError("For 'TimeMonitor', step size must be positive integer, "
"but got {}.".format(step_size))
step_seconds = epoch_seconds / step_size
print("epoch time: {:5.3f} ms, per step time: {:5.3f} ms".format(epoch_seconds, step_seconds), flush=True)
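# Typical usage sketch (added for illustration; `model`, `ds_train` and the epoch count
# are assumptions, not part of this module):
#
#   monitor = TimeMonitor(data_size=ds_train.get_dataset_size())
#   model.train(10, ds_train, callbacks=[monitor])
#
# At the end of each epoch the callback prints the epoch time and the per-step time
# derived from `data_size` (or from `batch_num` when the run context provides it).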
| 35.3 | 114 | 0.633347 |
4a23ef83cd5166532b14246dcbb0bbe393da8c7b | 1,609 | py | Python | data/subsample_documents.py | gtzinos/Multi-Instance-Learning | f7d4a5d05ea9c6efe2a405d710d710b4ae4df05e | [
"MIT"
] | 4 | 2018-06-19T12:45:47.000Z | 2019-06-25T18:31:23.000Z | data/subsample_documents.py | gtzinos/Multi-Instance-Learning | f7d4a5d05ea9c6efe2a405d710d710b4ae4df05e | [
"MIT"
] | 1 | 2018-12-29T09:04:09.000Z | 2018-12-29T09:04:09.000Z | data/subsample_documents.py | gtzinos/Multi-Instance-Learning | f7d4a5d05ea9c6efe2a405d710d710b4ae4df05e | [
"MIT"
] | 1 | 2021-12-07T13:47:57.000Z | 2021-12-07T13:47:57.000Z | import re
import numpy as np
np.random.seed(1000001)
source_location = '/home/studadmin/Desktop/delicioust140_documents/fdocuments'
docnamefile = 'DocNameTag.txt'
outdocfile = 'Subsampled_DocNameTag.txt'
Classes = set(['reference', 'design', 'programming', 'internet', 'computer', 'web', 'java', 'writing', 'english', 'grammar', 'style', 'language', 'books', 'education', 'philosophy', 'politics', 'religion', 'science', 'history','culture'])
doc_w_lbl = {}
docnames = open(docnamefile).readlines()
doc_lbls = list()
doc_fnames = list()
numlbls = list()
d = 0
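# Each DocNameTag line is expected to look like '<filename.html><tag1|tag2|...>';
# only .html documents are kept and only tags from the Classes set above are counted.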
for ln in docnames:
stuff = re.findall('<(.*?)>', ln)
fname = stuff[0]
tags = stuff[1].split('|')
topics = [x for x in tags if x in Classes]
#fname = '%s/%s/%s' %(source_location,fname[:2],fname)
if '.html' not in fname:
continue
numlbls.append(len(topics))
doc_lbls.extend(['%s' %('|'.join(topics))])
doc_fnames.extend([fname])
for x in topics:
try:
doc_w_lbl[x].append(d)
except KeyError:
doc_w_lbl.update({x:[d]})
d += 1
if d%5000 == 0:
print(d)
numlbls = np.array(numlbls)
# sample 1100 from each lbl
all_ind = set()
for x in Classes:
prob = numlbls[doc_w_lbl[x]]/float(np.sum(numlbls[doc_w_lbl[x]]))
ind = np.random.choice(doc_w_lbl[x], min(1100, len(doc_w_lbl[x])), p = prob, replace = False)
all_ind = all_ind.union(set(ind))
# sample 1000 docs with no label
ind = np.random.choice(np.where(numlbls == 0)[0], 1000, replace = False)
all_ind = all_ind.union(set(ind))
all_ind = list(all_ind)
fp = open(outdocfile, 'w')
for d in all_ind:
fp.write('<%s><%s>\n' %(doc_fnames[d], doc_lbls[d]))
fp.close()
| 26.816667 | 238 | 0.670603 |
4a23f077147f1890c322d606616ee52c6fa3ffa2 | 4,185 | py | Python | tools/boundhelper.py | ranstar74/Sollumz | 7bb6963885e11b929915913699957e3ac9aa8815 | [
"MIT"
] | null | null | null | tools/boundhelper.py | ranstar74/Sollumz | 7bb6963885e11b929915913699957e3ac9aa8815 | [
"MIT"
] | null | null | null | tools/boundhelper.py | ranstar74/Sollumz | 7bb6963885e11b929915913699957e3ac9aa8815 | [
"MIT"
] | null | null | null | import bpy
from ..sollumz_properties import SollumType, SOLLUMZ_UI_NAMES, BOUND_POLYGON_TYPES
from ..ybn.collision_materials import create_collision_material_from_index
from ..tools.meshhelper import create_box, create_sphere, create_capsule, create_cylinder
from mathutils import Vector, Matrix
def create_bound_shape(type):
pobj = create_mesh(type)
# Constrain scale for bound polys
if pobj.sollum_type in BOUND_POLYGON_TYPES and type != SollumType.BOUND_POLY_BOX and type != SollumType.BOUND_POLY_TRIANGLE:
constraint = pobj.constraints.new(type='LIMIT_SCALE')
constraint.use_transform_limit = True
# Why blender? So ugly
constraint.use_min_x = True
constraint.use_min_y = True
constraint.use_min_z = True
constraint.use_max_x = True
constraint.use_max_y = True
constraint.use_max_z = True
constraint.min_x = 1
constraint.min_y = 1
constraint.min_z = 1
constraint.max_x = 1
constraint.max_y = 1
constraint.max_z = 1
if type == SollumType.BOUND_POLY_BOX:
create_box(pobj.data)
elif type == SollumType.BOUND_BOX:
pobj.bound_dimensions = Vector((1, 1, 1))
elif type == SollumType.BOUND_SPHERE or type == SollumType.BOUND_POLY_SPHERE:
pobj.bound_radius = 1
elif type == SollumType.BOUND_POLY_CAPSULE:
pobj.bound_radius = 1
pobj.bound_length = 1
elif type == SollumType.BOUND_CAPSULE:
pobj.bound_radius = 1
pobj.margin = 0.5
elif type == SollumType.BOUND_CYLINDER or type == SollumType.BOUND_POLY_CYLINDER:
pobj.bound_length = 2
pobj.bound_radius = 1
elif type == SollumType.BOUND_DISC:
pobj.margin = 0.04
pobj.bound_radius = 1
return pobj
def create_bound(sollum_type=SollumType.BOUND_COMPOSITE, aobj=None):
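    """Create an empty object of the given bound Sollum type, link it to the active
    collection, make it the active object and, when `aobj` is a Bound Composite,
    parent the new empty to it."""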
empty = bpy.data.objects.new(SOLLUMZ_UI_NAMES[sollum_type], None)
empty.empty_display_size = 0
empty.sollum_type = sollum_type
bpy.context.collection.objects.link(empty)
bpy.context.view_layer.objects.active = bpy.data.objects[empty.name]
if aobj:
if aobj.sollum_type == SollumType.BOUND_COMPOSITE:
empty.parent = aobj
return empty
def create_mesh(sollum_type):
name = SOLLUMZ_UI_NAMES[sollum_type]
mesh = bpy.data.meshes.new(name)
obj = bpy.data.objects.new(name, mesh)
obj.sollum_type = sollum_type
obj.data.materials.append(create_collision_material_from_index(0))
bpy.context.collection.objects.link(obj)
return obj
def convert_selected_to_bound(selected, use_name, multiple, bvhs, replace_original, do_center=True):
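    """Convert the selected mesh objects into Sollumz bound composites.
    With `multiple` False, a single Bound Composite with one GeometryBVH/Geometry child
    collects every mesh (optionally recentred around their average location); otherwise
    each object gets its own composite. Each mesh is either converted in place
    (`replace_original`) or duplicated as a Bound Poly Triangle child. Returns the list
    of created composite objects.
    """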
center = Vector()
cobjs = []
if not multiple:
cobj = create_bound()
cobjs.append(cobj)
gobj = create_bound(SollumType.BOUND_GEOMETRYBVH) if bvhs else create_bound(
SollumType.BOUND_GEOMETRY)
gobj.parent = cobj
if do_center:
for obj in selected:
center += obj.location
center /= len(selected)
gobj.location = center
for obj in selected:
if multiple:
cobj = create_bound()
cobjs.append(cobj)
gobj = create_bound(SollumType.BOUND_GEOMETRYBVH) if bvhs else create_bound(
SollumType.BOUND_GEOMETRY)
gobj.parent = cobj
if do_center:
gobj.location = obj.location
obj.location = Vector()
elif do_center:
obj.location -= center
if obj.type == 'MESH':
name = obj.name
poly_mesh = obj if replace_original else create_mesh(
SollumType.BOUND_POLY_TRIANGLE)
poly_mesh.parent = gobj
if replace_original:
poly_mesh.name = SOLLUMZ_UI_NAMES[SollumType.BOUND_POLY_TRIANGLE]
# set properties
poly_mesh.sollum_type = SollumType.BOUND_POLY_TRIANGLE
else:
poly_mesh.data = obj.data.copy()
poly_mesh.matrix_world = obj.matrix_world
if use_name:
cobj.name = name
return cobjs
| 32.44186 | 128 | 0.651852 |
4a23f117e0e4bbb2acd124b7fb678a1ecb8035c8 | 18,281 | py | Python | cwltool/load_tool.py | michael-kotliar/cwltool | 0bae6e4466cacbd03a8b9cc074f0920516c959a7 | [
"Apache-2.0"
] | null | null | null | cwltool/load_tool.py | michael-kotliar/cwltool | 0bae6e4466cacbd03a8b9cc074f0920516c959a7 | [
"Apache-2.0"
] | null | null | null | cwltool/load_tool.py | michael-kotliar/cwltool | 0bae6e4466cacbd03a8b9cc074f0920516c959a7 | [
"Apache-2.0"
] | null | null | null | """Loads a CWL document."""
import hashlib
import logging
import os
import re
import urllib
import uuid
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Tuple,
Union,
cast,
)
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri
from schema_salad.schema import validate_doc
from schema_salad.sourceline import SourceLine, cmap
from schema_salad.utils import (
ContextType,
FetcherCallableType,
IdxResultType,
ResolveType,
json_dumps,
)
from . import CWL_CONTENT_TYPES, process, update
from .context import LoadingContext
from .errors import WorkflowException
from .loghandler import _logger
from .process import Process, get_schema, shortname
from .update import ALLUPDATES
from .utils import CWLObjectType, ResolverType, visit_class
jobloaderctx = {
"cwl": "https://w3id.org/cwl/cwl#",
"cwltool": "http://commonwl.org/cwltool#",
"path": {"@type": "@id"},
"location": {"@type": "@id"},
"id": "@id",
} # type: ContextType
overrides_ctx = {
"overrideTarget": {"@type": "@id"},
"cwltool": "http://commonwl.org/cwltool#",
"http://commonwl.org/cwltool#overrides": {
"@id": "cwltool:overrides",
"mapSubject": "overrideTarget",
},
"requirements": {
"@id": "https://w3id.org/cwl/cwl#requirements",
"mapSubject": "class",
},
} # type: ContextType
def default_loader(
fetcher_constructor: Optional[FetcherCallableType] = None,
enable_dev: bool = False,
doc_cache: bool = True,
) -> Loader:
return Loader(
jobloaderctx,
fetcher_constructor=fetcher_constructor,
allow_attachments=lambda r: enable_dev,
doc_cache=doc_cache,
)
def resolve_tool_uri(
argsworkflow: str,
resolver: Optional[ResolverType] = None,
fetcher_constructor: Optional[FetcherCallableType] = None,
document_loader: Optional[Loader] = None,
) -> Tuple[str, str]:
uri = None # type: Optional[str]
split = urllib.parse.urlsplit(argsworkflow)
    # On Windows paths, urlsplit can misjudge a drive letter as the scheme, so only accept real URL schemes here
if split.scheme and split.scheme in ["http", "https", "file"]:
uri = argsworkflow
elif os.path.exists(os.path.abspath(argsworkflow)):
uri = file_uri(str(os.path.abspath(argsworkflow)))
elif resolver is not None:
uri = resolver(
document_loader or default_loader(fetcher_constructor), argsworkflow
)
if uri is None:
raise ValidationException("Not found: '%s'" % argsworkflow)
if argsworkflow != uri:
_logger.info("Resolved '%s' to '%s'", argsworkflow, uri)
fileuri = urllib.parse.urldefrag(uri)[0]
return uri, fileuri
def fetch_document(
argsworkflow: Union[str, CWLObjectType],
loadingContext: Optional[LoadingContext] = None,
) -> Tuple[LoadingContext, CommentedMap, str]:
"""Retrieve a CWL document."""
if loadingContext is None:
loadingContext = LoadingContext()
loadingContext.loader = default_loader()
else:
loadingContext = loadingContext.copy()
if loadingContext.loader is None:
loadingContext.loader = default_loader(
loadingContext.fetcher_constructor,
enable_dev=loadingContext.enable_dev,
doc_cache=loadingContext.doc_cache,
)
if isinstance(argsworkflow, str):
uri, fileuri = resolve_tool_uri(
argsworkflow,
resolver=loadingContext.resolver,
document_loader=loadingContext.loader,
)
workflowobj = cast(
CommentedMap,
loadingContext.loader.fetch(fileuri, content_types=CWL_CONTENT_TYPES),
)
return loadingContext, workflowobj, uri
if isinstance(argsworkflow, MutableMapping):
uri = (
cast(str, argsworkflow["id"])
if argsworkflow.get("id")
else "_:" + str(uuid.uuid4())
)
workflowobj = cast(
CommentedMap, cmap(cast(Dict[str, Any], argsworkflow), fn=uri)
)
loadingContext.loader.idx[uri] = workflowobj
return loadingContext, workflowobj, uri
raise ValidationException("Must be URI or object: '%s'" % argsworkflow)
def _convert_stdstreams_to_files(
workflowobj: Union[
CWLObjectType, MutableSequence[Union[CWLObjectType, str, int]], str
]
) -> None:
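    """Rewrite stdout/stderr/stdin shortcuts as explicit File parameters.
    For every CommandLineTool found while walking the document, outputs of type
    ``stdout``/``stderr`` become File outputs with a glob on the declared (or
    content-hash derived) file name, and inputs of type ``stdin`` become File
    inputs wired to the tool's ``stdin`` expression.
    """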
if isinstance(workflowobj, MutableMapping):
if workflowobj.get("class") == "CommandLineTool":
with SourceLine(
workflowobj,
"outputs",
ValidationException,
_logger.isEnabledFor(logging.DEBUG),
):
outputs = workflowobj.get("outputs", [])
if not isinstance(outputs, CommentedSeq):
raise ValidationException('"outputs" section is not ' "valid.")
for out in cast(
MutableSequence[CWLObjectType], workflowobj.get("outputs", [])
):
if not isinstance(out, CommentedMap):
raise ValidationException(
"Output '{}' is not a valid OutputParameter.".format(out)
)
for streamtype in ["stdout", "stderr"]:
if out.get("type") == streamtype:
if "outputBinding" in out:
raise ValidationException(
"Not allowed to specify outputBinding when"
" using %s shortcut." % streamtype
)
if streamtype in workflowobj:
filename = workflowobj[streamtype]
else:
filename = str(
hashlib.sha1( # nosec
json_dumps(workflowobj, sort_keys=True).encode(
"utf-8"
)
).hexdigest()
)
workflowobj[streamtype] = filename
out["type"] = "File"
out["outputBinding"] = cmap({"glob": filename})
for inp in cast(
MutableSequence[CWLObjectType], workflowobj.get("inputs", [])
):
if inp.get("type") == "stdin":
if "inputBinding" in inp:
raise ValidationException(
"Not allowed to specify inputBinding when"
" using stdin shortcut."
)
if "stdin" in workflowobj:
raise ValidationException(
"Not allowed to specify stdin path when"
" using stdin type shortcut."
)
else:
workflowobj["stdin"] = (
"$(inputs.%s.path)"
% cast(str, inp["id"]).rpartition("#")[2]
)
inp["type"] = "File"
else:
for entry in workflowobj.values():
_convert_stdstreams_to_files(
cast(
Union[
CWLObjectType,
MutableSequence[Union[CWLObjectType, str, int]],
str,
],
entry,
)
)
if isinstance(workflowobj, MutableSequence):
for entry in workflowobj:
_convert_stdstreams_to_files(
cast(
Union[
CWLObjectType,
MutableSequence[Union[CWLObjectType, str, int]],
str,
],
entry,
)
)
def _add_blank_ids(
workflowobj: Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str]]]
) -> None:
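    """Assign a generated UUID ``id`` to any embedded ``run`` process that lacks one."""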
if isinstance(workflowobj, MutableMapping):
if (
"run" in workflowobj
and isinstance(workflowobj["run"], MutableMapping)
and "id" not in workflowobj["run"]
and "$import" not in workflowobj["run"]
):
workflowobj["run"]["id"] = str(uuid.uuid4())
for entry in workflowobj.values():
_add_blank_ids(
cast(
Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str]]],
entry,
)
)
if isinstance(workflowobj, MutableSequence):
for entry in workflowobj:
_add_blank_ids(
cast(
Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str]]],
entry,
)
)
def resolve_and_validate_document(
loadingContext: LoadingContext,
workflowobj: Union[CommentedMap, CommentedSeq],
uri: str,
preprocess_only: bool = False,
skip_schemas: Optional[bool] = None,
) -> Tuple[LoadingContext, str]:
"""Validate a CWL document."""
if not loadingContext.loader:
raise ValueError("loadingContext must have a loader.")
else:
loader = loadingContext.loader
loadingContext = loadingContext.copy()
if not isinstance(workflowobj, MutableMapping):
        raise ValueError(
            "workflowobj must be a dict, got '{}': {}".format(
type(workflowobj), workflowobj
)
)
jobobj = None
if "cwl:tool" in workflowobj:
jobobj, _ = loader.resolve_all(workflowobj, uri)
uri = urllib.parse.urljoin(uri, workflowobj["https://w3id.org/cwl/cwl#tool"])
del cast(Dict[str, Any], jobobj)["https://w3id.org/cwl/cwl#tool"]
workflowobj = fetch_document(uri, loadingContext)[1]
fileuri = urllib.parse.urldefrag(uri)[0]
cwlVersion = loadingContext.metadata.get("cwlVersion")
if not cwlVersion:
cwlVersion = workflowobj.get("cwlVersion")
if not cwlVersion and fileuri != uri:
# The tool we're loading is a fragment of a bigger file. Get
# the document root element and look for cwlVersion there.
metadata = cast(CWLObjectType, fetch_document(fileuri, loadingContext)[1])
cwlVersion = cast(str, metadata.get("cwlVersion"))
if not cwlVersion:
raise ValidationException(
"No cwlVersion found. "
"Use the following syntax in your CWL document to declare "
"the version: cwlVersion: <version>.\n"
"Note: if this is a CWL draft-2 (pre v1.0) document then it "
"will need to be upgraded first."
)
if not isinstance(cwlVersion, str):
with SourceLine(workflowobj, "cwlVersion", ValidationException):
raise ValidationException(
"'cwlVersion' must be a string, got {}".format(type(cwlVersion))
)
# strip out version
cwlVersion = re.sub(r"^(?:cwl:|https://w3id.org/cwl/cwl#)", "", cwlVersion)
if cwlVersion not in list(ALLUPDATES):
# print out all the Supported Versions of cwlVersion
versions = []
for version in list(ALLUPDATES):
if "dev" in version:
version += " (with --enable-dev flag only)"
versions.append(version)
versions.sort()
raise ValidationException(
"The CWL reference runner no longer supports pre CWL v1.0 "
"documents. Supported versions are: "
"\n{}".format("\n".join(versions))
)
if (
isinstance(jobobj, CommentedMap)
and "http://commonwl.org/cwltool#overrides" in jobobj
):
loadingContext.overrides_list.extend(resolve_overrides(jobobj, uri, uri))
del jobobj["http://commonwl.org/cwltool#overrides"]
if (
isinstance(jobobj, CommentedMap)
and "https://w3id.org/cwl/cwl#requirements" in jobobj
):
if cwlVersion not in ("v1.1.0-dev1", "v1.1"):
raise ValidationException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1 or greater."
)
loadingContext.overrides_list.append(
{
"overrideTarget": uri,
"requirements": jobobj["https://w3id.org/cwl/cwl#requirements"],
}
)
del jobobj["https://w3id.org/cwl/cwl#requirements"]
(sch_document_loader, avsc_names) = process.get_schema(cwlVersion)[:2]
if isinstance(avsc_names, Exception):
raise avsc_names
processobj = None # type: Optional[ResolveType]
document_loader = Loader(
sch_document_loader.ctx,
schemagraph=sch_document_loader.graph,
idx=loader.idx,
cache=sch_document_loader.cache,
fetcher_constructor=loadingContext.fetcher_constructor,
skip_schemas=skip_schemas,
doc_cache=loadingContext.doc_cache,
)
if cwlVersion == "v1.0":
_add_blank_ids(workflowobj)
document_loader.resolve_all(workflowobj, fileuri)
processobj, metadata = document_loader.resolve_ref(uri)
if not isinstance(processobj, (CommentedMap, CommentedSeq)):
raise ValidationException("Workflow must be a CommentedMap or CommentedSeq.")
if not hasattr(processobj.lc, "filename"):
processobj.lc.filename = fileuri
if loadingContext.metadata:
metadata = loadingContext.metadata
if not isinstance(metadata, CommentedMap):
raise ValidationException(
"metadata must be a CommentedMap, was %s" % type(metadata)
)
if isinstance(processobj, CommentedMap):
uri = processobj["id"]
_convert_stdstreams_to_files(workflowobj)
if isinstance(jobobj, CommentedMap):
loadingContext.jobdefaults = jobobj
loadingContext.loader = document_loader
loadingContext.avsc_names = avsc_names
loadingContext.metadata = metadata
if preprocess_only:
return loadingContext, uri
if loadingContext.do_validate:
validate_doc(avsc_names, processobj, document_loader, loadingContext.strict)
# None means default behavior (do update)
if loadingContext.do_update in (True, None):
if "cwlVersion" not in metadata:
metadata["cwlVersion"] = cwlVersion
processobj = update.update(
processobj, document_loader, fileuri, loadingContext.enable_dev, metadata
)
document_loader.idx[processobj["id"]] = processobj
def update_index(pr: CommentedMap) -> None:
if "id" in pr:
document_loader.idx[pr["id"]] = pr
visit_class(
processobj, ("CommandLineTool", "Workflow", "ExpressionTool"), update_index
)
return loadingContext, uri
def make_tool(
uri: Union[str, CommentedMap, CommentedSeq], loadingContext: LoadingContext
) -> Process:
"""Make a Python CWL object."""
if loadingContext.loader is None:
raise ValueError("loadingContext must have a loader")
resolveduri, metadata = loadingContext.loader.resolve_ref(uri)
processobj = None
if isinstance(resolveduri, MutableSequence):
for obj in resolveduri:
if obj["id"].endswith("#main"):
processobj = obj
break
if not processobj:
raise WorkflowException(
"Tool file contains graph of multiple objects, must specify "
"one of #%s"
% ", #".join(
urllib.parse.urldefrag(i["id"])[1] for i in resolveduri if "id" in i
)
)
elif isinstance(resolveduri, MutableMapping):
processobj = resolveduri
else:
raise Exception("Must resolve to list or dict")
tool = loadingContext.construct_tool_object(processobj, loadingContext)
if loadingContext.jobdefaults:
jobobj = loadingContext.jobdefaults
for inp in tool.tool["inputs"]:
if shortname(inp["id"]) in jobobj:
inp["default"] = jobobj[shortname(inp["id"])]
return tool
def load_tool(
argsworkflow: Union[str, CWLObjectType],
loadingContext: Optional[LoadingContext] = None,
) -> Process:
loadingContext, workflowobj, uri = fetch_document(argsworkflow, loadingContext)
loadingContext, uri = resolve_and_validate_document(
loadingContext, workflowobj, uri
)
return make_tool(uri, loadingContext)
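# Minimal usage sketch (added for illustration; "workflow.cwl" is a placeholder path):
#
#   from cwltool.context import LoadingContext
#   tool = load_tool("workflow.cwl", LoadingContext())
#
# Callers that need to inspect or adjust the document between stages can instead call
# fetch_document(), resolve_and_validate_document() and make_tool() one at a time,
# exactly as load_tool() does above.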
def resolve_overrides(
ov: IdxResultType,
ov_uri: str,
baseurl: str,
) -> List[CWLObjectType]:
ovloader = Loader(overrides_ctx)
ret, _ = ovloader.resolve_all(ov, baseurl)
if not isinstance(ret, CommentedMap):
raise Exception("Expected CommentedMap, got %s" % type(ret))
cwl_docloader = get_schema("v1.0")[0]
cwl_docloader.resolve_all(ret, ov_uri)
return cast(List[CWLObjectType], ret["http://commonwl.org/cwltool#overrides"])
def load_overrides(ov: str, base_url: str) -> List[CWLObjectType]:
ovloader = Loader(overrides_ctx)
return resolve_overrides(ovloader.fetch(ov), ov, base_url)
def recursive_resolve_and_validate_document(
loadingContext: LoadingContext,
workflowobj: Union[CommentedMap, CommentedSeq],
uri: str,
preprocess_only: bool = False,
skip_schemas: Optional[bool] = None,
) -> Tuple[LoadingContext, str, Process]:
"""Validate a CWL document, checking that a tool object can be built."""
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=preprocess_only,
skip_schemas=skip_schemas,
)
tool = make_tool(uri, loadingContext)
return loadingContext, uri, tool
| 35.088292 | 99 | 0.588808 |
4a23f17e6970ee827ef4d40de97cedd09f13de60 | 6,120 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/ovirt/ovirt_external_provider_info.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/ovirt/ovirt_external_provider_info.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/ovirt/ovirt_external_provider_info.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_external_provider_info
short_description: Retrieve information about one or more oVirt/RHV external providers
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve information about one or more oVirt/RHV external providers."
- This module was called C(ovirt_external_provider_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(ovirt_external_provider_info) module no longer returns C(ansible_facts)!
notes:
- "This module returns a variable C(ovirt_external_providers), which
contains a list of external_providers. You need to register the result with
the I(register) keyword to use it."
options:
type:
description:
- "Type of the external provider."
choices: ['os_image', 'os_network', 'os_volume', 'foreman']
required: true
name:
description:
- "Name of the external provider, can be used as glob expression."
extends_documentation_fragment: ovirt_info
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather information about all image external providers named C<glance>:
- ovirt_external_provider_info:
type: os_image
name: glance
register: result
- debug:
msg: "{{ result.ovirt_external_providers }}"
'''
RETURN = '''
ovirt_external_providers:
description:
- "List of dictionaries. Content depends on I(type)."
- "For type C(foreman), attributes appearing in the dictionary can be found on your oVirt/RHV instance
at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
- "For type C(os_image), attributes appearing in the dictionary can be found on your oVirt/RHV instance
at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
- "For type C(os_volume), attributes appearing in the dictionary can be found on your oVirt/RHV instance
at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
- "For type C(os_network), attributes appearing in the dictionary can be found on your oVirt/RHV instance
at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
returned: On success
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_info_full_argument_spec,
)
def _external_provider_service(provider_type, system_service):
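    """Return the providers service matching the requested external provider type."""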
if provider_type == 'os_image':
return system_service.openstack_image_providers_service()
elif provider_type == 'os_network':
return system_service.openstack_network_providers_service()
elif provider_type == 'os_volume':
return system_service.openstack_volume_providers_service()
elif provider_type == 'foreman':
return system_service.external_host_providers_service()
def main():
argument_spec = ovirt_info_full_argument_spec(
name=dict(default=None, required=False),
type=dict(
default=None,
required=True,
choices=[
'os_image', 'os_network', 'os_volume', 'foreman',
],
aliases=['provider'],
),
)
module = AnsibleModule(argument_spec)
is_old_facts = module._name == 'ovirt_external_provider_facts'
if is_old_facts:
module.deprecate("The 'ovirt_external_provider_facts' module has been renamed to 'ovirt_external_provider_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
external_providers_service = _external_provider_service(
provider_type=module.params.pop('type'),
system_service=connection.system_service(),
)
if module.params['name']:
external_providers = [
e for e in external_providers_service.list()
if fnmatch.fnmatch(e.name, module.params['name'])
]
else:
external_providers = external_providers_service.list()
result = dict(
ovirt_external_providers=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in external_providers
],
)
if is_old_facts:
module.exit_json(changed=False, ansible_facts=result)
else:
module.exit_json(changed=False, **result)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| 37.777778 | 122 | 0.679575 |
4a23f1c02f2f827896a5f3fc1754b40bc7b328f8 | 6,104 | py | Python | watertap/unit_models/zero_order/tests/test_bioreactor_zo.py | kurbansitterley/watertap | 1a8986a779bdcb36f1481f03eed24c6c42d26481 | [
"BSD-3-Clause-LBNL"
] | null | null | null | watertap/unit_models/zero_order/tests/test_bioreactor_zo.py | kurbansitterley/watertap | 1a8986a779bdcb36f1481f03eed24c6c42d26481 | [
"BSD-3-Clause-LBNL"
] | null | null | null | watertap/unit_models/zero_order/tests/test_bioreactor_zo.py | kurbansitterley/watertap | 1a8986a779bdcb36f1481f03eed24c6c42d26481 | [
"BSD-3-Clause-LBNL"
] | null | null | null | ###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
Tests for zero-order bioreactor model
"""
import pytest
from pyomo.environ import (
Block,
check_optimal_termination,
ConcreteModel,
Constraint,
value,
Var,
)
from pyomo.util.check_units import assert_units_consistent
from idaes.core import FlowsheetBlock
from idaes.core.solvers import get_solver
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.testing import initialization_tester
from idaes.core import UnitModelCostingBlock
from watertap.unit_models.zero_order import BioreactorZO
from watertap.core.wt_database import Database
from watertap.core.zero_order_properties import WaterParameterBlock
from watertap.core.zero_order_costing import ZeroOrderCosting
solver = get_solver()
class TestBioreactorZO:
@pytest.fixture(scope="class")
def model(self):
m = ConcreteModel()
m.db = Database()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.params = WaterParameterBlock(
default={"solute_list": ["boron", "selenium", "foo"]}
)
m.fs.unit = BioreactorZO(
default={"property_package": m.fs.params, "database": m.db}
)
m.fs.unit.inlet.flow_mass_comp[0, "H2O"].fix(10000)
m.fs.unit.inlet.flow_mass_comp[0, "boron"].fix(1)
m.fs.unit.inlet.flow_mass_comp[0, "selenium"].fix(1)
m.fs.unit.inlet.flow_mass_comp[0, "foo"].fix(1)
return m
@pytest.mark.unit
def test_build(self, model):
assert model.fs.unit.config.database == model.db
assert isinstance(model.fs.unit.electricity, Var)
assert isinstance(model.fs.unit.energy_electric_flow_vol_inlet, Var)
assert isinstance(model.fs.unit.electricity_consumption, Constraint)
@pytest.mark.component
def test_load_parameters(self, model):
data = model.db.get_unit_operation_parameters("bioreactor")
model.fs.unit.load_parameters_from_database(use_default_removal=True)
assert model.fs.unit.recovery_frac_mass_H2O[0].fixed
assert (
model.fs.unit.recovery_frac_mass_H2O[0].value
== data["recovery_frac_mass_H2O"]["value"]
)
for (t, j), v in model.fs.unit.removal_frac_mass_solute.items():
assert v.fixed
if j == "foo":
assert v.value == data["default_removal_frac_mass_solute"]["value"]
else:
assert v.value == data["removal_frac_mass_solute"][j]["value"]
@pytest.mark.component
def test_degrees_of_freedom(self, model):
assert degrees_of_freedom(model.fs.unit) == 0
@pytest.mark.component
def test_unit_consistency(self, model):
assert_units_consistent(model.fs.unit)
@pytest.mark.component
def test_initialize(self, model):
initialization_tester(model)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert pytest.approx(9.7002, rel=1e-3) == value(
model.fs.unit.properties_treated[0].flow_vol
)
assert pytest.approx(2.0618e-2, rel=1e-3) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["boron"]
)
assert pytest.approx(3.608e-3, rel=1e-3) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["selenium"]
)
assert pytest.approx(1.0308e-1, rel=1e-3) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["foo"]
)
assert pytest.approx(0, abs=1e-8) == value(model.fs.unit.electricity[0])
@pytest.mark.component
def test_report(self, model):
model.fs.unit.report()
def test_costing():
m = ConcreteModel()
m.db = Database()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.params = WaterParameterBlock(default={"solute_list": ["sulfur", "toc", "tss"]})
m.fs.costing = ZeroOrderCosting()
m.fs.unit1 = BioreactorZO(
default={"property_package": m.fs.params, "database": m.db}
)
m.fs.unit1.inlet.flow_mass_comp[0, "H2O"].fix(10000)
m.fs.unit1.inlet.flow_mass_comp[0, "sulfur"].fix(1)
m.fs.unit1.inlet.flow_mass_comp[0, "toc"].fix(2)
m.fs.unit1.inlet.flow_mass_comp[0, "tss"].fix(3)
m.fs.unit1.load_parameters_from_database(use_default_removal=True)
assert degrees_of_freedom(m.fs.unit1) == 0
m.fs.unit1.costing = UnitModelCostingBlock(
default={"flowsheet_costing_block": m.fs.costing}
)
assert isinstance(m.fs.costing.bioreactor, Block)
assert isinstance(m.fs.costing.bioreactor.capital_a_parameter, Var)
assert isinstance(m.fs.costing.bioreactor.capital_b_parameter, Var)
assert isinstance(m.fs.costing.bioreactor.reference_state, Var)
assert isinstance(m.fs.unit1.costing.capital_cost, Var)
assert isinstance(m.fs.unit1.costing.capital_cost_constraint, Constraint)
assert_units_consistent(m.fs)
assert degrees_of_freedom(m.fs.unit1) == 0
assert m.fs.unit1.electricity[0] in m.fs.costing._registered_flows["electricity"]
| 35.283237 | 88 | 0.67464 |
4a23f234e6689a4396c8bb6f659efc291b7801e1 | 29,875 | py | Python | pythran/tests/test_slice.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,647 | 2015-01-13T01:45:38.000Z | 2022-03-28T01:23:41.000Z | pythran/tests/test_slice.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,116 | 2015-01-01T09:52:05.000Z | 2022-03-18T21:06:40.000Z | pythran/tests/test_slice.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 180 | 2015-02-12T02:47:28.000Z | 2022-03-14T10:28:18.000Z | """ Module to test slice implementation. """
import numpy
from pythran.typing import List, NDArray
from pythran.tests import TestEnv
class TestSlice(TestEnv):
"""
Unittest class for code using slices.
We skip tests for None step as it is equivalent to 1.
TODO : add tests for 1 == step (None as step)
"""
def test_empty_slices(self):
code = 'def empty_slices(x): return x[100:], x[100::2]'
self.run_test(code, numpy.arange(90),
empty_slices=[NDArray[int,:]])
def test_slice_combination1(self):
""" Check for "all none" combination. """
code = """
def slice_combination1(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::-4][begin:end:step],
a[::4][begin:end:step],
a[87::-4][begin:end:step],
a[1::4][begin:end:step],
a[-3::-4][begin:end:step],
a[-89::4][begin:end:step],
a[88:1:-4][begin:end:step],
a[1:88:4][begin:end:step],
a[-2:1:-4][begin:end:step],
a[-89:88:4][begin:end:step],
a[88:-88:-4][begin:end:step],
a[2:-1:4][begin:end:step],
a[-1:-88:-4][begin:end:step],
a[-88:-1:4][begin:end:step],
a[:1:-4][begin:end:step],
a[:87:4][begin:end:step],
a[:-87:-4][begin:end:step],
a[:-3:4][begin:end:step])
""".format(begin=None, end=None, step=None)
self.run_test(code, numpy.arange(90),
slice_combination1=[NDArray[int,:]])
def test_slice_combination2(self):
""" Check for positive step combination. """
code = """
def slice_combination2(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::-4][begin:end:step],
a[::4][begin:end:step],
a[87::-4][begin:end:step],
a[1::4][begin:end:step],
a[-3::-4][begin:end:step],
a[-89::4][begin:end:step],
a[88:1:-4][begin:end:step],
a[1:88:4][begin:end:step],
a[-2:1:-4][begin:end:step],
a[-89:88:4][begin:end:step],
a[88:-88:-4][begin:end:step],
a[2:-1:4][begin:end:step],
a[-1:-88:-4][begin:end:step],
a[-88:-1:4][begin:end:step],
a[:1:-4][begin:end:step],
a[:87:4][begin:end:step],
a[:-87:-4][begin:end:step],
a[:-3:4][begin:end:step])
""".format(begin=None, end=None, step=2)
self.run_test(code, numpy.arange(90),
slice_combination2=[NDArray[int, :]])
def test_slice_combination3(self):
""" Check for negative step combination. """
code = """
def slice_combination3(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step])
# Reversing values with a non-contiguous step is not implemented
# a[::-4][begin:end:step],
# a[::4][begin:end:step],
# a[87::-4][begin:end:step],
# a[1::4][begin:end:step],
# a[-3::-4][begin:end:step],
# a[-89::4][begin:end:step],
# a[88:1:-4][begin:end:step],
# a[1:88:4][begin:end:step],
# a[-2:1:-4][begin:end:step],
# a[-89:88:4][begin:end:step],
# a[88:-88:-4][begin:end:step],
# a[2:-1:4][begin:end:step],
# a[-1:-88:-4][begin:end:step],
# a[-88:-1:4][begin:end:step],
# a[:1:-4][begin:end:step],
# a[:87:4][begin:end:step],
# a[:-87:-4][begin:end:step],
# a[:-3:4][begin:end:step])
""".format(begin=None, end=None, step=-2)
self.run_test(code, numpy.arange(90),
slice_combination3=[NDArray[int, :]])
def test_slice_combination4(self):
""" Check for pos step/no begin/pos end combination. """
code = """
def slice_combination4(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::4][begin:end:step],
a[87::-4][begin:end:step],
a[1::4][begin:end:step],
a[-3::-4][begin:end:step],
a[-89::4][begin:end:step],
a[88:1:-4][begin:end:step],
a[1:88:4][begin:end:step],
a[-2:1:-4][begin:end:step],
a[-89:88:4][begin:end:step],
a[88:-88:-4][begin:end:step],
a[2:-1:4][begin:end:step],
a[-1:-88:-4][begin:end:step],
a[-88:-1:4][begin:end:step],
a[:1:-4][begin:end:step],
a[:87:4][begin:end:step],
a[:-87:-4][begin:end:step],
a[:-3:4][begin:end:step])
""".format(begin=None, end=7, step=2)
self.run_test(code, numpy.arange(90),
slice_combination4=[NDArray[int, :]])
def test_slice_combination5(self):
""" Check for pos step/no begin/neg end combination. """
code = """
def slice_combination5(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step])
# Not implemented for negative end
# a[::4][begin:end:step],
# a[87::-4][begin:end:step],
# a[1::4][begin:end:step],
# a[-3::-4][begin:end:step],
# a[-89::4][begin:end:step],
# a[88:1:-4][begin:end:step],
# a[1:88:4][begin:end:step],
# a[-2:1:-4][begin:end:step],
# a[-89:88:4][begin:end:step],
# a[88:-88:-4][begin:end:step],
# a[2:-1:4][begin:end:step],
# a[-1:-88:-4][begin:end:step],
# a[-88:-1:4][begin:end:step],
# a[:1:-4][begin:end:step],
# a[:87:4][begin:end:step],
# a[:-87:-4][begin:end:step],
# a[:-3:4][begin:end:step])
""".format(begin=None, end=-3, step=2)
self.run_test(code, numpy.arange(90),
slice_combination5=[NDArray[int, :]])
def test_slice_combination6(self):
""" Check for pos step/pos begin/no end combination. """
code = """
def slice_combination6(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::4][begin:end:step],
a[87::-4][begin:end:step],
a[1::4][begin:end:step],
a[-3::-4][begin:end:step],
a[-89::4][begin:end:step],
a[88:1:-4][begin:end:step],
a[1:88:4][begin:end:step],
a[-2:1:-4][begin:end:step],
a[-89:88:4][begin:end:step],
a[88:-88:-4][begin:end:step],
a[2:-1:4][begin:end:step],
a[-1:-88:-4][begin:end:step],
a[-88:-1:4][begin:end:step],
a[:1:-4][begin:end:step],
a[:87:4][begin:end:step],
a[:-87:-4][begin:end:step],
a[:-3:4][begin:end:step])
""".format(begin=2, end=None, step=2)
self.run_test(code, numpy.arange(90),
slice_combination6=[NDArray[int, :]])
def test_slice_combination7(self):
""" Check for pos step/pos begin/pos end combination. """
code = """
def slice_combination7(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::4][begin:end:step],
a[87::-4][begin:end:step],
a[1::4][begin:end:step],
a[-3::-4][begin:end:step],
a[-89::4][begin:end:step],
a[88:1:-4][begin:end:step],
a[1:88:4][begin:end:step],
a[-2:1:-4][begin:end:step],
a[-89:88:4][begin:end:step],
a[88:-88:-4][begin:end:step],
a[2:-1:4][begin:end:step],
a[-1:-88:-4][begin:end:step],
a[-88:-1:4][begin:end:step],
a[:1:-4][begin:end:step],
a[:87:4][begin:end:step],
a[:-87:-4][begin:end:step],
a[:-3:4][begin:end:step])
""".format(begin=2, end=9, step=2)
self.run_test(code, numpy.arange(90),
slice_combination7=[NDArray[int, :]])
def test_slice_combination8(self):
""" Check for pos step/neg begin/no end combination. """
code = """
def slice_combination8(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step])
# Not implemented for negative begin
# a[::4][begin:end:step],
# a[87::-4][begin:end:step],
# a[1::4][begin:end:step],
# a[-3::-4][begin:end:step],
# a[-89::4][begin:end:step],
# a[88:1:-4][begin:end:step],
# a[1:88:4][begin:end:step],
# a[-2:1:-4][begin:end:step],
# a[-89:88:4][begin:end:step],
# a[88:-88:-4][begin:end:step],
# a[2:-1:4][begin:end:step],
# a[-1:-88:-4][begin:end:step],
# a[-88:-1:4][begin:end:step],
# a[:1:-4][begin:end:step],
# a[:87:4][begin:end:step],
# a[:-87:-4][begin:end:step],
# a[:-3:4][begin:end:step])
""".format(begin=-10, end=None, step=2)
self.run_test(code, numpy.arange(90),
slice_combination8=[NDArray[int, :]])
def test_step1slice_combination1(self):
""" Check for "all none" combination. """
code = """
def step1slice_combination1(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::-1][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=None, end=None, step=None)
self.run_test(code, numpy.arange(90),
step1slice_combination1=[NDArray[int, :]])
def test_step1slice_combination2(self):
""" Check for positive step combination. """
code = """
def step1slice_combination2(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::-1][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=None, end=None, step=2)
self.run_test(code, numpy.arange(90),
step1slice_combination2=[NDArray[int, :]])
def test_step1slice_combination3(self):
""" Check for negative step combination. """
code = """
def step1slice_combination3(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::-1][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-2:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=None, end=None, step=-2)
self.run_test(code, numpy.arange(90),
step1slice_combination3=[NDArray[int, :]])
def test_step1slice_combination4(self):
""" Check for pos step/no begin/pos end combination. """
code = """
def step1slice_combination4(a):
begin = {begin}
end = {end}
step = {step}
return(a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=None, end=7, step=2)
self.run_test(code, numpy.arange(90),
step1slice_combination4=[NDArray[int, :]])
def test_step1slice_combination5(self):
""" Check for pos step/no begin/neg end combination. """
code = """
def step1slice_combination5(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=None, end=-3, step=2)
self.run_test(code, numpy.arange(90),
step1slice_combination5=[NDArray[int, :]])
def test_step1slice_combination6(self):
""" Check for pos step/pos begin/no end combination. """
code = """
def step1slice_combination6(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=2, end=None, step=2)
self.run_test(code, numpy.arange(90),
step1slice_combination6=[NDArray[int, :]])
def test_step1slice_combination7(self):
""" Check for pos step/pos begin/pos end combination. """
code = """
def step1slice_combination7(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=2, end=9, step=2)
self.run_test(code, numpy.arange(90),
step1slice_combination7=[NDArray[int, :]])
def test_step1slice_combination8(self):
""" Check for pos step/neg begin/no end combination. """
code = """
def step1slice_combination8(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=-10, end=None, step=2)
self.run_test(code, numpy.arange(90),
step1slice_combination8=[NDArray[int, :]])
def test_step1slice_combination9(self):
""" Check for neg step/no begin/pos end combination. """
code = """
def step1slice_combination9(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=None, end=2, step=-2)
self.run_test(code, numpy.arange(90),
step1slice_combination9=[NDArray[int, :]])
def test_step1slice_combination10(self):
""" Check for neg step/no begin/neg end combination. """
code = """
def step1slice_combination10(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=None, end=-10, step=-2)
self.run_test(code, numpy.arange(90),
step1slice_combination10=[NDArray[int, :]])
def test_step1slice_combination11(self):
""" Check for neg step/pos begin/neg end combination. """
code = """
def step1slice_combination11(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=85, end=-10, step=-2)
self.run_test(code, numpy.arange(90),
step1slice_combination11=[NDArray[int, :]])
def test_step1slice_combination12(self):
""" Check for neg step/pos begin/no end combination. """
code = """
def step1slice_combination12(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=85, end=None, step=-2)
self.run_test(code, numpy.arange(90),
step1slice_combination12=[NDArray[int, :]])
def test_step1slice_combination13(self):
""" Check for neg step/pos begin/pos end combination. """
code = """
def step1slice_combination13(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=85, end=3, step=-2)
self.run_test(code, numpy.arange(90),
step1slice_combination13=[NDArray[int, :]])
def test_step1slice_combination14(self):
""" Check for pos step/neg begin/no end combination. """
code = """
def step1slice_combination14(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=-3, end=None, step=-2)
self.run_test(code, numpy.arange(90),
step1slice_combination14=[NDArray[int, :]])
def test_step1slice_combination15(self):
""" Check for neg step/neg begin/pos end combination. """
code = """
def step1slice_combination15(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=-3, end=4, step=-2)
self.run_test(code, numpy.arange(90),
step1slice_combination15=[NDArray[int, :]])
def test_step1slice_combination16(self):
""" Check for neg step/neg begin/neg end combination. """
code = """
def step1slice_combination16(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=-3, end=-10, step=-2)
self.run_test(code, numpy.arange(90),
step1slice_combination16=[NDArray[int, :]])
def test_step1slice_combination17(self):
""" Check for pos step/pos begin/neg end combination. """
code = """
def step1slice_combination17(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=3, end=-10, step=2)
self.run_test(code, numpy.arange(90),
step1slice_combination17=[NDArray[int, :]])
def test_step1slice_combination18(self):
""" Check for pos step/pos begin/neg end combination. """
code = """
def step1slice_combination18(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=-80, end=80, step=2)
self.run_test(code, numpy.arange(90),
step1slice_combination18=[NDArray[int, :]])
def test_step1slice_combination19(self):
""" Check for pos step/neg begin/neg end combination. """
code = """
def step1slice_combination19(a):
begin = {begin}
end = {end}
step = {step}
return (a[::][begin:end:step],
a[::1][begin:end:step],
a[87::-1][begin:end:step],
a[1::1][begin:end:step],
a[-3::-1][begin:end:step],
a[-89::1][begin:end:step],
a[88:1:-1][begin:end:step],
a[1:88:1][begin:end:step],
a[-2:1:-1][begin:end:step],
a[-89:88:1][begin:end:step],
a[88:-88:-1][begin:end:step],
a[2:-1:1][begin:end:step],
a[-1:-88:-1][begin:end:step],
a[-88:-1:1][begin:end:step],
a[:1:-1][begin:end:step],
a[:87:1][begin:end:step],
a[:-87:-1][begin:end:step],
a[:-3:1][begin:end:step])
""".format(begin=-80, end=-2, step=2)
self.run_test(code, numpy.arange(90),
step1slice_combination19=[NDArray[int, :]])
def test_slice_transpose0(self):
code = '''
import numpy as np
def slice_transpose0(n):
base = np.zeros((16, n)).T
slice1 = base[:10, 10:] # should have shape (10, 6)
return slice1'''
self.run_test(code, 16, slice_transpose0=[int])
| 36.124547 | 65 | 0.478594 |
4a23f3be5162ee5d0a402ff3967ed53d59bc11b6 | 575 | py | Python | tests/test_cli.py | buenrostrolab/parkour | 89e5a726b1606194d114c4bb61741ffb7119b9cf | [
"MIT"
] | 11 | 2017-06-01T01:40:07.000Z | 2019-08-19T22:21:58.000Z | tests/test_cli.py | buenrostrolab/parkour | 89e5a726b1606194d114c4bb61741ffb7119b9cf | [
"MIT"
] | 11 | 2017-05-29T19:14:16.000Z | 2018-11-05T02:06:09.000Z | tests/test_cli.py | buenrostrolab/parkour | 89e5a726b1606194d114c4bb61741ffb7119b9cf | [
"MIT"
] | 2 | 2017-07-20T19:45:02.000Z | 2019-09-29T15:24:59.000Z | import pytest
from click.testing import CliRunner
from parkour import cli
import hashlib
def file_checksums_equal(file1, file2):
    # Compare the two files via the MD5 digest of their raw bytes.
    with open(file1, 'rb') as f:
        checksum1 = hashlib.md5(f.read()).digest()
    with open(file2, 'rb') as f:
        checksum2 = hashlib.md5(f.read()).digest()
    return checksum1 == checksum2
def test_trimmed_output():
runner = CliRunner()
result = runner.invoke(cli.main, ['-a', 'fastq/s3_1.fastq.gz', '-b', 'fastq/s3_2.fastq.gz', '-u', 'trim'])
print(result.output)
assert file_checksums_equal('p.s3_1.trim.fastq', 'correct_output/p.s3_1.trim.fastq')
| 28.75 | 107 | 0.693913 |
4a23f3f3ea316bb6888d2bfd8cba5ff7063548d6 | 715 | py | Python | pl_igscmaes/run_genps.py | Lucklyric/Lucklyric-TSInSAR-PF-IGS_CMAES | 704e45ea433de4fd1106603c9e7ae115adca1c67 | [
"Apache-2.0"
] | 1 | 2021-06-25T13:26:09.000Z | 2021-06-25T13:26:09.000Z | pl_igscmaes/run_genps.py | Lucklyric/TSInSAR-PF-IGS_CMAES | 704e45ea433de4fd1106603c9e7ae115adca1c67 | [
"Apache-2.0"
] | null | null | null | pl_igscmaes/run_genps.py | Lucklyric/TSInSAR-PF-IGS_CMAES | 704e45ea433de4fd1106603c9e7ae115adca1c67 | [
"Apache-2.0"
] | null | null | null | import hydra
from omegaconf import DictConfig, OmegaConf
import logging
from torch.utils.data import DataLoader
import pytorch_lightning as pl
import numpy as np
import json
import os
import joblib
from .data.generate_ps_v2 import generate_ps
log = logging.getLogger(__name__)
@hydra.main(config_path="config", config_name="config")
def main(cfg) -> None:
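    # Read the DB settings from the hydra config, generate PS candidates into
    # the processing directory, and persist them to output_path with joblib.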
dbinfo = cfg.db
print(dbinfo)
processing_dir = cfg.processing_dir
print(cfg.output_path)
os.makedirs(processing_dir, exist_ok=True)
candidates = generate_ps(dbinfo, processing_dir)
joblib.dump(candidates, cfg.output_path)
if __name__ == "__main__":
try:
main(None)
except Exception as e:
raise e
| 21.666667 | 55 | 0.737063 |
4a23f3fe628a6fe5c021e97edfe1f2a0a5d7e9af | 5,079 | py | Python | build/x86/python/m5/internal/param_X86ACPISysDescTable.py | billionshang/gem5 | 18cc4294f32315595f865d07d1f33434e92b06b2 | [
"BSD-3-Clause"
] | null | null | null | build/x86/python/m5/internal/param_X86ACPISysDescTable.py | billionshang/gem5 | 18cc4294f32315595f865d07d1f33434e92b06b2 | [
"BSD-3-Clause"
] | null | null | null | build/x86/python/m5/internal/param_X86ACPISysDescTable.py | billionshang/gem5 | 18cc4294f32315595f865d07d1f33434e92b06b2 | [
"BSD-3-Clause"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_param_X86ACPISysDescTable', [dirname(__file__)])
except ImportError:
import _param_X86ACPISysDescTable
return _param_X86ACPISysDescTable
if fp is not None:
try:
_mod = imp.load_module('_param_X86ACPISysDescTable', fp, pathname, description)
finally:
fp.close()
return _mod
_param_X86ACPISysDescTable = swig_import_helper()
del swig_import_helper
else:
import _param_X86ACPISysDescTable
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import m5.internal.param_SimObject
import m5.internal.drain
import m5.internal.serialize
class X86ISA_COLONS_ACPI_COLONS_SysDescTable(m5.internal.param_SimObject.SimObject):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
X86ISA_COLONS_ACPI_COLONS_SysDescTable_swigregister = _param_X86ACPISysDescTable.X86ISA_COLONS_ACPI_COLONS_SysDescTable_swigregister
X86ISA_COLONS_ACPI_COLONS_SysDescTable_swigregister(X86ISA_COLONS_ACPI_COLONS_SysDescTable)
class X86ACPISysDescTableParams(m5.internal.param_SimObject.SimObjectParams):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
creator_id = _swig_property(_param_X86ACPISysDescTable.X86ACPISysDescTableParams_creator_id_get, _param_X86ACPISysDescTable.X86ACPISysDescTableParams_creator_id_set)
creator_revision = _swig_property(_param_X86ACPISysDescTable.X86ACPISysDescTableParams_creator_revision_get, _param_X86ACPISysDescTable.X86ACPISysDescTableParams_creator_revision_set)
oem_id = _swig_property(_param_X86ACPISysDescTable.X86ACPISysDescTableParams_oem_id_get, _param_X86ACPISysDescTable.X86ACPISysDescTableParams_oem_id_set)
oem_revision = _swig_property(_param_X86ACPISysDescTable.X86ACPISysDescTableParams_oem_revision_get, _param_X86ACPISysDescTable.X86ACPISysDescTableParams_oem_revision_set)
oem_table_id = _swig_property(_param_X86ACPISysDescTable.X86ACPISysDescTableParams_oem_table_id_get, _param_X86ACPISysDescTable.X86ACPISysDescTableParams_oem_table_id_set)
def __init__(self):
this = _param_X86ACPISysDescTable.new_X86ACPISysDescTableParams()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _param_X86ACPISysDescTable.delete_X86ACPISysDescTableParams
__del__ = lambda self: None
X86ACPISysDescTableParams_swigregister = _param_X86ACPISysDescTable.X86ACPISysDescTableParams_swigregister
X86ACPISysDescTableParams_swigregister(X86ACPISysDescTableParams)
| 37.345588 | 187 | 0.732034 |
4a23f432cf5f8bff2f66719b7591c29608fa7fb0 | 605 | py | Python | config/trainer_configs/save_config/faa_dense_net.py | jwspaeth/FAA-Project | afa9d3bec10deead48c4b17dff69df2e02691e41 | [
"MIT"
] | null | null | null | config/trainer_configs/save_config/faa_dense_net.py | jwspaeth/FAA-Project | afa9d3bec10deead48c4b17dff69df2e02691e41 | [
"MIT"
] | 2 | 2019-10-20T00:42:40.000Z | 2019-10-30T18:06:11.000Z | config/trainer_configs/save_config/faa_dense_net.py | jwspaeth/FAA-Project | afa9d3bec10deead48c4b17dff69df2e02691e41 | [
"MIT"
] | null | null | null | from yacs.config import CfgNode as CN
_C = CN()
# Define output parameters
_C.Output = CN()
_C.Output.batch_name = "FaaDense-test-3"
_C.Output.checkpoint_trigger = 1
# Define callback parameters
_C.Callback = CN()
_C.Callback.exists = True
_C.Callback.names = ["MyRecordingCallback", "MemoryCallback", "ResetHistoryCallback"]
_C.Callback.figwidth = 12
_C.Callback.figheight = 2
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _C.clone()
| 27.5 | 85 | 0.747107 |
4a23f4fdb7ea714c92d42e823f0e356df52f0518 | 1,029 | py | Python | MultiThread/2_immutable_pattern.py | AtsushiSakai/python_public_sandbox | ab656ef0e862cfd9e0e32a18bf55f6e75ed6b9fe | [
"MIT"
] | null | null | null | MultiThread/2_immutable_pattern.py | AtsushiSakai/python_public_sandbox | ab656ef0e862cfd9e0e32a18bf55f6e75ed6b9fe | [
"MIT"
] | null | null | null | MultiThread/2_immutable_pattern.py | AtsushiSakai/python_public_sandbox | ab656ef0e862cfd9e0e32a18bf55f6e75ed6b9fe | [
"MIT"
] | null | null | null | import threading
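# Immutable-pattern demo: Gate's name and address are set once in __init__ and
# exposed only through read-only properties, so the worker threads cannot
# reassign them (the commented-out "self.gate.name = ..." in UserThread.run
# raises AttributeError). Only the unsynchronized counter stays mutable.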
class Gate:
def __init__(self, name, address):
self.counter = 0
self._name = name
self._address = address
def through(self):
self.counter += 1
self.check()
@property
def name(self):
return self._name
@property
def address(self):
return self._address
def __str__(self):
return "No." + str(self.counter) + ": " + self._name + ", " + self._address
def check(self):
if self.name[0] != self.address[0]:
print("**** Broken *****", self.__str__())
class UserThread(threading.Thread):
def __init__(self, gate):
super(UserThread, self).__init__()
self.gate = gate
def run(self):
print("start")
while True:
# self.gate.name = "aaa" # error
self.gate.through()
def main():
print("Testing Gate")
gate = Gate("Alice", "Alaska")
UserThread(gate).start()
UserThread(gate).start()
UserThread(gate).start()
if __name__ == '__main__':
    main()
4a23f5923eef948b39fa1b57b4ff107e41ff7cc4 | 1,020,790 | py | Python | nova/tests/unit/virt/libvirt/test_driver.py | bopopescu/TestNova | fb6a183b54f87cc078dc6de5be89711ec0d9ac26 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/libvirt/test_driver.py | bopopescu/TestNova | fb6a183b54f87cc078dc6de5be89711ec0d9ac26 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/libvirt/test_driver.py | bopopescu/TestNova | fb6a183b54f87cc078dc6de5be89711ec0d9ac26 | [
"Apache-2.0"
] | 1 | 2020-07-22T22:13:56.000Z | 2020-07-22T22:13:56.000Z | # Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
from collections import deque
from collections import OrderedDict
import contextlib
import copy
import datetime
import errno
import glob
import os
import random
import re
import shutil
import signal
import threading
import time
import unittest
from castellan import key_manager
import ddt
import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
import mock
from mox3 import mox
from os_brick import encryptors
from os_brick import exception as brick_exception
from os_brick.initiator import connector
import os_vif
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import fileutils
from oslo_utils import fixture as utils_fixture
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_utils import versionutils
import six
from six.moves import builtins
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova.api.openstack.placement.objects import resource_provider as rp_object
from nova.compute import manager
from nova.compute import power_state
from nova.compute import provider_tree
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import context
from nova.db import api as db
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import virtual_interface as obj_vif
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.fs
import nova.privsep.libvirt
from nova import rc_fields
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_diagnostics
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_diagnostics
from nova.tests.unit.objects import test_pci_device
from nova.tests.unit.objects import test_vcpu_model
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.tests import uuidsentinel as uuids
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import volume as volume_drivers
CONF = nova.conf.CONF
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_NodeDevXml = \
{"pci_0000_04_00_3": """
<device>
<name>pci_0000_04_00_3</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igb</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>3</function>
<product id='0x1521'>I350 Gigabit Network Connection</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
</capability>
</capability>
</device>""",
"pci_0000_04_10_7": """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_11_7": """
<device>
<name>pci_0000_04_11_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>17</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<numa node='0'/>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_00_1": """
<device>
<name>pci_0000_04_00_1</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>1</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>""",
# libvirt >= 1.3.0 nodedev-dumpxml
"pci_0000_03_00_0": """
<device>
<name>pci_0000_03_00_0</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>3</bus>
<slot>0</slot>
<function>0</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<capability type='virt_functions' maxCount='16'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x2'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x3'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x4'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x5'/>
</capability>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>""",
"pci_0000_03_00_1": """
<device>
<name>pci_0000_03_00_1</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>3</bus>
<slot>0</slot>
<function>1</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<capability type='virt_functions' maxCount='16'/>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>""",
"net_enp2s2_02_9a_a1_37_be_54": """
<device>
<name>net_enp2s2_02_9a_a1_37_be_54</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:02:02.0/net/enp2s2</path>
<parent>pci_0000_04_11_7</parent>
<capability type='net'>
<interface>enp2s2</interface>
<address>02:9a:a1:37:be:54</address>
<link state='down'/>
<feature name='rx'/>
<feature name='tx'/>
<feature name='sg'/>
<feature name='tso'/>
<feature name='gso'/>
<feature name='gro'/>
<feature name='rxvlan'/>
<feature name='txvlan'/>
<capability type='80203'/>
</capability>
</device>""",
"pci_0000_06_00_0": """
<device>
<name>pci_0000_06_00_0</name>
<path>/sys/devices/pci0000:00/0000:00:06.0</path>
<parent></parent>
<driver>
<name>nvidia</name>
</driver>
<capability type="pci">
<domain>0</domain>
<bus>10</bus>
<slot>1</slot>
<function>5</function>
<product id="0x0FFE">GRID M60-0B</product>
<vendor id="0x10DE">Nvidia</vendor>
<numa node="8"/>
<capability type='mdev_types'>
<type id='nvidia-11'>
<name>GRID M60-0B</name>
<deviceAPI>vfio-pci</deviceAPI>
<availableInstances>16</availableInstances>
</type>
</capability>
</capability>
</device>""",
"mdev_4b20d080_1b54_4048_85b3_a6a62d165c01": """
<device>
<name>mdev_4b20d080_1b54_4048_85b3_a6a62d165c01</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/4b20d080-1b54-4048-85b3-a6a62d165c01</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>vfio_mdev</name>
</driver>
<capability type='mdev'>
<type id='nvidia-11'/>
<iommuGroup number='12'/>
</capability>
</device>
""",
}
_fake_cpu_info = {
"arch": "test_arch",
"model": "test_model",
"vendor": "test_vendor",
"topology": {
"sockets": 1,
"cores": 8,
"threads": 16
},
"features": ["feature1", "feature2"]
}
eph_default_ext = utils.get_hash_str(nova.privsep.fs._DEFAULT_FILE_SYSTEM)[:7]
_fake_qemu64_cpu_feature = """
<cpu mode='custom' match='exact'>
<model fallback='forbid'>qemu64</model>
<feature policy='require' name='svm'/>
<feature policy='require' name='lm'/>
<feature policy='require' name='nx'/>
<feature policy='require' name='syscall'/>
<feature policy='require' name='cx16'/>
<feature policy='require' name='pni'/>
<feature policy='require' name='sse2'/>
<feature policy='require' name='sse'/>
<feature policy='require' name='fxsr'/>
<feature policy='require' name='mmx'/>
<feature policy='require' name='clflush'/>
<feature policy='require' name='pse36'/>
<feature policy='require' name='pat'/>
<feature policy='require' name='cmov'/>
<feature policy='require' name='mca'/>
<feature policy='require' name='pge'/>
<feature policy='require' name='mtrr'/>
<feature policy='require' name='sep'/>
<feature policy='require' name='apic'/>
<feature policy='require' name='cx8'/>
<feature policy='require' name='mce'/>
<feature policy='require' name='pae'/>
<feature policy='require' name='msr'/>
<feature policy='require' name='tsc'/>
<feature policy='require' name='pse'/>
<feature policy='require' name='de'/>
<feature policy='require' name='fpu'/>
</cpu>
"""
_fake_broadwell_cpu_feature = """
<cpu mode='custom' match='exact'>
<model fallback='forbid'>Broadwell-noTSX</model>
<vendor>Intel</vendor>
<feature policy='require' name='smap'/>
<feature policy='require' name='adx'/>
<feature policy='require' name='rdseed'/>
<feature policy='require' name='invpcid'/>
<feature policy='require' name='erms'/>
<feature policy='require' name='bmi2'/>
<feature policy='require' name='smep'/>
<feature policy='require' name='avx2'/>
<feature policy='require' name='bmi1'/>
<feature policy='require' name='fsgsbase'/>
<feature policy='require' name='3dnowprefetch'/>
<feature policy='require' name='lahf_lm'/>
<feature policy='require' name='lm'/>
<feature policy='require' name='rdtscp'/>
<feature policy='require' name='nx'/>
<feature policy='require' name='syscall'/>
<feature policy='require' name='avx'/>
<feature policy='require' name='xsave'/>
<feature policy='require' name='aes'/>
<feature policy='require' name='tsc-deadline'/>
<feature policy='require' name='popcnt'/>
<feature policy='require' name='movbe'/>
<feature policy='require' name='x2apic'/>
<feature policy='require' name='sse4.2'/>
<feature policy='require' name='sse4.1'/>
<feature policy='require' name='pcid'/>
<feature policy='require' name='cx16'/>
<feature policy='require' name='fma'/>
<feature policy='require' name='ssse3'/>
<feature policy='require' name='pclmuldq'/>
<feature policy='require' name='pni'/>
<feature policy='require' name='sse2'/>
<feature policy='require' name='sse'/>
<feature policy='require' name='fxsr'/>
<feature policy='require' name='mmx'/>
<feature policy='require' name='clflush'/>
<feature policy='require' name='pse36'/>
<feature policy='require' name='pat'/>
<feature policy='require' name='cmov'/>
<feature policy='require' name='mca'/>
<feature policy='require' name='pge'/>
<feature policy='require' name='mtrr'/>
<feature policy='require' name='sep'/>
<feature policy='require' name='apic'/>
<feature policy='require' name='cx8'/>
<feature policy='require' name='mce'/>
<feature policy='require' name='pae'/>
<feature policy='require' name='msr'/>
<feature policy='require' name='tsc'/>
<feature policy='require' name='pse'/>
<feature policy='require' name='de'/>
<feature policy='require' name='fpu'/>
</cpu>
"""
def eph_name(size):
return ('ephemeral_%(size)s_%(ext)s' %
{'size': size, 'ext': eph_default_ext})
def fake_disk_info_byname(instance, type='qcow2'):
"""Return instance_disk_info corresponding accurately to the properties of
the given Instance object. The info is returned as an OrderedDict of
name->disk_info for each disk.
:param instance: The instance we're generating fake disk_info for.
:param type: libvirt's disk type.
:return: disk_info
:rtype: OrderedDict
"""
instance_dir = os.path.join(CONF.instances_path, instance.uuid)
def instance_path(name):
return os.path.join(instance_dir, name)
disk_info = OrderedDict()
# root disk
if (instance.image_ref is not None and
instance.image_ref != uuids.fake_volume_backed_image_ref):
cache_name = imagecache.get_cache_fname(instance.image_ref)
disk_info['disk'] = {
'type': type,
'path': instance_path('disk'),
'virt_disk_size': instance.flavor.root_gb * units.Gi,
'backing_file': cache_name,
'disk_size': instance.flavor.root_gb * units.Gi,
'over_committed_disk_size': 0}
swap_mb = instance.flavor.swap
if swap_mb > 0:
disk_info['disk.swap'] = {
'type': type,
'path': instance_path('disk.swap'),
'virt_disk_size': swap_mb * units.Mi,
'backing_file': 'swap_%s' % swap_mb,
'disk_size': swap_mb * units.Mi,
'over_committed_disk_size': 0}
eph_gb = instance.flavor.ephemeral_gb
if eph_gb > 0:
disk_info['disk.local'] = {
'type': type,
'path': instance_path('disk.local'),
'virt_disk_size': eph_gb * units.Gi,
'backing_file': eph_name(eph_gb),
'disk_size': eph_gb * units.Gi,
'over_committed_disk_size': 0}
if instance.config_drive:
disk_info['disk.config'] = {
'type': 'raw',
'path': instance_path('disk.config'),
'virt_disk_size': 1024,
'backing_file': '',
'disk_size': 1024,
'over_committed_disk_size': 0}
return disk_info
def fake_diagnostics_object(with_cpus=False, with_disks=False, with_nic=False):
diag_dict = {'config_drive': False,
'driver': 'libvirt',
'hypervisor': 'kvm',
'hypervisor_os': 'linux',
'memory_details': {'maximum': 2048, 'used': 1234},
'state': 'running',
'uptime': 10}
if with_cpus:
diag_dict['cpu_details'] = []
for id, t in enumerate([15340000000, 1640000000,
3040000000, 1420000000]):
diag_dict['cpu_details'].append({'id': id, 'time': t})
if with_disks:
diag_dict['disk_details'] = []
for i in range(2):
diag_dict['disk_details'].append(
{'read_bytes': 688640,
'read_requests': 169,
'write_bytes': 0,
'write_requests': 0,
'errors_count': 1})
if with_nic:
diag_dict['nic_details'] = [
{'mac_address': '52:54:00:a4:38:38',
'rx_drop': 0,
'rx_errors': 0,
'rx_octets': 4408,
'rx_packets': 82,
'tx_drop': 0,
'tx_errors': 0,
'tx_octets': 0,
'tx_packets': 0}]
return fake_diagnostics.fake_diagnostics_obj(**diag_dict)
def fake_disk_info_json(instance, type='qcow2'):
"""Return fake instance_disk_info corresponding accurately to the
properties of the given Instance object.
:param instance: The instance we're generating fake disk_info for.
:param type: libvirt's disk type.
:return: JSON representation of instance_disk_info for all disks.
:rtype: str
"""
disk_info = fake_disk_info_byname(instance, type)
return jsonutils.dumps(disk_info.values())
def get_injection_info(network_info=None, admin_pass=None, files=None):
return libvirt_driver.InjectionInfo(
network_info=network_info, admin_pass=admin_pass, files=files)
def _concurrency(signal, wait, done, target, is_block_dev=False):
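    # Stand-in image-creation callback for CacheConcurrencyTestCase: signal
    # that it has started, block until the test releases it, then signal done
    # so the test can check whether the image cache serialized the two calls.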
signal.send()
wait.wait()
done.send()
class FakeVirtDomain(object):
def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None,
info=None):
if uuidstr is None:
uuidstr = uuids.fake
self.uuidstr = uuidstr
self.id = id
self.domname = name
self._info = info or (
[power_state.RUNNING, 2048 * units.Mi,
1234 * units.Mi, None, None])
if fake_xml:
self._fake_dom_xml = fake_xml
else:
self._fake_dom_xml = """
<domain type='kvm'>
<name>testinstance1</name>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
def name(self):
if self.domname is None:
return "fake-domain %s" % self
else:
return self.domname
def ID(self):
return self.id
def info(self):
return self._info
def create(self):
pass
def managedSave(self, *args):
pass
def createWithFlags(self, launch_flags):
pass
def XMLDesc(self, flags):
return self._fake_dom_xml
def UUIDString(self):
return self.uuidstr
def attachDeviceFlags(self, xml, flags):
pass
def attachDevice(self, xml):
pass
def detachDeviceFlags(self, xml, flags):
pass
def snapshotCreateXML(self, xml, flags):
pass
def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
pass
def blockRebase(self, disk, base, bandwidth=0, flags=0):
pass
def blockJobInfo(self, path, flags):
pass
def blockJobAbort(self, path, flags):
pass
def resume(self):
pass
def destroy(self):
pass
def fsFreeze(self, disks=None, flags=0):
pass
def fsThaw(self, disks=None, flags=0):
pass
def isActive(self):
return True
def isPersistent(self):
return True
def undefine(self):
return True
class CacheConcurrencyTestCase(test.NoDBTestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
# utils.synchronized() will create the lock_path for us if it
# doesn't already exist. It will also delete it when it's done,
# which can cause race conditions with the multiple threads we
# use for tests. So, create the path here so utils.synchronized()
# won't delete it out from under one of the threads.
self.lock_path = os.path.join(CONF.instances_path, 'locks')
fileutils.ensure_tree(self.lock_path)
def fake_exists(fname):
basedir = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
if fname == basedir or fname == self.lock_path:
return True
return False
self.stub_out('os.path.exists', fake_exists)
self.stub_out('nova.utils.execute', lambda *a, **kw: None)
self.stub_out('nova.virt.disk.api.extend',
lambda image, size, use_cow=False: None)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def _fake_instance(self, uuid):
return objects.Instance(id=1, uuid=uuid)
def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs sequentially.
uuid = uuids.fake
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
'name').cache,
_concurrency, 'fname', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
# Thread 1 should run before thread 2.
sig1.wait()
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
'name').cache,
_concurrency, 'fname', None,
signal=sig2, wait=wait2, done=done2)
wait2.send()
eventlet.sleep(0)
try:
self.assertFalse(done2.ready())
finally:
wait1.send()
done1.wait()
eventlet.sleep(0)
self.assertTrue(done2.ready())
# Wait on greenthreads to assert they didn't raise exceptions
# during execution
thr1.wait()
thr2.wait()
def test_different_fname_concurrency(self):
# Ensures that two different fname caches are concurrent.
uuid = uuids.fake
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
'name').cache,
_concurrency, 'fname2', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
# Thread 1 should run before thread 2.
sig1.wait()
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
'name').cache,
_concurrency, 'fname1', None,
signal=sig2, wait=wait2, done=done2)
eventlet.sleep(0)
# Wait for thread 2 to start.
sig2.wait()
wait2.send()
tries = 0
while not done2.ready() and tries < 10:
eventlet.sleep(0)
tries += 1
try:
self.assertTrue(done2.ready())
finally:
wait1.send()
eventlet.sleep(0)
# Wait on greenthreads to assert they didn't raise exceptions
# during execution
thr1.wait()
thr2.wait()
class FakeInvalidVolumeDriver(object):
def __init__(self, *args, **kwargs):
raise brick_exception.InvalidConnectorProtocol('oops!')
class FakeConfigGuestDisk(object):
def __init__(self, *args, **kwargs):
self.source_type = None
self.driver_cache = None
class FakeConfigGuest(object):
def __init__(self, *args, **kwargs):
self.driver_cache = None
class FakeNodeDevice(object):
def __init__(self, fakexml):
self.xml = fakexml
def XMLDesc(self, flags):
return self.xml
def _create_test_instance():
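    # Minimal instance dict (m1.small flavor: 2 vCPUs, 2048MB RAM, 10G root,
    # 20G ephemeral) shared by the libvirt driver tests below.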
flavor = objects.Flavor(memory_mb=2048,
swap=0,
vcpu_weight=None,
root_gb=10,
id=2,
name=u'm1.small',
ephemeral_gb=20,
rxtx_factor=1.0,
flavorid=u'1',
vcpus=2,
extra_specs={})
return {
'id': 1,
'uuid': uuids.instance,
'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
'display_name': "Acme webserver",
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
'instance_type_id': '5', # m1.small
'extra_specs': {},
'system_metadata': {
'image_disk_format': 'raw'
},
'flavor': flavor,
'new_flavor': None,
'old_flavor': None,
'pci_devices': objects.PciDeviceList(),
'numa_topology': None,
'config_drive': None,
'vm_mode': None,
'kernel_id': None,
'ramdisk_id': None,
'os_type': 'linux',
'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
'ephemeral_key_uuid': None,
'vcpu_model': None,
'host': 'fake-host',
'task_state': None,
'trusted_certs': None
}
@ddt.ddt
class LibvirtConnTestCase(test.NoDBTestCase,
test_diagnostics.DiagnosticsComparisonMixin):
REQUIRES_LOCKING = True
_EPHEMERAL_20_DEFAULT = eph_name(20)
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.get_admin_context()
temp_dir = self.useFixture(fixtures.TempDir()).path
self.flags(instances_path=temp_dir,
firewall_driver=None)
self.flags(snapshots_directory=temp_dir, group='libvirt')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.flags(sysinfo_serial="hardware", group="libvirt")
# normally loaded during nova-compute startup
os_vif.initialize()
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
self.stub_out('nova.virt.disk.api.extend',
lambda image, size, use_cow=False: None)
self.stub_out('nova.virt.libvirt.imagebackend.Image.'
'resolve_driver_format',
imagebackend.Image._get_driver_format)
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.test_instance = _create_test_instance()
self.test_image_meta = {
"disk_format": "raw",
}
self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
self)
self.device_xml_tmpl = """
<domain type='kvm'>
<devices>
<disk type='block' device='disk'>
<driver name='qemu' type='raw' cache='none'/>
<source dev='{device_path}'/>
<target bus='virtio' dev='vdb'/>
<serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
<address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
</disk>
</devices>
</domain>
"""
def relpath(self, path):
return os.path.relpath(path, CONF.instances_path)
def tearDown(self):
nova.tests.unit.image.fake.FakeImageService_reset()
super(LibvirtConnTestCase, self).tearDown()
def test_driver_capabilities(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertTrue(drvr.capabilities['has_imagecache'],
'Driver capabilities for \'has_imagecache\' '
'is invalid')
self.assertTrue(drvr.capabilities['supports_evacuate'],
'Driver capabilities for \'supports_evacuate\' '
'is invalid')
self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'],
'Driver capabilities for '
'\'supports_migrate_to_same_host\' is invalid')
self.assertTrue(drvr.capabilities['supports_attach_interface'],
'Driver capabilities for '
'\'supports_attach_interface\' '
'is invalid')
self.assertTrue(drvr.capabilities['supports_extend_volume'],
'Driver capabilities for '
'\'supports_extend_volume\' '
'is invalid')
self.assertFalse(drvr.requires_allocation_refresh,
'Driver does not need allocation refresh')
self.assertTrue(drvr.capabilities['supports_trusted_certs'],
'Driver capabilities for '
'\'supports_trusted_certs\' '
'is invalid')
def create_fake_libvirt_mock(self, **kwargs):
"""Defining mocks for LibvirtDriver(libvirt is not used)."""
# A fake libvirt.virConnect
class FakeLibvirtDriver(object):
def defineXML(self, xml):
return FakeVirtDomain()
# Creating mocks
fake = FakeLibvirtDriver()
# Customizing above fake if necessary
for key, val in kwargs.items():
fake.__setattr__(key, val)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._conn', fake)
self.stub_out('nova.virt.libvirt.host.Host.get_connection',
lambda x: fake)
def fake_lookup(self, instance_name):
return FakeVirtDomain()
def fake_execute(self, *args, **kwargs):
open(args[-1], "a").close()
def _create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'disabled': kwargs.get('disabled', False),
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0}
return objects.Service(**service_ref)
def _get_pause_flag(self, drvr, network_info, power_on=True,
vifs_already_plugged=False):
timeout = CONF.vif_plugging_timeout
events = []
if (drvr._conn_supports_start_paused and
utils.is_neutron() and
not vifs_already_plugged and
power_on and timeout):
events = drvr._get_neutron_events(network_info)
return bool(events)
def test_public_api_signatures(self):
baseinst = driver.ComputeDriver(None)
inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertPublicAPISignatures(baseinst, inst)
def test_legacy_block_device_info(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertFalse(drvr.need_legacy_block_device_info)
@mock.patch.object(host.Host, "has_min_version")
def test_min_version_start_ok(self, mock_version):
mock_version.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
@mock.patch.object(host.Host, "has_min_version")
def test_min_version_start_abort(self, mock_version):
mock_version.return_value = False
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.NovaException,
drvr.init_host,
"dummyhost")
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_next_min_version_deprecation_warning(self, mock_warning,
mock_get_libversion):
# Skip test if there's no currently planned new min version
if (versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_VERSION)):
self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")
# Test that a warning is logged if the libvirt version is less than
# the next required minimum version.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
# assert that the next min version is in a warning message
expected_arg = {'version': versionutils.convert_version_to_str(
versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertTrue(version_arg_found)
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION) - 1)
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_next_min_qemu_version_deprecation_warning(self, mock_warning,
mock_get_libversion):
# Skip test if there's no currently planned new min version
if (versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_VERSION)):
self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")
# Test that a warning is logged if the libvirt version is less than
# the next required minimum version.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
# assert that the next min version is in a warning message
expected_arg = {'version': versionutils.convert_version_to_str(
versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION))}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertTrue(version_arg_found)
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
# Skip test if there's no currently planned new min version
if (versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_VERSION)):
self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")
# Test that a warning is not logged if the libvirt version is greater
# than or equal to NEXT_MIN_LIBVIRT_VERSION.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
# assert that the next min version is in a warning message
expected_arg = {'version': versionutils.convert_version_to_str(
versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertFalse(version_arg_found)
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION))
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_next_min_qemu_version_ok(self, mock_warning, mock_get_libversion):
# Skip test if there's no currently planned new min version
if (versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_VERSION)):
self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")
# Test that a warning is not logged if the libvirt version is greater
# than or equal to NEXT_MIN_QEMU_VERSION.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
# assert that the next min version is in a warning message
expected_arg = {'version': versionutils.convert_version_to_str(
versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION))}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertFalse(version_arg_found)
# NOTE(sdague): python2.7 and python3.5 have different behaviors
# when it comes to comparing against the sentinel, so
# has_min_version is needed to pass python3.5.
@mock.patch.object(nova.virt.libvirt.host.Host, "has_min_version",
return_value=True)
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=mock.sentinel.qemu_version)
def test_qemu_image_version(self, mock_get_libversion, min_ver):
"""Test that init_host sets qemu image version
A sentinel is used here so that we aren't chasing this value
against minimums that get raised over time.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
self.assertEqual(images.QEMU_VERSION, mock.sentinel.qemu_version)
@mock.patch.object(fields.Architecture, "from_host",
return_value=fields.Architecture.PPC64)
def test_min_version_ppc_ok(self, mock_arch):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
@mock.patch.object(fields.Architecture, "from_host",
return_value=fields.Architecture.S390X)
def test_min_version_s390_ok(self, mock_arch):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
def test_file_backed_memory_support_called(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(drvr,
'_check_file_backed_memory_support') as mock_check_fb_support:
drvr.init_host("dummyhost")
self.assertTrue(mock_check_fb_support.called)
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION))
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION))
def test_min_version_file_backed_ok(self, mock_libv, mock_qemu):
self.flags(file_backed_memory=1024, group='libvirt')
self.flags(ram_allocation_ratio=1.0)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._check_file_backed_memory_support()
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION) - 1)
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION))
def test_min_version_file_backed_old_libvirt(self, mock_libv, mock_qemu):
self.flags(file_backed_memory=1024, group="libvirt")
self.flags(ram_allocation_ratio=1.0)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.InternalError,
drvr._check_file_backed_memory_support)
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION))
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION) - 1)
def test_min_version_file_backed_old_qemu(self, mock_libv, mock_qemu):
self.flags(file_backed_memory=1024, group="libvirt")
self.flags(ram_allocation_ratio=1.0)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.InternalError,
drvr._check_file_backed_memory_support)
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION))
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION))
def test_min_version_file_backed_bad_ram_allocation_ratio(self, mock_libv,
mock_qemu):
self.flags(file_backed_memory=1024, group="libvirt")
self.flags(ram_allocation_ratio=1.5)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.InternalError,
drvr._check_file_backed_memory_support)
def _do_test_parse_migration_flags(self, lm_expected=None,
bm_expected=None):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._parse_migration_flags()
if lm_expected is not None:
self.assertEqual(lm_expected, drvr._live_migration_flags)
if bm_expected is not None:
self.assertEqual(bm_expected, drvr._block_migration_flags)
def test_parse_live_migration_flags_default(self):
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE))
def test_parse_live_migration_flags(self):
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE))
def test_parse_block_migration_flags_default(self):
self._do_test_parse_migration_flags(
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
def test_parse_block_migration_flags(self):
self._do_test_parse_migration_flags(
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
def test_parse_migration_flags_p2p_xen(self):
self.flags(virt_type='xen', group='libvirt')
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
def test_live_migration_tunnelled_true(self):
self.flags(live_migration_tunnelled=True, group='libvirt')
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_live_migration_permit_postcopy_true(self, host):
self.flags(live_migration_permit_post_copy=True, group='libvirt')
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_live_migration_permit_auto_converge_true(self, host):
self.flags(live_migration_permit_auto_converge=True, group='libvirt')
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE),
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE))
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_live_migration_permit_auto_converge_and_post_copy_true(self,
host):
self.flags(live_migration_permit_auto_converge=True, group='libvirt')
self.flags(live_migration_permit_post_copy=True, group='libvirt')
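        # Both options are enabled and has_min_version is mocked to True, so
        # post-copy is available; the expected flags include
        # VIR_MIGRATE_POSTCOPY only and auto-converge is not added.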
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))
@mock.patch.object(host.Host, 'has_min_version', return_value=False)
def test_live_migration_auto_converge_and_post_copy_true_old_libvirt(
self, min_ver):
self.flags(live_migration_permit_auto_converge=True, group='libvirt')
self.flags(live_migration_permit_post_copy=True, group='libvirt')
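        # has_min_version is mocked to False, so post-copy is unavailable and
        # the driver is expected to fall back to VIR_MIGRATE_AUTO_CONVERGE.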
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE),
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE))
min_ver.assert_called_with(
lv_ver=libvirt_driver.MIN_LIBVIRT_POSTCOPY_VERSION)
@mock.patch.object(host.Host, 'has_min_version', return_value=False)
def test_live_migration_permit_postcopy_true_old_libvirt(self, host):
self.flags(live_migration_permit_post_copy=True, group='libvirt')
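        # With an old libvirt (has_min_version mocked to False) the post-copy
        # flag is expected to be dropped from both flag sets.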
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
def test_live_migration_permit_postcopy_false(self):
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
def test_live_migration_permit_autoconverge_false(self):
self._do_test_parse_migration_flags(
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_set_admin_password(self, mock_get_guest, ver, mock_image):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.test_instance)
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes"}}
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_get_guest.return_value = mock_guest
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.set_admin_password(instance, "123")
mock_guest.set_user_password.assert_called_once_with("root", "123")
@mock.patch('nova.objects.Instance.save')
@mock.patch('oslo_serialization.base64.encode_as_text')
@mock.patch('nova.api.metadata.password.convert_password')
@mock.patch('nova.crypto.ssh_encrypt_text')
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_set_admin_password_saves_sysmeta(self, mock_get_guest,
ver, mock_image, mock_encrypt,
mock_convert, mock_encode,
mock_save):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.test_instance)
# Password will only be saved in sysmeta if the key_data is present
instance.key_data = 'ssh-rsa ABCFEFG'
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes"}}
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_get_guest.return_value = mock_guest
mock_convert.return_value = {'password_0': 'converted-password'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.set_admin_password(instance, "123")
mock_guest.set_user_password.assert_called_once_with("root", "123")
mock_encrypt.assert_called_once_with(instance.key_data, '123')
mock_encode.assert_called_once_with(mock_encrypt.return_value)
mock_convert.assert_called_once_with(None, mock_encode.return_value)
self.assertEqual('converted-password',
instance.system_metadata['password_0'])
mock_save.assert_called_once_with()
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_set_admin_password_parallels(self, mock_get_guest, ver):
self.flags(virt_type='parallels', group='libvirt')
instance = objects.Instance(**self.test_instance)
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_get_guest.return_value = mock_guest
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.set_admin_password(instance, "123")
mock_guest.set_user_password.assert_called_once_with("root", "123")
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.test_instance)
instance.os_type = "windows"
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes"}}
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_get_guest.return_value = mock_guest
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.set_admin_password(instance, "123")
mock_guest.set_user_password.assert_called_once_with(
"Administrator", "123")
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_set_admin_password_image(self, mock_get_guest, ver, mock_image):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.test_instance)
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes",
"os_admin_user": "foo"
}}
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_get_guest.return_value = mock_guest
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.set_admin_password(instance, "123")
mock_guest.set_user_password.assert_called_once_with("foo", "123")
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_set_admin_password_bad_hyp(self, mock_svc, mock_image):
self.flags(virt_type='lxc', group='libvirt')
instance = objects.Instance(**self.test_instance)
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes"}}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.SetAdminPasswdNotSupported,
drvr.set_admin_password, instance, "123")
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_set_admin_password_guest_agent_not_running(self, mock_svc):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.QemuGuestAgentNotEnabled,
drvr.set_admin_password, instance, "123")
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.test_instance)
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes"}}
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_guest.set_user_password.side_effect = (
fakelibvirt.libvirtError("error"))
mock_get_guest.return_value = mock_guest
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(
drvr, '_save_instance_password_if_sshkey_present') as save_p:
self.assertRaises(exception.NovaException,
drvr.set_admin_password, instance, "123")
save_p.assert_not_called()
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_set_admin_password_error_with_unicode(
self, mock_get_guest, ver, mock_image):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.test_instance)
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes"}}
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_guest.set_user_password.side_effect = (
fakelibvirt.libvirtError(
b"failed: \xe9\x94\x99\xe8\xaf\xaf\xe3\x80\x82"))
mock_get_guest.return_value = mock_guest
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.NovaException,
drvr.set_admin_password, instance, "123")
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_set_admin_password_not_implemented(
self, mock_get_guest, ver, mock_image):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.test_instance)
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes"}}
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
not_implemented = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"Guest agent disappeared while executing command",
error_code=fakelibvirt.VIR_ERR_AGENT_UNRESPONSIVE)
mock_guest.set_user_password.side_effect = not_implemented
mock_get_guest.return_value = mock_guest
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(NotImplementedError,
drvr.set_admin_password, instance, "123")
@mock.patch.object(objects.Service, 'save')
@mock.patch.object(objects.Service, 'get_by_compute_host')
def test_set_host_enabled_with_disable(self, mock_svc, mock_save):
# Tests disabling an enabled host.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
svc = self._create_service(host='fake-mini')
mock_svc.return_value = svc
drvr._set_host_enabled(False)
self.assertTrue(svc.disabled)
mock_save.assert_called_once_with()
@mock.patch.object(objects.Service, 'save')
@mock.patch.object(objects.Service, 'get_by_compute_host')
def test_set_host_enabled_with_enable(self, mock_svc, mock_save):
# Tests enabling a disabled host.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
svc = self._create_service(disabled=True, host='fake-mini')
mock_svc.return_value = svc
drvr._set_host_enabled(True)
# since disabled_reason is not set and not prefixed with "AUTO:",
# service should not be enabled.
mock_save.assert_not_called()
self.assertTrue(svc.disabled)
@mock.patch.object(objects.Service, 'save')
@mock.patch.object(objects.Service, 'get_by_compute_host')
def test_set_host_enabled_with_enable_state_enabled(self, mock_svc,
mock_save):
# Tests enabling an enabled host.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
svc = self._create_service(disabled=False, host='fake-mini')
mock_svc.return_value = svc
drvr._set_host_enabled(True)
self.assertFalse(svc.disabled)
mock_save.assert_not_called()
@mock.patch.object(objects.Service, 'save')
@mock.patch.object(objects.Service, 'get_by_compute_host')
def test_set_host_enabled_with_disable_state_disabled(self, mock_svc,
mock_save):
# Tests disabling a disabled host.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
svc = self._create_service(disabled=True, host='fake-mini')
mock_svc.return_value = svc
drvr._set_host_enabled(False)
mock_save.assert_not_called()
self.assertTrue(svc.disabled)
def test_set_host_enabled_swallows_exceptions(self):
# Tests that set_host_enabled will swallow exceptions coming from the
# db_api code so they don't break anything calling it, e.g. the
# _get_new_connection method.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
# Make db.service_get_by_compute_host raise NovaException; this
# is more robust than just raising ComputeHostNotFound.
db_mock.side_effect = exception.NovaException
drvr._set_host_enabled(False)
@mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
def test_prepare_pci_device(self, mock_lookup):
pci_devices = [dict(hypervisor_name='xxx')]
self.flags(virt_type='xen', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conn = drvr._host.get_connection()
mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
drvr._prepare_pci_devices_for_use(pci_devices)
@mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
@mock.patch.object(fakelibvirt.virNodeDevice, "dettach")
def test_prepare_pci_device_exception(self, mock_detach, mock_lookup):
pci_devices = [dict(hypervisor_name='xxx',
id='id1',
instance_uuid='uuid')]
self.flags(virt_type='xen', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conn = drvr._host.get_connection()
mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
mock_detach.side_effect = fakelibvirt.libvirtError("xxxx")
self.assertRaises(exception.PciDevicePrepareFailed,
drvr._prepare_pci_devices_for_use, pci_devices)
@mock.patch.object(host.Host, "has_min_version", return_value=False)
def test_device_metadata(self, mock_version):
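        # Guest XML with a mix of tagged and untagged disks and interfaces on
        # several bus types; the assertions below walk the metadata list that
        # _build_device_metadata is expected to produce from it.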
xml = """
<domain>
<name>dummy</name>
<uuid>32dfcb37-5af1-552b-357c-be8c3aa38310</uuid>
<memory>1048576</memory>
<vcpu>1</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-2.4'>hvm</type>
</os>
<devices>
<disk type='block' device='disk'>
<driver name='qemu' type='qcow2'/>
<source dev='/dev/mapper/generic'/>
<target dev='sda' bus='scsi'/>
<address type='drive' controller='0' bus='0' target='0' unit='0'/>
</disk>
<disk type='block' device='disk'>
<driver name='qemu' type='qcow2'/>
<source dev='/dev/mapper/generic-1'/>
<target dev='hda' bus='ide'/>
<address type='drive' controller='0' bus='1' target='0' unit='0'/>
</disk>
<disk type='block' device='disk'>
<driver name='qemu' type='qcow2'/>
<source dev='/dev/mapper/generic-2'/>
<target dev='hdb' bus='ide'/>
<address type='drive' controller='0' bus='1' target='1' unit='1'/>
</disk>
<disk type='block' device='disk'>
<driver name='qemu' type='qcow2'/>
<source dev='/dev/mapper/aa1'/>
<target dev='sdb' bus='usb'/>
</disk>
<disk type='block' device='disk'>
<driver name='qemu' type='qcow2'/>
<source dev='/var/lib/libvirt/images/centos'/>
<backingStore/>
<target dev='vda' bus='virtio'/>
<boot order='1'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x09'
function='0x0'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='/var/lib/libvirt/images/generic.qcow2'/>
<target dev='vdb' bus='virtio'/>
<address type='virtio-mmio'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/var/lib/libvirt/images/test.qcow2'/>
<backingStore/>
<target dev='vdc' bus='virtio'/>
<alias name='virtio-disk1'/>
<address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0000'/>
</disk>
<interface type='network'>
<mac address='52:54:00:f6:35:8f'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x0'/>
</interface>
<interface type='network'>
<mac address='51:5a:2c:a4:5e:1b'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04'
function='0x1'/>
</interface>
<interface type='network'>
<mac address='fa:16:3e:d1:28:e4'/>
<source network='default'/>
<model type='virtio'/>
<address type='virtio-mmio'/>
</interface>
<interface type='network'>
<mac address='52:54:00:14:6f:50'/>
<source network='default' bridge='virbr0'/>
<target dev='vnet0'/>
<model type='virtio'/>
<alias name='net0'/>
<address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0001'/>
</interface>
<hostdev mode="subsystem" type="pci" managed="yes">
<source>
<address bus="0x06" domain="0x0000" function="0x1"
slot="0x00"/>
</source>
</hostdev>
</devices>
</domain>"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
guest = libvirt_guest.Guest(dom)
instance_ref = objects.Instance(**self.test_instance)
bdms = block_device_obj.block_device_make_list_from_dicts(
self.context, [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/sda', 'tag': "db",
'volume_id': uuids.volume_1}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/hda', 'tag': "nfvfunc1",
'volume_id': uuids.volume_2}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 3,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/sdb', 'tag': "nfvfunc2",
'volume_id': uuids.volume_3}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 4,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/hdb',
'volume_id': uuids.volume_4}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 5,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/vda', 'tag': "nfvfunc3",
'volume_id': uuids.volume_5}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 6,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/vdb', 'tag': "nfvfunc4",
'volume_id': uuids.volume_6}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 7,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/vdc', 'tag': "nfvfunc5",
'volume_id': uuids.volume_7}),
]
)
vif = obj_vif.VirtualInterface(context=self.context)
vif.address = '52:54:00:f6:35:8f'
vif.network_id = 123
vif.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
vif.uuid = '12ec4b21-ef22-6c21-534b-ba3e3ab3a311'
vif.tag = 'mytag1'
vif1 = obj_vif.VirtualInterface(context=self.context)
vif1.address = '51:5a:2c:a4:5e:1b'
vif1.network_id = 123
vif1.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
vif1.uuid = 'abec4b21-ef22-6c21-534b-ba3e3ab3a312'
vif2 = obj_vif.VirtualInterface(context=self.context)
vif2.address = 'fa:16:3e:d1:28:e4'
vif2.network_id = 123
vif2.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
vif2.uuid = '645686e4-7086-4eab-8c2f-c41f017a1b16'
vif2.tag = 'mytag2'
vif3 = obj_vif.VirtualInterface(context=self.context)
vif3.address = '52:54:00:14:6f:50'
vif3.network_id = 123
vif3.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
vif3.uuid = '99cc3604-782d-4a32-a27c-bc33ac56ce86'
vif3.tag = 'mytag3'
vif4 = obj_vif.VirtualInterface(context=self.context)
vif4.address = 'da:d1:f2:91:95:c1'
vif4.tag = 'pf_tag'
vifs = [vif, vif1, vif2, vif3, vif4]
network_info = _fake_network_info(self, 4)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL
network_info[0]['address'] = "51:5a:2c:a4:5e:1b"
network_info[0]['details'] = dict(vlan='2145')
network_info[0]['profile'] = dict(trusted='true')
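        # network_info[0] is marked direct-physical with a vlan and a trusted
        # profile so the corresponding interface metadata should carry the
        # vlan and vf_trusted attributes checked below.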
instance_ref.info_cache = objects.InstanceInfoCache(
network_info=network_info)
with test.nested(
mock.patch('nova.objects.VirtualInterfaceList'
'.get_by_instance_uuid', return_value=vifs),
mock.patch('nova.objects.BlockDeviceMappingList'
'.get_by_instance_uuid', return_value=bdms),
mock.patch('nova.virt.libvirt.host.Host.get_guest',
return_value=guest),
mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc',
return_value=xml),
mock.patch.object(pci_utils, 'get_mac_by_pci_address',
return_value='da:d1:f2:91:95:c1')):
metadata_obj = drvr._build_device_metadata(self.context,
instance_ref)
metadata = metadata_obj.devices
self.assertEqual(11, len(metadata))
self.assertIsInstance(metadata[0],
objects.DiskMetadata)
self.assertIsInstance(metadata[0].bus,
objects.SCSIDeviceBus)
self.assertEqual(['db'], metadata[0].tags)
self.assertEqual(uuids.volume_1, metadata[0].serial)
self.assertFalse(metadata[0].bus.obj_attr_is_set('address'))
self.assertEqual(['nfvfunc1'], metadata[1].tags)
self.assertEqual(uuids.volume_2, metadata[1].serial)
self.assertIsInstance(metadata[1],
objects.DiskMetadata)
self.assertIsInstance(metadata[1].bus,
objects.IDEDeviceBus)
self.assertEqual(['nfvfunc1'], metadata[1].tags)
self.assertFalse(metadata[1].bus.obj_attr_is_set('address'))
self.assertIsInstance(metadata[2],
objects.DiskMetadata)
self.assertIsInstance(metadata[2].bus,
objects.USBDeviceBus)
self.assertEqual(['nfvfunc2'], metadata[2].tags)
self.assertEqual(uuids.volume_3, metadata[2].serial)
self.assertFalse(metadata[2].bus.obj_attr_is_set('address'))
self.assertIsInstance(metadata[3],
objects.DiskMetadata)
self.assertIsInstance(metadata[3].bus,
objects.PCIDeviceBus)
self.assertEqual(['nfvfunc3'], metadata[3].tags)
# NOTE(artom) We're not checking volume 4 because it's not tagged
# and only tagged devices appear in the metadata
self.assertEqual(uuids.volume_5, metadata[3].serial)
self.assertEqual('0000:00:09.0', metadata[3].bus.address)
self.assertIsInstance(metadata[4],
objects.DiskMetadata)
self.assertEqual(['nfvfunc4'], metadata[4].tags)
self.assertEqual(uuids.volume_6, metadata[4].serial)
self.assertIsInstance(metadata[5],
objects.DiskMetadata)
self.assertEqual(['nfvfunc5'], metadata[5].tags)
self.assertEqual(uuids.volume_7, metadata[5].serial)
self.assertIsInstance(metadata[6],
objects.NetworkInterfaceMetadata)
self.assertIsInstance(metadata[6].bus,
objects.PCIDeviceBus)
self.assertEqual(['mytag1'], metadata[6].tags)
self.assertEqual('0000:00:03.0', metadata[6].bus.address)
self.assertFalse(metadata[6].vf_trusted)
# Make sure that interface with vlan is exposed to the metadata
self.assertIsInstance(metadata[7],
objects.NetworkInterfaceMetadata)
self.assertEqual('51:5a:2c:a4:5e:1b', metadata[7].mac)
self.assertEqual(2145, metadata[7].vlan)
self.assertTrue(metadata[7].vf_trusted)
self.assertIsInstance(metadata[8],
objects.NetworkInterfaceMetadata)
self.assertEqual(['mytag2'], metadata[8].tags)
self.assertFalse(metadata[8].vf_trusted)
self.assertIsInstance(metadata[9],
objects.NetworkInterfaceMetadata)
self.assertEqual(['mytag3'], metadata[9].tags)
self.assertFalse(metadata[9].vf_trusted)
self.assertIsInstance(metadata[10],
objects.NetworkInterfaceMetadata)
self.assertEqual(['pf_tag'], metadata[10].tags)
self.assertEqual('da:d1:f2:91:95:c1', metadata[10].mac)
self.assertEqual('0000:06:00.1', metadata[10].bus.address)
@mock.patch.object(host.Host, 'get_connection')
@mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
def test_detach_pci_devices(self, mocked_get_xml_desc, mock_conn):
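        # The live domain XML contains the hostdev, but get_xml_desc is
        # mocked to return XML without it, so the detach loop should see the
        # device as already gone and return without raising.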
fake_domXML1_with_pci = (
"""<domain> <devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='xxx'/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x04' function='0x0'/>
</disk>
<hostdev mode="subsystem" type="pci" managed="yes">
<source>
<address function="0x1" slot="0x10" domain="0x0001"
bus="0x04"/>
</source>
</hostdev></devices></domain>""")
fake_domXML1_without_pci = (
"""<domain> <devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='xxx'/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0001' bus='0x00'
slot='0x04' function='0x0'/>
</disk></devices></domain>""")
pci_device_info = {'compute_node_id': 1,
'instance_uuid': 'uuid',
'address': '0001:04:10.1'}
pci_device = objects.PciDevice(**pci_device_info)
pci_devices = [pci_device]
mocked_get_xml_desc.return_value = fake_domXML1_without_pci
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
dom = fakelibvirt.Domain(
drvr._get_connection(), fake_domXML1_with_pci, False)
guest = libvirt_guest.Guest(dom)
drvr._detach_pci_devices(guest, pci_devices)
@mock.patch.object(host.Host, 'get_connection')
@mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
def test_detach_pci_devices_timeout(self, mocked_get_xml_desc, mock_conn):
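        # get_xml_desc keeps returning XML that still contains the hostdev,
        # so the driver should eventually give up and raise
        # PciDeviceDetachFailed.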
fake_domXML1_with_pci = (
"""<domain> <devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='xxx'/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x04' function='0x0'/>
</disk>
<hostdev mode="subsystem" type="pci" managed="yes">
<source>
<address function="0x1" slot="0x10" domain="0x0001"
bus="0x04"/>
</source>
</hostdev></devices></domain>""")
pci_device_info = {'compute_node_id': 1,
'instance_uuid': 'uuid',
'address': '0001:04:10.1'}
pci_device = objects.PciDevice(**pci_device_info)
pci_devices = [pci_device]
mocked_get_xml_desc.return_value = fake_domXML1_with_pci
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
dom = fakelibvirt.Domain(
drvr._get_connection(), fake_domXML1_with_pci, False)
guest = libvirt_guest.Guest(dom)
self.assertRaises(exception.PciDeviceDetachFailed,
drvr._detach_pci_devices, guest, pci_devices)
@mock.patch.object(connector, 'get_connector_properties')
def test_get_connector(self, fake_get_connector):
initiator = 'fake.initiator.iqn'
ip = 'fakeip'
host = 'fakehost'
wwpns = ['100010604b019419']
wwnns = ['200010604b019419']
self.flags(my_ip=ip)
self.flags(host=host)
expected = {
'ip': ip,
'initiator': initiator,
'host': host,
'wwpns': wwpns,
'wwnns': wwnns
}
volume = {
'id': 'fake'
}
# TODO(walter-boring) add the fake in os-brick
fake_get_connector.return_value = expected
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
result = drvr.get_volume_connector(volume)
self.assertThat(expected, matchers.DictMatches(result))
@mock.patch.object(connector, 'get_connector_properties')
def test_get_connector_storage_ip(self, fake_get_connector):
ip = '100.100.100.100'
storage_ip = '101.101.101.101'
self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
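        # my_block_storage_ip is set alongside my_ip; the connector should
        # report the storage IP rather than the management IP.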
volume = {
'id': 'fake'
}
expected = {
'ip': storage_ip
}
# TODO(walter-boring) add the fake in os-brick
fake_get_connector.return_value = expected
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
result = drvr.get_volume_connector(volume)
self.assertEqual(storage_ip, result['ip'])
def test_lifecycle_event_registration(self):
calls = []
def fake_registerErrorHandler(*args, **kwargs):
calls.append('fake_registerErrorHandler')
def fake_get_host_capabilities(**args):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = fields.Architecture.ARMV7
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
calls.append('fake_get_host_capabilities')
return caps
@mock.patch.object(fakelibvirt, 'registerErrorHandler',
side_effect=fake_registerErrorHandler)
@mock.patch.object(host.Host, "get_capabilities",
side_effect=fake_get_host_capabilities)
def test_init_host(get_host_capabilities, register_error_handler):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("test_host")
test_init_host()
# NOTE(dkliban): Will fail if get_host_capabilities is called before
# registerErrorHandler
self.assertEqual(['fake_registerErrorHandler',
'fake_get_host_capabilities'], calls)
def test_sanitize_log_to_xml(self):
# setup fake data
data = {'auth_password': 'scrubme'}
bdm = [{'connection_info': {'data': data}}]
bdi = {'block_device_mapping': bdm}
# Tests that the parameters to the _get_guest_xml method
# are sanitized for passwords when logged.
def fake_debug(*args, **kwargs):
if 'auth_password' in args[0]:
self.assertNotIn('scrubme', args[0])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = mock.Mock()
with test.nested(
mock.patch.object(libvirt_driver.LOG, 'debug',
side_effect=fake_debug),
mock.patch.object(drvr, '_get_guest_config', return_value=conf)
) as (
debug_mock, conf_mock
):
drvr._get_guest_xml(self.context, self.test_instance,
network_info={}, disk_info={},
image_meta={}, block_device_info=bdi)
# we don't care what the log message is, we just want to make sure
# our stub method is called which asserts the password is scrubbed
self.assertTrue(debug_mock.called)
@mock.patch.object(time, "time")
def test_get_guest_config(self, time_mock):
time_mock.return_value = 1234567.89
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
test_instance = copy.deepcopy(self.test_instance)
test_instance["display_name"] = "purple tomatoes"
test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
test_instance['system_metadata']['owner_user_name'] = 'cupcake'
ctxt = context.RequestContext(project_id=123,
project_name="aubergine",
user_id=456,
user_name="pie")
flavor = objects.Flavor(name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs={})
instance_ref = objects.Instance(**test_instance)
instance_ref.flavor = flavor
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info,
context=ctxt)
self.assertEqual(cfg.uuid, instance_ref["uuid"])
self.assertEqual(2, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureAPIC)
self.assertEqual(cfg.memory, 6 * units.Ki)
self.assertEqual(cfg.vcpus, 28)
self.assertEqual(cfg.os_type, fields.VMMode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertIsNone(cfg.os_root)
self.assertEqual(len(cfg.devices), 10)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(len(cfg.metadata), 1)
self.assertIsInstance(cfg.metadata[0],
vconfig.LibvirtConfigGuestMetaNovaInstance)
self.assertEqual(version.version_string_with_package(),
cfg.metadata[0].package)
self.assertEqual("purple tomatoes",
cfg.metadata[0].name)
self.assertEqual(1234567.89,
cfg.metadata[0].creationTime)
self.assertEqual("image",
cfg.metadata[0].roottype)
self.assertEqual(str(instance_ref["image_ref"]),
cfg.metadata[0].rootid)
self.assertIsInstance(cfg.metadata[0].owner,
vconfig.LibvirtConfigGuestMetaNovaOwner)
self.assertEqual("838a72b0-0d54-4827-8fd6-fb1227633ceb",
cfg.metadata[0].owner.userid)
self.assertEqual("cupcake",
cfg.metadata[0].owner.username)
self.assertEqual("fake",
cfg.metadata[0].owner.projectid)
self.assertEqual("sweetshop",
cfg.metadata[0].owner.projectname)
self.assertIsInstance(cfg.metadata[0].flavor,
vconfig.LibvirtConfigGuestMetaNovaFlavor)
self.assertEqual("m1.small",
cfg.metadata[0].flavor.name)
self.assertEqual(6,
cfg.metadata[0].flavor.memory)
self.assertEqual(28,
cfg.metadata[0].flavor.vcpus)
self.assertEqual(496,
cfg.metadata[0].flavor.disk)
self.assertEqual(8128,
cfg.metadata[0].flavor.ephemeral)
self.assertEqual(33550336,
cfg.metadata[0].flavor.swap)
def test_get_guest_config_q35(self):
self.flags(virt_type="kvm",
group='libvirt')
TEST_AMOUNT_OF_PCIE_SLOTS = 8
CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
group='libvirt')
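        # q35 is a PCIe machine type, so the generated config should contain
        # exactly num_pcie_ports pcie-root-port controllers.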
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_machine_type":
"pc-q35-test"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
num_ports = 0
for device in cfg.devices:
try:
if (device.root_name == 'controller' and
device.model == 'pcie-root-port'):
num_ports += 1
except AttributeError:
pass
self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
def test_get_guest_config_pcie_i440fx(self):
self.flags(virt_type="kvm",
group='libvirt')
TEST_AMOUNT_OF_PCIE_SLOTS = 8
CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_machine_type":
"pc-i440fx-test"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
num_ports = 0
for device in cfg.devices:
try:
if (device.root_name == 'controller' and
device.model == 'pcie-root-port'):
num_ports += 1
except AttributeError:
pass
# i440fx is not pcie machine so there should be no pcie ports
self.assertEqual(0, num_ports)
def test_get_guest_config_missing_ownership_info(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
test_instance = copy.deepcopy(self.test_instance)
ctxt = context.RequestContext(project_id=123,
project_name="aubergine",
user_id=456,
user_name="pie")
flavor = objects.Flavor(name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs={})
instance_ref = objects.Instance(**test_instance)
instance_ref.flavor = flavor
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info,
context=ctxt)
self.assertEqual("N/A",
cfg.metadata[0].owner.username)
self.assertEqual("N/A",
cfg.metadata[0].owner.projectname)
def test_get_guest_config_lxc(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, {'mapping': {}})
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
self.assertEqual(fields.VMMode.EXE, cfg.os_type)
self.assertEqual("/sbin/init", cfg.os_init_path)
self.assertEqual("console=tty0 console=ttyS0 console=hvc0",
cfg.os_cmdline)
self.assertIsNone(cfg.os_root)
self.assertEqual(3, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestFilesys)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_lxc_with_id_maps(self):
self.flags(virt_type='lxc', group='libvirt')
self.flags(uid_maps=['0:1000:100'], group='libvirt')
self.flags(gid_maps=['0:1000:100'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, {'mapping': {}})
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
self.assertEqual(instance_ref.vcpus, cfg.vcpus)
self.assertEqual(fields.VMMode.EXE, cfg.os_type)
self.assertEqual("/sbin/init", cfg.os_init_path)
self.assertEqual("console=tty0 console=ttyS0 console=hvc0",
cfg.os_cmdline)
self.assertIsNone(cfg.os_root)
self.assertEqual(3, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestFilesys)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
self.assertEqual(len(cfg.idmaps), 2)
self.assertIsInstance(cfg.idmaps[0],
vconfig.LibvirtConfigGuestUIDMap)
self.assertIsInstance(cfg.idmaps[1],
vconfig.LibvirtConfigGuestGIDMap)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_numa_host_instance_fits(self, is_able):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with test.nested(
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps)):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertIsNone(cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with test.nested(
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([3])),
mock.patch.object(random, 'choice'),
mock.patch.object(drvr, '_has_numa_support',
return_value=False)
) as (get_host_cap_mock,
get_vcpu_pin_set_mock, choice_mock,
_has_numa_support_mock):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertFalse(choice_mock.called)
self.assertEqual(set([3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
def _test_get_guest_memory_backing_config(
self, host_topology, inst_topology, numatune):
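        # Helper: stub the host NUMA topology and return the memory backing
        # config the driver generates for the given instance topology and
        # numatune settings.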
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(
drvr, "_get_host_numa_topology",
return_value=host_topology):
return drvr._get_guest_memory_backing_config(
inst_topology, numatune, {})
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_get_guest_memory_backing_config_large_success(self, mock_version):
host_topology = objects.NUMATopology(
cells=[
objects.NUMACell(
id=3, cpuset=set([1]), siblings=[set([1])], memory=1024,
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=2000,
used=0),
objects.NUMAPagesTopology(size_kb=2048, total=512,
used=0),
objects.NUMAPagesTopology(size_kb=1048576, total=0,
used=0),
])])
inst_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])
numa_tune = vconfig.LibvirtConfigGuestNUMATune()
numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
numa_tune.memnodes[0].cellid = 0
numa_tune.memnodes[0].nodeset = [3]
result = self._test_get_guest_memory_backing_config(
host_topology, inst_topology, numa_tune)
self.assertEqual(1, len(result.hugepages))
self.assertEqual(2048, result.hugepages[0].size_kb)
self.assertEqual([0], result.hugepages[0].nodeset)
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_get_guest_memory_backing_config_smallest(self, mock_version):
host_topology = objects.NUMATopology(
cells=[
objects.NUMACell(
id=3, cpuset=set([1]), siblings=[set([1])], memory=1024,
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=2000,
used=0),
objects.NUMAPagesTopology(size_kb=2048, total=512,
used=0),
objects.NUMAPagesTopology(size_kb=1048576, total=0,
used=0),
])])
inst_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)])
numa_tune = vconfig.LibvirtConfigGuestNUMATune()
numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
numa_tune.memnodes[0].cellid = 0
numa_tune.memnodes[0].nodeset = [3]
result = self._test_get_guest_memory_backing_config(
host_topology, inst_topology, numa_tune)
self.assertIsNone(result)
def test_get_guest_memory_backing_config_realtime(self):
flavor = {"extra_specs": {
"hw:cpu_realtime": "yes",
"hw:cpu_policy": "dedicated"
}}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
membacking = drvr._get_guest_memory_backing_config(
None, None, flavor)
self.assertTrue(membacking.locked)
self.assertFalse(membacking.sharedpages)
def test_get_guest_memory_backing_config_file_backed(self):
self.flags(file_backed_memory=1024, group="libvirt")
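        # With file_backed_memory set, the memory backing is expected to use
        # a file source with shared access and immediate allocation.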
result = self._test_get_guest_memory_backing_config(
None, None, None
)
self.assertTrue(result.sharedaccess)
self.assertTrue(result.filesource)
self.assertTrue(result.allocateimmediate)
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_guest_memory_backing_config_file_backed_discard(self,
mock_lib_version, mock_version):
self.flags(file_backed_memory=1024, group='libvirt')
mock_lib_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
mock_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)
result = self._test_get_guest_memory_backing_config(
None, None, None
)
self.assertTrue(result.discard)
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_guest_memory_backing_config_file_backed_discard_libvirt(self,
mock_lib_version, mock_version):
self.flags(file_backed_memory=1024, group='libvirt')
mock_lib_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION) - 1
mock_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)
result = self._test_get_guest_memory_backing_config(
None, None, None
)
self.assertFalse(result.discard)
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_guest_memory_backing_config_file_backed_discard_qemu(self,
mock_lib_version, mock_version):
self.flags(file_backed_memory=1024, group='libvirt')
mock_lib_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
mock_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION) - 1
result = self._test_get_guest_memory_backing_config(
None, None, None
)
self.assertFalse(result.discard)
def test_get_guest_memory_backing_config_file_backed_hugepages(self):
self.flags(file_backed_memory=1024, group="libvirt")
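        # Requesting an explicit hugepage size while file_backed_memory is
        # enabled is expected to raise MemoryPagesUnsupported.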
host_topology = objects.NUMATopology(
cells=[
objects.NUMACell(
id=3, cpuset=set([1]), siblings=[set([1])], memory=1024,
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=2000,
used=0),
objects.NUMAPagesTopology(size_kb=2048, total=512,
used=0),
objects.NUMAPagesTopology(size_kb=1048576, total=0,
used=0),
])])
inst_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])
numa_tune = vconfig.LibvirtConfigGuestNUMATune()
numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
numa_tune.memnodes[0].cellid = 0
numa_tune.memnodes[0].nodeset = [3]
self.assertRaises(exception.MemoryPagesUnsupported,
self._test_get_guest_memory_backing_config,
host_topology, inst_topology, numa_tune)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_numa_host_instance_pci_no_numa_info(
self, is_able):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
request_id=None,
extra_info={},
numa_node=None)
pci_device = objects.PciDevice(**pci_device_info)
with test.nested(
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([3])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
mock.patch.object(pci_manager, "get_instance_pci_devs",
return_value=[pci_device])):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(set([3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
request_id=None,
extra_info={},
numa_node=1)
pci_device = objects.PciDevice(**pci_device_info)
pci_device_info.update(numa_node=0, address='0000:00:00.2')
pci_device2 = objects.PciDevice(**pci_device_info)
with test.nested(
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([3])),
mock.patch.object(random, 'choice'),
mock.patch.object(pci_manager, "get_instance_pci_devs",
return_value=[pci_device, pci_device2]),
mock.patch.object(conn, '_has_numa_support',
return_value=False)
) as (get_host_cap_mock,
get_vcpu_pin_set_mock, choice_mock, pci_mock,
_has_numa_support_mock):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertFalse(choice_mock.called)
self.assertEqual(set([3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
@mock.patch.object(host.Host, 'get_capabilities')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
def _test_get_guest_config_numa_unsupported(self, fake_lib_version,
fake_version, fake_type,
fake_arch, exception_class,
pagesize, mock_host,
mock_caps, mock_lib_version,
mock_version, mock_type):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([0]),
memory=1024, pagesize=pagesize)])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fake_arch
caps.host.topology = fakelibvirt.NUMATopology()
mock_type.return_value = fake_type
mock_version.return_value = fake_version
mock_lib_version.return_value = fake_lib_version
mock_caps.return_value = caps
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
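        # Requesting a guest NUMA topology on an unsupported virt driver or
        # architecture must raise rather than being silently ignored.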
self.assertRaises(exception_class,
drvr._get_guest_config,
instance_ref, [],
image_meta, disk_info)
def test_get_guest_config_numa_other_arch_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_VERSION),
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_VERSION),
host.HV_DRIVER_QEMU,
fields.Architecture.S390,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_xen(self):
self.flags(virt_type='xen', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_VERSION),
versionutils.convert_version_to_int((4, 5, 0)),
'XEN',
fields.Architecture.X86_64,
exception.NUMATopologyUnsupported,
None)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
self, is_able):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology(kb_mem=4194304)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with test.nested(
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8)))
) as (has_min_version_mock, get_host_cap_mock,
get_vcpu_pin_set_mock, get_online_cpus_mock):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
# NOTE(ndipanov): we make sure that pin_set was taken into account
# when choosing viable cells
self.assertEqual(set([2, 3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024),
objects.InstanceNUMACell(
id=1, cpuset=set([2]), memory=1024)])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = None
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with test.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps)):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
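        # The host exposes no NUMA topology, so there is no pinning and no
        # numatune, but the guest still gets a virtual NUMA layout
        # mirroring the instance topology.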
self.assertIsNone(cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.numatune)
self.assertIsNotNone(cfg.cpu.numa)
for instance_cell, numa_cfg_cell in zip(
instance_topology.cells, cfg.cpu.numa.cells):
self.assertEqual(instance_cell.id, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_numa_host_instance_topo(self, is_able):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]), memory=1024,
pagesize=None)])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with test.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set',
return_value=set([2, 3, 4, 5])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertIsNone(cfg.cpuset)
        # Test that the pinning is correct and limited to the allowed CPUs
self.assertEqual(0, cfg.cputune.vcpupin[0].id)
self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
self.assertEqual(1, cfg.cputune.vcpupin[1].id)
self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
self.assertEqual(2, cfg.cputune.vcpupin[2].id)
self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
self.assertEqual(3, cfg.cputune.vcpupin[3].id)
self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
self.assertIsNotNone(cfg.cpu.numa)
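        # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset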
self.assertIsInstance(cfg.cputune.emulatorpin,
vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
for instance_cell, numa_cfg_cell, index in zip(
instance_topology.cells,
cfg.cpu.numa.cells,
range(len(instance_topology.cells))):
self.assertEqual(index, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
allnodes = [cell.id for cell in instance_topology.cells]
self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
self.assertEqual("strict", cfg.numatune.memory.mode)
for instance_cell, memnode, index in zip(
instance_topology.cells,
cfg.numatune.memnodes,
range(len(instance_topology.cells))):
self.assertEqual(index, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_instance_topo_reordered(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=3, cpuset=set([0, 1]), memory=1024),
objects.InstanceNUMACell(
id=0, cpuset=set([2, 3]), memory=1024)])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with test.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertIsNone(cfg.cpuset)
        # Test that the pinning is correct and limited to the allowed CPUs
self.assertEqual(0, cfg.cputune.vcpupin[0].id)
self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
self.assertEqual(1, cfg.cputune.vcpupin[1].id)
self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
self.assertEqual(2, cfg.cputune.vcpupin[2].id)
self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
self.assertEqual(3, cfg.cputune.vcpupin[3].id)
self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
self.assertIsNotNone(cfg.cpu.numa)
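        # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset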
self.assertIsInstance(cfg.cputune.emulatorpin,
vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset)
for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
instance_topology.cells,
cfg.cpu.numa.cells)):
self.assertEqual(index, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertIsNone(numa_cfg_cell.memAccess)
allnodes = set([cell.id for cell in instance_topology.cells])
self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
self.assertEqual("strict", cfg.numatune.memory.mode)
for index, (instance_cell, memnode) in enumerate(zip(
instance_topology.cells,
cfg.numatune.memnodes)):
self.assertEqual(index, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), memory=1024,
cpu_pinning={0: 24, 1: 25}),
objects.InstanceNUMACell(
id=0, cpuset=set([2, 3]), memory=1024,
cpu_pinning={2: 0, 3: 1})])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology(
sockets_per_cell=4, cores_per_socket=3, threads_per_core=2)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with test.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertIsNone(cfg.cpuset)
        # Test that the pinning is correct and limited to the allowed CPUs
self.assertEqual(0, cfg.cputune.vcpupin[0].id)
self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
self.assertEqual(1, cfg.cputune.vcpupin[1].id)
self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
self.assertEqual(2, cfg.cputune.vcpupin[2].id)
self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
self.assertEqual(3, cfg.cputune.vcpupin[3].id)
self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
self.assertIsNotNone(cfg.cpu.numa)
# Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
self.assertIsInstance(cfg.cputune.emulatorpin,
vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
self.assertEqual(set([0, 1, 24, 25]),
cfg.cputune.emulatorpin.cpuset)
for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
instance_topology.cells, cfg.cpu.numa.cells)):
self.assertEqual(i, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertIsNone(numa_cfg_cell.memAccess)
allnodes = set([cell.id for cell in instance_topology.cells])
self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
self.assertEqual("strict", cfg.numatune.memory.mode)
for i, (instance_cell, memnode) in enumerate(zip(
instance_topology.cells, cfg.numatune.memnodes)):
self.assertEqual(i, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_mempages_shared(self):
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]),
memory=1024, pagesize=2048),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]),
memory=1024, pagesize=2048)])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
for i, cell in enumerate(caps.host.topology.cells):
cell.mempages = fakelibvirt.create_mempages(
[(4, 1024 * i), (2048, i)])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with test.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set',
return_value=set([2, 3, 4, 5])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
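        # Each guest NUMA cell is backed by 2M pages, which is expected to
        # show up as "shared" memAccess on the guest NUMA cells.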
for instance_cell, numa_cfg_cell, index in zip(
instance_topology.cells,
cfg.cpu.numa.cells,
range(len(instance_topology.cells))):
self.assertEqual(index, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertEqual("shared", numa_cfg_cell.memAccess)
allnodes = [cell.id for cell in instance_topology.cells]
self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
self.assertEqual("strict", cfg.numatune.memory.mode)
for instance_cell, memnode, index in zip(
instance_topology.cells,
cfg.numatune.memnodes,
range(len(instance_topology.cells))):
self.assertEqual(index, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
self.assertEqual(0, len(cfg.cputune.vcpusched))
self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self):
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=2, cpuset=set([0, 1]),
memory=1024, pagesize=2048),
objects.InstanceNUMACell(
id=3, cpuset=set([2, 3]),
memory=1024, pagesize=2048)])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={
"hw:cpu_realtime": "yes",
"hw:cpu_policy": "dedicated",
"hw:cpu_realtime_mask": "^0-1"
})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
for i, cell in enumerate(caps.host.topology.cells):
cell.mempages = fakelibvirt.create_mempages(
[(4, 1024 * i), (2048, i)])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with test.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set',
return_value=set([4, 5, 6, 7])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for instance_cell, numa_cfg_cell, index in zip(
instance_topology.cells,
cfg.cpu.numa.cells,
range(len(instance_topology.cells))):
self.assertEqual(index, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertEqual("shared", numa_cfg_cell.memAccess)
allnodes = [cell.id for cell in instance_topology.cells]
self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
self.assertEqual("strict", cfg.numatune.memory.mode)
for instance_cell, memnode, index in zip(
instance_topology.cells,
cfg.numatune.memnodes,
range(len(instance_topology.cells))):
self.assertEqual(index, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
self.assertEqual(1, len(cfg.cputune.vcpusched))
self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)
        # Ensure vCPUs 0-1 are pinned on host CPUs 4-5 and vCPUs 2-3 are
        # pinned on host CPUs 6-7, according to the realtime mask ^0-1
self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[0].cpuset)
self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[1].cpuset)
self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[2].cpuset)
self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[3].cpuset)
        # We ensure that emulator threads are pinned on host CPUs 4-5,
        # which host the "normal" (non-realtime) vCPUs
self.assertEqual(set([4, 5]), cfg.cputune.emulatorpin.cpuset)
        # We ensure that the realtime scheduler is applied to vCPUs 2-3,
        # which are the ones pinned to host CPUs 6-7
self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)
def test_get_guest_config_numa_host_instance_isolated_emulthreads(self):
instance_topology = objects.InstanceNUMATopology(
emulator_threads_policy=(
fields.CPUEmulatorThreadsPolicy.ISOLATE),
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 4, 1: 5},
cpuset_reserved=set([6])),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={2: 7, 3: 8})])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = fakelibvirt.NUMATopology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref, image_meta)
with test.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set',
return_value=set([4, 5, 6, 7, 8])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(10))),
):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
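            # With the ISOLATE emulator threads policy, emulator threads
            # run on the reserved pCPU (6) while the vCPUs keep their
            # dedicated pins.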
self.assertEqual(set([6]), cfg.cputune.emulatorpin.cpuset)
self.assertEqual(set([4]), cfg.cputune.vcpupin[0].cpuset)
self.assertEqual(set([5]), cfg.cputune.vcpupin[1].cpuset)
self.assertEqual(set([7]), cfg.cputune.vcpupin[2].cpuset)
self.assertEqual(set([8]), cfg.cputune.vcpupin[3].cpuset)
def test_get_guest_config_numa_host_instance_shared_emulthreads_err(
self):
self.flags(cpu_shared_set="48-50", group="compute")
instance_topology = objects.InstanceNUMATopology(
emulator_threads_policy=(
fields.CPUEmulatorThreadsPolicy.SHARE),
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 4, 1: 5},
cpuset_reserved=set([6])),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={2: 7, 3: 8})])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = fakelibvirt.NUMATopology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref, image_meta)
with test.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set',
return_value=set([4, 5, 6, 7, 8])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(10))),
):
# pCPUs [48-50] are not online
self.assertRaises(exception.Invalid, drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
def test_get_guest_config_numa_host_instance_shared_emulator_threads(
self):
self.flags(cpu_shared_set="48-50", group="compute")
instance_topology = objects.InstanceNUMATopology(
emulator_threads_policy=(
fields.CPUEmulatorThreadsPolicy.SHARE),
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={0: 4, 1: 5},
cpuset_reserved=set([6])),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]),
memory=1024, pagesize=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
cpu_pinning={2: 7, 3: 8})])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = fakelibvirt.NUMATopology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref, image_meta)
with test.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set',
return_value=set([4, 5, 6, 7, 8])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(list(range(10)) +
[48, 50])),
):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
# cpu_shared_set is configured with [48, 49, 50] but only
# [48, 50] are online.
self.assertEqual(set([48, 50]), cfg.cputune.emulatorpin.cpuset)
self.assertEqual(set([4]), cfg.cputune.vcpupin[0].cpuset)
self.assertEqual(set([5]), cfg.cputune.vcpupin[1].cpuset)
self.assertEqual(set([7]), cfg.cputune.vcpupin[2].cpuset)
self.assertEqual(set([8]), cfg.cputune.vcpupin[3].cpuset)
def test_get_cpu_numa_config_from_instance(self):
topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(topology, True)
self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
self.assertEqual(0, conf.cells[0].id)
self.assertEqual(set([1, 2]), conf.cells[0].cpus)
self.assertEqual(131072, conf.cells[0].memory)
self.assertEqual("shared", conf.cells[0].memAccess)
self.assertEqual(1, conf.cells[1].id)
self.assertEqual(set([3, 4]), conf.cells[1].cpus)
self.assertEqual(131072, conf.cells[1].memory)
self.assertEqual("shared", conf.cells[1].memAccess)
def test_get_cpu_numa_config_from_instance_none(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(None, False)
self.assertIsNone(conf)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
return_value=True)
def test_get_memnode_numa_config_from_instance(self, mock_numa):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
objects.InstanceNUMACell(id=16, cpuset=set([5, 6]), memory=128)
])
host_topology = objects.NUMATopology(
cells=[
objects.NUMACell(
id=0, cpuset=set([1, 2]), siblings=[set([1]), set([2])],
memory=1024, mempages=[]),
objects.NUMACell(
id=1, cpuset=set([3, 4]), siblings=[set([3]), set([4])],
memory=1024, mempages=[]),
objects.NUMACell(
id=16, cpuset=set([5, 6]), siblings=[set([5]), set([6])],
memory=1024, mempages=[])])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with test.nested(
mock.patch.object(drvr, "_get_host_numa_topology",
return_value=host_topology)):
guest_numa_config = drvr._get_guest_numa_config(instance_topology,
flavor={}, allowed_cpus=[1, 2, 3, 4, 5, 6], image_meta={})
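            # The third instance cell lives on host NUMA node 16: the
            # guest cell id stays sequential (2) while the memnode nodeset
            # points at the real host node.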
self.assertEqual(2, guest_numa_config.numatune.memnodes[2].cellid)
self.assertEqual([16],
guest_numa_config.numatune.memnodes[2].nodeset)
self.assertEqual(set([5, 6]),
guest_numa_config.numaconfig.cells[2].cpus)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
return_value=True)
@mock.patch.object(host.Host, "get_capabilities")
def test_does_not_want_hugepages(self, mock_caps, mock_numa):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]),
memory=1024, pagesize=4),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]),
memory=1024, pagesize=4)])
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
mock_caps.return_value = caps
host_topology = drvr._get_host_numa_topology()
self.assertFalse(drvr._wants_hugepages(None, None))
self.assertFalse(drvr._wants_hugepages(host_topology, None))
self.assertFalse(drvr._wants_hugepages(None, instance_topology))
self.assertFalse(drvr._wants_hugepages(host_topology,
instance_topology))
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
return_value=True)
@mock.patch.object(host.Host, "get_capabilities")
def test_does_want_hugepages(self, mock_caps, mock_numa):
for arch in [fields.Architecture.I686,
fields.Architecture.X86_64,
fields.Architecture.AARCH64,
fields.Architecture.PPC64LE,
fields.Architecture.PPC64]:
self._test_does_want_hugepages(mock_caps, mock_numa, arch)
def _test_does_want_hugepages(self, mock_caps, mock_numa, architecture):
self.flags(reserved_huge_pages=[
{'node': 0, 'size': 2048, 'count': 128},
{'node': 1, 'size': 2048, 'count': 1},
{'node': 3, 'size': 2048, 'count': 64}])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]),
memory=1024, pagesize=2048),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]),
memory=1024, pagesize=2048)])
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = architecture
caps.host.topology = fakelibvirt.NUMATopology()
for i, cell in enumerate(caps.host.topology.cells):
cell.mempages = fakelibvirt.create_mempages(
[(4, 1024 * i), (2048, i)])
mock_caps.return_value = caps
host_topology = drvr._get_host_numa_topology()
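        # The reserved_huge_pages entries map onto the matching host
        # cells; node 2 has no reservation configured, so it reports 0.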
self.assertEqual(128, host_topology.cells[0].mempages[1].reserved)
self.assertEqual(1, host_topology.cells[1].mempages[1].reserved)
self.assertEqual(0, host_topology.cells[2].mempages[1].reserved)
self.assertEqual(64, host_topology.cells[3].mempages[1].reserved)
self.assertTrue(drvr._wants_hugepages(host_topology,
instance_topology))
def test_get_guest_config_clock(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
hpet_map = {
fields.Architecture.X86_64: True,
fields.Architecture.I686: True,
fields.Architecture.PPC: False,
fields.Architecture.PPC64: False,
fields.Architecture.ARMV7: False,
fields.Architecture.AARCH64: False,
}
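        # An hpet timer element (forced off) is only expected for x86 guests.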
for guestarch, expect_hpet in hpet_map.items():
with mock.patch.object(libvirt_driver.libvirt_utils,
'get_arch',
return_value=guestarch):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta,
disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "utc")
self.assertIsInstance(cfg.clock.timers[0],
vconfig.LibvirtConfigGuestTimer)
self.assertIsInstance(cfg.clock.timers[1],
vconfig.LibvirtConfigGuestTimer)
self.assertEqual(cfg.clock.timers[0].name, "pit")
self.assertEqual(cfg.clock.timers[0].tickpolicy,
"delay")
self.assertEqual(cfg.clock.timers[1].name, "rtc")
self.assertEqual(cfg.clock.timers[1].tickpolicy,
"catchup")
if expect_hpet:
self.assertEqual(3, len(cfg.clock.timers))
self.assertIsInstance(cfg.clock.timers[2],
vconfig.LibvirtConfigGuestTimer)
self.assertEqual('hpet', cfg.clock.timers[2].name)
self.assertFalse(cfg.clock.timers[2].present)
else:
self.assertEqual(2, len(cfg.clock.timers))
@mock.patch.object(libvirt_utils, 'get_arch')
def test_get_guest_config_windows_timer(self, mock_get_arch):
mock_get_arch.return_value = fields.Architecture.I686
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['os_type'] = 'windows'
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "localtime")
self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
self.assertEqual("pit", cfg.clock.timers[0].name)
self.assertEqual("rtc", cfg.clock.timers[1].name)
self.assertEqual("hpet", cfg.clock.timers[2].name)
self.assertFalse(cfg.clock.timers[2].present)
self.assertEqual("hypervclock", cfg.clock.timers[3].name)
self.assertTrue(cfg.clock.timers[3].present)
self.assertEqual(3, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureAPIC)
self.assertIsInstance(cfg.features[2],
vconfig.LibvirtConfigGuestFeatureHyperV)
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows_hyperv_feature2(self, mock_version):
mock_version.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['os_type'] = 'windows'
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "localtime")
self.assertEqual(3, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureAPIC)
self.assertIsInstance(cfg.features[2],
vconfig.LibvirtConfigGuestFeatureHyperV)
self.assertTrue(cfg.features[2].relaxed)
self.assertTrue(cfg.features[2].spinlocks)
self.assertEqual(8191, cfg.features[2].spinlock_retries)
self.assertTrue(cfg.features[2].vapic)
def test_get_guest_config_with_two_nics(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 2),
image_meta, disk_info)
self.assertEqual(2, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureAPIC)
self.assertEqual(cfg.memory, instance_ref.flavor.memory_mb * units.Ki)
self.assertEqual(cfg.vcpus, instance_ref.flavor.vcpus)
self.assertEqual(cfg.os_type, fields.VMMode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertIsNone(cfg.os_root)
self.assertEqual(len(cfg.devices), 10)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
def test_get_guest_config_bug_1118829(self):
self.flags(virt_type='uml', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
disk_info = {'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {u'vda': {'bus': 'virtio',
'type': 'disk',
'dev': u'vda'},
'root': {'bus': 'virtio',
'type': 'disk',
'dev': 'vda'}}}
        # NOTE(jdg): For this specific test leave this blank.  This still
        # exercises the failure code path without requiring fakes and
        # stubs of the iSCSI discovery
block_device_info = {}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
drvr._get_guest_config(instance_ref, [], image_meta, disk_info,
None, block_device_info)
self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
def test_get_guest_config_with_root_device_name(self):
self.flags(virt_type='uml', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {'root_device_name': '/dev/vdb'}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
block_device_info)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info,
None, block_device_info)
self.assertEqual(0, len(cfg.features))
self.assertEqual(cfg.memory, instance_ref.flavor.memory_mb * units.Ki)
self.assertEqual(cfg.vcpus, instance_ref.flavor.vcpus)
self.assertEqual(cfg.os_type, "uml")
self.assertEqual(cfg.os_boot_dev, [])
self.assertEqual(cfg.os_root, '/dev/vdb')
self.assertEqual(len(cfg.devices), 3)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
def test_has_uefi_support_not_supported_arch(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self._stub_host_capabilities_cpu_arch(fields.Architecture.ALPHA)
self.assertFalse(drvr._has_uefi_support())
@mock.patch('os.path.exists', return_value=False)
def test_has_uefi_support_with_no_loader_existed(self, mock_exist):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertFalse(drvr._has_uefi_support())
@mock.patch('os.path.exists', return_value=True)
def test_has_uefi_support(self, mock_has_version):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self._stub_host_capabilities_cpu_arch(fields.Architecture.X86_64)
with mock.patch.object(drvr._host,
'has_min_version', return_value=True):
self.assertTrue(drvr._has_uefi_support())
def test_get_guest_config_with_uefi(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_firmware_type": "uefi"}})
instance_ref = objects.Instance(**self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(drvr, "_has_uefi_support",
return_value=True) as mock_support:
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
mock_support.assert_called_once_with()
self.assertEqual(cfg.os_loader_type, "pflash")
def test_get_guest_config_with_block_device(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
conn_info = {'driver_volume_type': 'fake', 'data': {}}
bdms = block_device_obj.block_device_make_list_from_dicts(
self.context, [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/vdc'}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/vdd'}),
]
)
info = {'block_device_mapping': driver_block_device.convert_volumes(
bdms
)}
info['block_device_mapping'][0]['connection_info'] = conn_info
info['block_device_mapping'][1]['connection_info'] = conn_info
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
info)
with mock.patch.object(
driver_block_device.DriverVolumeBlockDevice, 'save'
) as mock_save:
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info,
None, info)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[2].target_dev, 'vdc')
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[3].target_dev, 'vdd')
mock_save.assert_called_with()
def test_get_guest_config_lxc_with_attached_volume(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
conn_info = {'driver_volume_type': 'fake', 'data': {}}
bdms = block_device_obj.block_device_make_list_from_dicts(
self.context, [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'source_type': 'volume', 'destination_type': 'volume',
'boot_index': 0}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2,
'source_type': 'volume', 'destination_type': 'volume',
}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 3,
'source_type': 'volume', 'destination_type': 'volume',
}),
]
)
info = {'block_device_mapping': driver_block_device.convert_volumes(
bdms
)}
info['block_device_mapping'][0]['connection_info'] = conn_info
info['block_device_mapping'][1]['connection_info'] = conn_info
info['block_device_mapping'][2]['connection_info'] = conn_info
info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
with mock.patch.object(
driver_block_device.DriverVolumeBlockDevice, 'save'
) as mock_save:
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
info)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info,
None, info)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[1].target_dev, 'vdc')
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[2].target_dev, 'vdd')
mock_save.assert_called_with()
def test_get_guest_config_with_configdrive(self):
        # It's necessary to check whether the architecture is Power,
        # because Power has no IDE support, so libvirt translates all
        # IDE calls to SCSI
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
# make configdrive.required_by() return True
instance_ref['config_drive'] = True
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
        # Pick the first available drive letter on the bus as the config
        # drive, instead of hardcoding the last device as the config
        # drive here.
expect = {"ppc": "sda", "ppc64": "sda",
"ppc64le": "sda", "aarch64": "sda"}
disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hda")
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[2].target_dev, disk)
def test_get_guest_config_default_with_virtio_scsi_bus(self):
self._test_get_guest_config_with_virtio_scsi_bus()
@mock.patch.object(rbd_utils.RBDDriver, 'get_mon_addrs')
@mock.patch.object(rbd_utils, 'rbd')
def test_get_guest_config_rbd_with_virtio_scsi_bus(
self, mock_rdb, mock_get_mon_addrs):
self.flags(images_type='rbd', group='libvirt')
mock_get_mon_addrs.return_value = ("host", 9876)
self._test_get_guest_config_with_virtio_scsi_bus()
def _test_get_guest_config_with_virtio_scsi_bus(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_scsi_model": "virtio-scsi",
"hw_disk_bus": "scsi"}})
instance_ref = objects.Instance(**self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
[])
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
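        # The local and ephemeral disks share the virtio-scsi controller
        # and get sequential SCSI unit numbers starting at 0.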
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(0, cfg.devices[0].device_addr.unit)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(1, cfg.devices[1].device_addr.unit)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestController)
self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_scsi_model": "virtio-scsi",
"hw_disk_bus": "scsi"}})
instance_ref = objects.Instance(**self.test_instance)
conn_info = {'driver_volume_type': 'fake', 'data': {}}
bdms = block_device_obj.block_device_make_list_from_dicts(
self.context, [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
]
)
bd_info = {
'block_device_mapping': driver_block_device.convert_volumes(bdms)}
bd_info['block_device_mapping'][0]['connection_info'] = conn_info
bd_info['block_device_mapping'][1]['connection_info'] = conn_info
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
bd_info)
with mock.patch.object(
driver_block_device.DriverVolumeBlockDevice, 'save'
) as mock_save:
cfg = drvr._get_guest_config(instance_ref, [], image_meta,
disk_info, [], bd_info)
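            # The attached volumes come after the instance's local disks
            # and continue the SCSI unit numbering at 2 and 3.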
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[2].target_dev, 'sdc')
self.assertEqual(cfg.devices[2].target_bus, 'scsi')
self.assertEqual(2, cfg.devices[2].device_addr.unit)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[3].target_dev, 'sdd')
self.assertEqual(cfg.devices[3].target_bus, 'scsi')
self.assertEqual(3, cfg.devices[3].device_addr.unit)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestController)
self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
mock_save.assert_called_with()
def test_get_guest_config_one_scsi_volume_with_configdrive(self):
"""Tests that the unit attribute is only incremented for block devices
that have a scsi bus. Unit numbering should begin at 0 since we are not
booting from volume.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_scsi_model": "virtio-scsi",
"hw_disk_bus": "scsi"}})
instance_ref = objects.Instance(**self.test_instance)
instance_ref.config_drive = 'True'
conn_info = {'driver_volume_type': 'fake'}
bdms = block_device_obj.block_device_make_list_from_dicts(
self.context, [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
]
)
bd_info = {
'block_device_mapping': driver_block_device.convert_volumes(bdms)}
bd_info['block_device_mapping'][0]['connection_info'] = conn_info
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
bd_info)
with mock.patch.object(
driver_block_device.DriverVolumeBlockDevice, 'save'):
cfg = drvr._get_guest_config(instance_ref, [], image_meta,
disk_info, [], bd_info)
# The device order is determined by the order that devices are
# appended in _get_guest_storage_config in the driver.
# The first device will be the instance's local disk (since we're
# not booting from volume). It should begin unit numbering at 0.
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIn('disk', cfg.devices[0].source_path)
self.assertEqual('sda', cfg.devices[0].target_dev)
self.assertEqual('scsi', cfg.devices[0].target_bus)
self.assertEqual(0, cfg.devices[0].device_addr.unit)
# The second device will be the ephemeral disk
# (the flavor in self.test_instance has ephemeral_gb > 0).
# It should have the next unit number of 1.
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIn('disk.local', cfg.devices[1].source_path)
self.assertEqual('sdb', cfg.devices[1].target_dev)
self.assertEqual('scsi', cfg.devices[1].target_bus)
self.assertEqual(1, cfg.devices[1].device_addr.unit)
# This is the config drive. It should not have unit number set.
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestDisk)
self.assertIn('disk.config', cfg.devices[2].source_path)
self.assertEqual('hda', cfg.devices[2].target_dev)
self.assertEqual('ide', cfg.devices[2].target_bus)
self.assertIsNone(cfg.devices[2].device_addr)
# And this is the attached volume.
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual('sdc', cfg.devices[3].target_dev)
self.assertEqual('scsi', cfg.devices[3].target_bus)
self.assertEqual(2, cfg.devices[3].device_addr.unit)
def test_get_guest_config_boot_from_volume_with_configdrive(self):
"""Tests that the unit attribute is only incremented for block devices
that have a scsi bus and that the bootable volume in a boot-from-volume
scenario always has the unit set to 0.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_scsi_model": "virtio-scsi",
"hw_disk_bus": "scsi"}})
instance_ref = objects.Instance(**self.test_instance)
instance_ref.config_drive = 'True'
conn_info = {'driver_volume_type': 'fake'}
bdms = block_device_obj.block_device_make_list_from_dicts(
self.context, [
# This is the boot volume (boot_index = 0).
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/sda', 'boot_index': 0}),
# This is just another attached volume.
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
]
)
bd_info = {
'block_device_mapping': driver_block_device.convert_volumes(bdms)}
bd_info['block_device_mapping'][0]['connection_info'] = conn_info
bd_info['block_device_mapping'][1]['connection_info'] = conn_info
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
bd_info)
with mock.patch.object(
driver_block_device.DriverVolumeBlockDevice, 'save'):
cfg = drvr._get_guest_config(instance_ref, [], image_meta,
disk_info, [], bd_info)
# The device order is determined by the order that devices are
# appended in _get_guest_storage_config in the driver.
# The first device will be the ephemeral disk
# (the flavor in self.test_instance has ephemeral_gb > 0).
# It should begin unit numbering at 1 because 0 is reserved for the
# boot volume for boot-from-volume.
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIn('disk.local', cfg.devices[0].source_path)
self.assertEqual('sdb', cfg.devices[0].target_dev)
self.assertEqual('scsi', cfg.devices[0].target_bus)
self.assertEqual(1, cfg.devices[0].device_addr.unit)
# The second device will be the config drive. It should not have a
# unit number set.
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIn('disk.config', cfg.devices[1].source_path)
self.assertEqual('hda', cfg.devices[1].target_dev)
self.assertEqual('ide', cfg.devices[1].target_bus)
self.assertIsNone(cfg.devices[1].device_addr)
# The third device will be the boot volume. It should have a
# unit number of 0.
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual('sda', cfg.devices[2].target_dev)
self.assertEqual('scsi', cfg.devices[2].target_bus)
self.assertEqual(0, cfg.devices[2].device_addr.unit)
# The fourth device will be the other attached volume.
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual('sdc', cfg.devices[3].target_dev)
self.assertEqual('scsi', cfg.devices[3].target_bus)
self.assertEqual(2, cfg.devices[3].device_addr.unit)
def _get_guest_config_with_graphics(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
return cfg
def test_get_guest_config_with_vnc(self):
self.flags(enabled=True,
server_listen='10.0.0.1',
keymap='en-ie',
group='vnc')
self.flags(virt_type='kvm', group='libvirt')
self.flags(pointer_model='ps2mouse')
self.flags(enabled=False, group='spice')
cfg = self._get_guest_config_with_graphics()
self.assertEqual(len(cfg.devices), 7)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, 'vnc')
self.assertEqual(cfg.devices[4].keymap, 'en-ie')
self.assertEqual(cfg.devices[4].listen, '10.0.0.1')
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(enabled=True, group='vnc')
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=False, group='spice')
cfg = self._get_guest_config_with_graphics()
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_spice_and_tablet(self):
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=True,
agent_enabled=False,
server_listen='10.0.0.1',
keymap='en-ie',
group='spice')
cfg = self._get_guest_config_with_graphics()
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, 'tablet')
self.assertEqual(cfg.devices[5].type, 'spice')
self.assertEqual(cfg.devices[5].keymap, 'en-ie')
self.assertEqual(cfg.devices[5].listen, '10.0.0.1')
def test_get_guest_config_with_spice_and_agent(self):
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
cfg = self._get_guest_config_with_graphics()
expect = {"ppc": "vga", "ppc64": "vga",
"ppc64le": "vga", "aarch64": "virtio"}
video_type = expect.get(blockinfo.libvirt_utils.get_arch({}), "qxl")
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[4].type, 'spicevmc')
self.assertEqual(cfg.devices[5].type, "spice")
self.assertEqual(cfg.devices[6].type, video_type)
def test_get_guest_config_with_vnc_no_keymap(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True, keymap=None, group='vnc')
self.flags(enabled=False, group='spice')
cfg = self._get_guest_config_with_graphics()
for device in cfg.devices:
if device.root_name == 'graphics':
self.assertIsInstance(device,
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual('vnc', device.type)
self.assertIsNone(device.keymap)
def test_get_guest_config_with_spice_no_keymap(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True, keymap=None, group='spice')
self.flags(enabled=False, group='vnc')
cfg = self._get_guest_config_with_graphics()
for device in cfg.devices:
if device.root_name == 'graphics':
self.assertIsInstance(device,
vconfig.LibvirtConfigGuestGraphics)
self.assertEqual('spice', device.type)
self.assertIsNone(device.keymap)
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
@mock.patch('nova.console.serial.acquire_port')
@mock.patch('nova.virt.hardware.get_number_of_serial_ports',
return_value=1)
@mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
def test_create_serial_console_devices_based_on_arch(self, mock_get_arch,
mock_get_port_number,
mock_acquire_port,
mock_ports,
mock_guest):
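        """x86_64 gets serial devices; s390/s390x get console devices."""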
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance = objects.Instance(**self.test_instance)
expected = {
fields.Architecture.X86_64: vconfig.LibvirtConfigGuestSerial,
fields.Architecture.S390: vconfig.LibvirtConfigGuestConsole,
fields.Architecture.S390X: vconfig.LibvirtConfigGuestConsole}
for guest_arch, device_type in expected.items():
mock_get_arch.return_value = guest_arch
guest = vconfig.LibvirtConfigGuest()
drvr._create_consoles(virt_type="kvm", guest_cfg=guest,
instance=instance, flavor={},
image_meta={})
self.assertEqual(2, len(guest.devices))
console_device = guest.devices[0]
self.assertIsInstance(console_device, device_type)
self.assertEqual("tcp", console_device.type)
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
@mock.patch('nova.virt.hardware.get_number_of_serial_ports',
return_value=4)
@mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
side_effect=[fields.Architecture.X86_64,
fields.Architecture.S390,
fields.Architecture.S390X])
def test_create_serial_console_devices_with_limit_exceeded_based_on_arch(
self, mock_get_arch, mock_get_port_number, mock_ports, mock_guest):
self.flags(enabled=True, group='serial_console')
self.flags(virt_type="qemu", group='libvirt')
flavor = 'fake_flavor'
image_meta = objects.ImageMeta()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
guest = vconfig.LibvirtConfigGuest()
instance = objects.Instance(**self.test_instance)
self.assertRaises(exception.SerialPortNumberLimitExceeded,
drvr._create_consoles,
"kvm", guest, instance, flavor, image_meta)
mock_get_arch.assert_called_with(image_meta)
mock_get_port_number.assert_called_with(flavor,
image_meta)
drvr._create_consoles("kvm", guest, instance, flavor, image_meta)
mock_get_arch.assert_called_with(image_meta)
mock_get_port_number.assert_called_with(flavor,
image_meta)
drvr._create_consoles("kvm", guest, instance, flavor, image_meta)
mock_get_arch.assert_called_with(image_meta)
mock_get_port_number.assert_called_with(flavor,
image_meta)
@mock.patch('nova.console.serial.acquire_port')
def test_get_guest_config_serial_console(self, acquire_port):
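        """A TCP serial device listens on the acquired port."""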
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
acquire_port.return_value = 11111
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(8, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("tcp", cfg.devices[2].type)
self.assertEqual(11111, cfg.devices[2].listen_port)
def test_get_guest_config_serial_console_through_flavor(self):
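        """hw:serial_port_count controls the number of serial devices."""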
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(10, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("tcp", cfg.devices[2].type)
self.assertEqual("tcp", cfg.devices[3].type)
self.assertEqual("tcp", cfg.devices[4].type)
def test_get_guest_config_serial_console_invalid_flavor(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(
exception.ImageSerialPortNumberInvalid,
drvr._get_guest_config, instance_ref, [],
image_meta, disk_info)
def test_get_guest_config_serial_console_image_and_flavor(self):
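        """The image serial port count applies within the flavor limit."""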
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_serial_port_count": "3"}})
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta,
disk_info)
self.assertEqual(10, len(cfg.devices), cfg.devices)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("tcp", cfg.devices[2].type)
self.assertEqual("tcp", cfg.devices[3].type)
self.assertEqual("tcp", cfg.devices[4].type)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('nova.console.serial.acquire_port')
@mock.patch('nova.virt.hardware.get_number_of_serial_ports',
return_value=1)
@mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
def test_guest_config_char_device_logd(self, mock_get_arch,
mock_get_number_serial_ports,
mock_acquire_port,
mock_host_has_min_version):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def _test_consoles(arch_to_mock, serial_enabled,
expected_device_type, expected_device_cls,
virt_type='qemu'):
guest_cfg = vconfig.LibvirtConfigGuest()
mock_get_arch.return_value = arch_to_mock
self.flags(enabled=serial_enabled, group='serial_console')
instance = objects.Instance(**self.test_instance)
drvr._create_consoles(virt_type, guest_cfg, instance=instance,
flavor=None, image_meta=None)
self.assertEqual(1, len(guest_cfg.devices))
device = guest_cfg.devices[0]
self.assertEqual(expected_device_type, device.type)
self.assertIsInstance(device, expected_device_cls)
self.assertIsInstance(device.log,
vconfig.LibvirtConfigGuestCharDeviceLog)
self.assertEqual("off", device.log.append)
self.assertIsNotNone(device.log.file)
self.assertTrue(device.log.file.endswith("console.log"))
_test_consoles(fields.Architecture.X86_64, True,
"tcp", vconfig.LibvirtConfigGuestSerial)
_test_consoles(fields.Architecture.X86_64, False,
"pty", vconfig.LibvirtConfigGuestSerial)
_test_consoles(fields.Architecture.S390, True,
"tcp", vconfig.LibvirtConfigGuestConsole)
_test_consoles(fields.Architecture.S390X, False,
"pty", vconfig.LibvirtConfigGuestConsole)
_test_consoles(fields.Architecture.X86_64, False,
"pty", vconfig.LibvirtConfigGuestConsole, 'xen')
@mock.patch('nova.console.serial.acquire_port')
def test_get_guest_config_serial_console_through_port_rng_exhausted(
self, acquire_port):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
'127.0.0.1')
self.assertRaises(
exception.SocketPortRangeExhaustedException,
drvr._get_guest_config, instance_ref, [],
image_meta, disk_info)
@mock.patch('nova.console.serial.release_port')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
def test_serial_console_release_port(
self, mock_get_serial_ports_from_guest, mock_get_guest,
mock_get_info, mock_release_port):
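        """Destroying a guest releases its serial console ports."""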
self.flags(enabled="True", group='serial_console')
guest = libvirt_guest.Guest(FakeVirtDomain())
guest.power_off = mock.Mock()
mock_get_info.return_value = hardware.InstanceInfo(
state=power_state.SHUTDOWN)
mock_get_guest.return_value = guest
mock_get_serial_ports_from_guest.return_value = iter([
('127.0.0.1', 10000), ('127.0.0.1', 10001)])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._destroy(objects.Instance(**self.test_instance))
mock_release_port.assert_has_calls(
[mock.call(host='127.0.0.1', port=10000),
mock.call(host='127.0.0.1', port=10001)])
@mock.patch('nova.virt.disk.api.get_disk_info',
return_value=mock.Mock(disk_size=0))
@mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size',
return_value='fake-size')
def test_detach_encrypted_volumes(self, mock_get_volume_size,
mock_getsize):
"""Test that unencrypted volumes are not disconnected with dmcrypt."""
instance = objects.Instance(**self.test_instance)
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<driver name='fake-driver' type='fake-type' />
<source file='filename'/>
<target dev='vdc' bus='virtio'/>
</disk>
<disk type='block' device='disk'>
<driver name='fake-driver' type='fake-type' />
<source dev='/dev/mapper/disk'/>
<target dev='vda'/>
</disk>
<disk type='block' device='disk'>
<driver name='fake-driver' type='fake-type' />
<source dev='/dev/mapper/swap'/>
<target dev='vdb'/>
</disk>
</devices>
</domain>
"""
dom = FakeVirtDomain(fake_xml=xml)
instance.ephemeral_key_uuid = uuids.ephemeral_key_uuid # encrypted
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@mock.patch.object(dmcrypt, 'delete_volume')
@mock.patch.object(conn._host, '_get_domain', return_value=dom)
@mock.patch.object(libvirt_driver.disk_api, 'get_allocated_disk_size')
def detach_encrypted_volumes(block_device_info, mock_get_alloc_size,
mock_get_domain, mock_delete_volume):
conn._detach_encrypted_volumes(instance, block_device_info)
mock_get_domain.assert_called_once_with(instance)
self.assertFalse(mock_delete_volume.called)
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [],
'block_device_mapping': []}
detach_encrypted_volumes(block_device_info)
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest(self, mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest(None,
mock_get_xml_desc)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_bind_only(self, mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest('bind',
mock_get_xml_desc)
self.assertEqual([
('127.0.0.1', 101),
('127.0.0.2', 100)], list(i))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_connect_only(self,
mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest('connect',
mock_get_xml_desc)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest(None,
mock_get_xml_desc,
'console')
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc,
dev_name='serial'):
xml = """
<domain type='kvm'>
<devices>
<%(dev_name)s type="tcp">
<source host="127.0.0.1" service="100" mode="connect"/>
</%(dev_name)s>
<%(dev_name)s type="tcp">
<source host="127.0.0.1" service="101" mode="bind"/>
</%(dev_name)s>
<%(dev_name)s type="tcp">
<source host="127.0.0.2" service="100" mode="bind"/>
</%(dev_name)s>
<%(dev_name)s type="tcp">
<source host="127.0.0.2" service="101" mode="connect"/>
</%(dev_name)s>
</devices>
</domain>""" % {'dev_name': dev_name}
mock_get_xml_desc.return_value = xml
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
guest = libvirt_guest.Guest(FakeVirtDomain())
return drvr._get_serial_ports_from_guest(guest, mode=mode)
def test_get_guest_config_with_type_xen(self):
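        """A Xen guest gets a console, VNC graphics and a xen video model."""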
self.flags(enabled=True, group='vnc')
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 6)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[3].type, "vnc")
self.assertEqual(cfg.devices[4].type, "xen")
@mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
return_value=fields.Architecture.S390X)
def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch):
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
self._stub_host_capabilities_cpu_arch(fields.Architecture.S390X)
instance_ref = objects.Instance(**self.test_instance)
cfg = self._get_guest_config_via_fake_api(instance_ref)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
log_file_device = cfg.devices[2]
self.assertIsInstance(log_file_device,
vconfig.LibvirtConfigGuestConsole)
self.assertEqual("sclplm", log_file_device.target_type)
self.assertEqual("file", log_file_device.type)
terminal_device = cfg.devices[3]
self.assertIsInstance(terminal_device,
vconfig.LibvirtConfigGuestConsole)
self.assertEqual("sclp", terminal_device.target_type)
self.assertEqual("pty", terminal_device.type)
self.assertEqual("s390-ccw-virtio", cfg.os_mach_type)
def _stub_host_capabilities_cpu_arch(self, cpu_arch):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = cpu_arch
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
get_host_capabilities_stub)
def _get_guest_config_via_fake_api(self, instance):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
return drvr._get_guest_config(instance, [],
image_meta, disk_info)
def test_get_guest_config_with_type_xen_pae_hvm(self):
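        """A Xen HVM guest uses hvmloader and the PAE/ACPI/APIC features."""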
self.flags(enabled=True, group='vnc')
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['vm_mode'] = fields.VMMode.HVM
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(cfg.os_type, fields.VMMode.HVM)
self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
self.assertEqual(3, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeaturePAE)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[2],
vconfig.LibvirtConfigGuestFeatureAPIC)
def test_get_guest_config_with_type_xen_pae_pvm(self):
self.flags(enabled=True, group='vnc')
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(cfg.os_type, fields.VMMode.XEN)
self.assertEqual(1, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeaturePAE)
def test_get_guest_config_with_vnc_and_spice(self):
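        """Enabling VNC and SPICE together yields two graphics devices."""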
self.flags(enabled=True, group='vnc')
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 10)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[5].type, 'spicevmc')
self.assertEqual(cfg.devices[6].type, "vnc")
self.assertEqual(cfg.devices[7].type, "spice")
def test_get_guest_config_with_watchdog_action_image_meta(self):
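        """hw_watchdog_action in the image adds a watchdog device."""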
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_watchdog_action": "none"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 9)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("none", cfg.devices[7].action)
def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type,
agent_enabled=False, image_meta=None):
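        """Return the pointer device (if any) for the given console setup."""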
self.flags(enabled=vnc_enabled, group='vnc')
self.flags(enabled=spice_enabled,
agent_enabled=agent_enabled, group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict(image_meta)
return drvr._get_guest_pointer_model(os_type, image_meta)
def test_use_ps2_mouse(self):
self.flags(pointer_model='ps2mouse')
tablet = self._test_get_guest_usb_tablet(
True, True, fields.VMMode.HVM)
self.assertIsNone(tablet)
def test_get_guest_usb_tablet_wipe(self):
self.flags(use_usb_tablet=True, group='libvirt')
tablet = self._test_get_guest_usb_tablet(
True, True, fields.VMMode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(
True, False, fields.VMMode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(
False, True, fields.VMMode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(
False, False, fields.VMMode.HVM)
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(
True, True, "foo")
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(
False, True, fields.VMMode.HVM, True)
self.assertIsNone(tablet)
def test_get_guest_usb_tablet_image_meta(self):
self.flags(use_usb_tablet=True, group='libvirt')
image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}
tablet = self._test_get_guest_usb_tablet(
True, True, fields.VMMode.HVM, image_meta=image_meta)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(
True, False, fields.VMMode.HVM, image_meta=image_meta)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(
False, True, fields.VMMode.HVM, image_meta=image_meta)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(
False, False, fields.VMMode.HVM, image_meta=image_meta)
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(
True, True, "foo", image_meta=image_meta)
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(
False, True, fields.VMMode.HVM, True, image_meta=image_meta)
self.assertIsNone(tablet)
def test_get_guest_usb_tablet_image_meta_no_vnc(self):
self.flags(use_usb_tablet=False, group='libvirt')
self.flags(pointer_model=None)
image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}
self.assertRaises(
exception.UnsupportedPointerModelRequested,
self._test_get_guest_usb_tablet,
False, False, fields.VMMode.HVM, True, image_meta=image_meta)
def test_get_guest_no_pointer_model_usb_tablet_set(self):
self.flags(use_usb_tablet=True, group='libvirt')
self.flags(pointer_model=None)
tablet = self._test_get_guest_usb_tablet(True, True, fields.VMMode.HVM)
self.assertIsNotNone(tablet)
def test_get_guest_no_pointer_model_usb_tablet_not_set(self):
self.flags(use_usb_tablet=False, group='libvirt')
self.flags(pointer_model=None)
tablet = self._test_get_guest_usb_tablet(True, True, fields.VMMode.HVM)
self.assertIsNone(tablet)
def test_get_guest_pointer_model_usb_tablet(self):
self.flags(use_usb_tablet=False, group='libvirt')
self.flags(pointer_model='usbtablet')
tablet = self._test_get_guest_usb_tablet(True, True, fields.VMMode.HVM)
self.assertIsNotNone(tablet)
def test_get_guest_pointer_model_usb_tablet_image(self):
image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}
tablet = self._test_get_guest_usb_tablet(
True, True, fields.VMMode.HVM, image_meta=image_meta)
self.assertIsNotNone(tablet)
def test_get_guest_pointer_model_usb_tablet_image_no_HVM(self):
self.flags(pointer_model=None)
self.flags(use_usb_tablet=False, group='libvirt')
image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}
self.assertRaises(
exception.UnsupportedPointerModelRequested,
self._test_get_guest_usb_tablet,
True, True, fields.VMMode.XEN, image_meta=image_meta)
def test_get_guest_config_with_watchdog_action_flavor(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {"hw:watchdog_action": 'none'}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(9, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("none", cfg.devices[7].action)
def test_get_guest_config_with_watchdog_overrides_flavor(self):
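        """The image watchdog action overrides the flavor setting."""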
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:watchdog_action': 'none'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_watchdog_action": "pause"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(9, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("pause", cfg.devices[7].action)
def test_get_guest_config_with_video_driver_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "vmvga"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[5].type, "vnc")
self.assertEqual(cfg.devices[6].type, "vmvga")
def test_get_guest_config_with_qga_through_image_meta(self):
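        """hw_qemu_guest_agent=yes adds the guest agent unix channel."""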
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_qemu_guest_agent": "yes"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 9)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
self.assertEqual(cfg.devices[7].type, "unix")
self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
def test_get_guest_config_with_video_driver_vram(self):
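        """hw_video_ram within the flavor limit sets the qxl vram."""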
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[5].type, "spice")
self.assertEqual(cfg.devices[6].type, "qxl")
self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki)
@mock.patch('nova.virt.disk.api.teardown_container')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_unmount_fs_if_error_during_lxc_create_domain(self,
mock_get_inst_path, mock_ensure_tree, mock_setup_container,
mock_get_info, mock_teardown):
"""If we hit an error during a `_create_domain` call to `libvirt+lxc`
we need to ensure the guest FS is unmounted from the host so that any
future `lvremove` calls will work.
"""
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_instance = mock.MagicMock()
mock_get_inst_path.return_value = '/tmp/'
mock_image_backend = mock.MagicMock()
drvr.image_backend = mock_image_backend
mock_image = mock.MagicMock()
mock_image.path = '/tmp/test.img'
drvr.image_backend.by_name.return_value = mock_image
mock_setup_container.return_value = '/dev/nbd0'
mock_get_info.side_effect = exception.InstanceNotFound(
instance_id='foo')
drvr._conn.defineXML = mock.Mock()
drvr._conn.defineXML.side_effect = ValueError('somethingbad')
with test.nested(
mock.patch.object(drvr, '_is_booted_from_volume',
return_value=False),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr, 'firewall_driver'),
mock.patch.object(drvr, 'cleanup')):
self.assertRaises(ValueError,
drvr._create_domain_and_network,
self.context,
'xml',
mock_instance, None)
mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
def test_video_driver_flavor_limit_not_set(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(objects.Instance, 'save'):
self.assertRaises(exception.RequestedVRamTooHigh,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_video_driver_ram_above_flavor_limit(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
instance_ref = objects.Instance(**self.test_instance)
instance_type = instance_ref.get_flavor()
instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(objects.Instance, 'save'):
self.assertRaises(exception.RequestedVRamTooHigh,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_get_guest_config_without_qga_through_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_qemu_guest_agent": "no"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_rng_device(self):
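        """An allowed hw_rng_model adds an RNG backed by /dev/urandom."""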
self.flags(virt_type='kvm', group='libvirt')
self.flags(pointer_model='ps2mouse')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertEqual(cfg.devices[6].backend, '/dev/urandom')
self.assertIsNone(cfg.devices[6].rate_bytes)
self.assertIsNone(cfg.devices[6].rate_period)
def test_get_guest_config_with_rng_not_allowed(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(pointer_model='ps2mouse')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 7)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigMemoryBalloon)
def test_get_guest_config_with_rng_limits(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(pointer_model='ps2mouse')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True',
'hw_rng:rate_bytes': '1024',
'hw_rng:rate_period': '2'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertEqual(cfg.devices[6].backend, '/dev/urandom')
self.assertEqual(cfg.devices[6].rate_bytes, 1024)
self.assertEqual(cfg.devices[6].rate_period, 2)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_backend(self, mock_path):
self.flags(virt_type='kvm',
rng_dev_path='/dev/hw_rng',
group='libvirt')
self.flags(pointer_model='ps2mouse')
mock_path.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
self.assertIsNone(cfg.devices[6].rate_bytes)
self.assertIsNone(cfg.devices[6].rate_period)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_dev_not_present(self, mock_path):
self.flags(virt_type='kvm',
use_usb_tablet=False,
rng_dev_path='/dev/hw_rng',
group='libvirt')
mock_path.return_value = False
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(exception.RngDeviceNotExist,
drvr._get_guest_config,
instance_ref,
[],
image_meta, disk_info)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_guest_cpu_shares_with_multi_vcpu(self, is_able):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.vcpus = 4
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(4096, cfg.cputune.shares)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_with_cpu_quota(self, is_able):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
'quota:cpu_period': '20000'}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(10000, cfg.cputune.shares)
self.assertEqual(20000, cfg.cputune.period)
def test_get_guest_config_with_hiding_hypervisor_id(self):
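        """img_hide_hypervisor_id=true enables the KVM-hidden feature."""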
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"img_hide_hypervisor_id": "true"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
[],
image_meta,
disk_info)
self.assertTrue(
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
for feature in cfg.features))
def test_get_guest_config_with_hiding_hypervisor_id_flavor_extra_specs(
self):
# Input to the test: flavor extra_specs
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
extra_specs={"hide_hypervisor_id": "true"},
expected_attrs={"extra_specs"})
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor = flavor_hide_id
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw"})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
[],
image_meta,
disk_info)
self.assertTrue(
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
for feature in cfg.features))
def test_get_guest_config_with_hiding_hypervisor_id_img_and_flavor(
self):
# Input to the test: image metadata (true) and flavor
# extra_specs (true)
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
extra_specs={"hide_hypervisor_id": "true"},
expected_attrs={"extra_specs"})
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"img_hide_hypervisor_id": "true"}})
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor = flavor_hide_id
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
[],
image_meta,
disk_info)
self.assertTrue(
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
for feature in cfg.features))
def test_get_guest_config_with_hiding_hypervisor_id_img_or_flavor(
self):
# Input to the test: image metadata (false) and flavor
# extra_specs (true)
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
extra_specs={"hide_hypervisor_id": "true"},
expected_attrs={"extra_specs"})
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"img_hide_hypervisor_id": "false"}})
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor = flavor_hide_id
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
[],
image_meta,
disk_info)
self.assertTrue(
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
for feature in cfg.features))
# Input to the test: image metadata (true) and flavor
# extra_specs (false)
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
extra_specs={"hide_hypervisor_id": "false"},
expected_attrs={"extra_specs"})
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"img_hide_hypervisor_id": "true"}})
instance_ref.flavor = flavor_hide_id
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
[],
image_meta,
disk_info)
self.assertTrue(
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
for feature in cfg.features))
def test_get_guest_config_without_hiding_hypervisor_id(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"img_hide_hypervisor_id": "false"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
[],
image_meta,
disk_info)
self.assertFalse(
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
for feature in cfg.features))
def test_get_guest_config_without_hiding_hypervisor_id_flavor_extra_specs(
self):
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
extra_specs={"hide_hypervisor_id": "false"},
expected_attrs={"extra_specs"})
self.flags(virt_type='qemu', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor = flavor_hide_id
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw"})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
[],
image_meta,
disk_info)
self.assertFalse(
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
for feature in cfg.features))
def _test_get_guest_config_disk_cachemodes(self, images_type):
# Verify that the configured cachemodes are propagated to the device
# configurations.
if images_type == 'flat':
cachemode = 'file=directsync'
elif images_type == 'lvm':
cachemode = 'block=writethrough'
elif images_type == 'rbd':
cachemode = 'network=writeback'
self.flags(disk_cachemodes=[cachemode], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for d in cfg.devices:
if isinstance(d, vconfig.LibvirtConfigGuestDisk):
expected = cachemode.split('=')
self.assertEqual(expected[0], d.source_type)
self.assertEqual(expected[1], d.driver_cache)
def test_get_guest_config_disk_cachemodes_file(self):
self.flags(images_type='flat', group='libvirt')
self._test_get_guest_config_disk_cachemodes('flat')
def test_get_guest_config_disk_cachemodes_block(self):
self.flags(images_type='lvm', group='libvirt')
self.flags(images_volume_group='vols', group='libvirt')
self._test_get_guest_config_disk_cachemodes('lvm')
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
@mock.patch.object(rbd_utils.RBDDriver, 'get_mon_addrs',
return_value=(mock.Mock(), mock.Mock()))
def test_get_guest_config_disk_cachemodes_network(
self, mock_get_mon_addrs, mock_rados, mock_rbd):
self.flags(images_type='rbd', group='libvirt')
self._test_get_guest_config_disk_cachemodes('rbd')
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
'quota:cpu_period': '20000'}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(ValueError,
drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=False)
def test_get_update_guest_cputune(self, is_able):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
'quota:cpu_period': '20000'}
self.assertRaises(
exception.UnsupportedHostCPUControlPolicy,
drvr._update_guest_cputune, {}, instance_ref.flavor, "kvm")
def _test_get_guest_config_sysinfo_serial(self, expected_serial):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
cfg = drvr._get_guest_config_sysinfo(instance_ref)
self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
self.assertEqual(version.vendor_string(),
cfg.system_manufacturer)
self.assertEqual(version.product_string(),
cfg.system_product)
self.assertEqual(version.version_string_with_package(),
cfg.system_version)
self.assertEqual(expected_serial,
cfg.system_serial)
self.assertEqual(instance_ref['uuid'],
cfg.system_uuid)
self.assertEqual("Virtual Machine",
cfg.system_family)
def test_get_guest_config_sysinfo_serial_none(self):
self.flags(sysinfo_serial="none", group="libvirt")
self._test_get_guest_config_sysinfo_serial(None)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_host_sysinfo_serial_hardware")
def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
self.flags(sysinfo_serial="hardware", group="libvirt")
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
mock_uuid.return_value = theuuid
self._test_get_guest_config_sysinfo_serial(theuuid)
@contextlib.contextmanager
def patch_exists(self, result):
real_exists = os.path.exists
def fake_exists(filename):
if filename == "/etc/machine-id":
return result
return real_exists(filename)
with mock.patch.object(os.path, "exists") as mock_exists:
mock_exists.side_effect = fake_exists
yield mock_exists
def test_get_guest_config_sysinfo_serial_os(self):
self.flags(sysinfo_serial="os", group="libvirt")
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
with test.nested(
mock.patch.object(six.moves.builtins, "open",
mock.mock_open(read_data=theuuid)),
self.patch_exists(True)):
self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self):
self.flags(sysinfo_serial="os", group="libvirt")
with test.nested(
mock.patch.object(six.moves.builtins, "open",
mock.mock_open(read_data="")),
self.patch_exists(True)):
self.assertRaises(exception.NovaException,
self._test_get_guest_config_sysinfo_serial,
None)
def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self):
self.flags(sysinfo_serial="os", group="libvirt")
with self.patch_exists(False):
self.assertRaises(exception.NovaException,
self._test_get_guest_config_sysinfo_serial,
None)
def test_get_guest_config_sysinfo_serial_auto_hardware(self):
self.flags(sysinfo_serial="auto", group="libvirt")
real_exists = os.path.exists
with test.nested(
mock.patch.object(os.path, "exists"),
mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_host_sysinfo_serial_hardware")
) as (mock_exists, mock_uuid):
def fake_exists(filename):
if filename == "/etc/machine-id":
return False
return real_exists(filename)
mock_exists.side_effect = fake_exists
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
mock_uuid.return_value = theuuid
self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_auto_os(self):
self.flags(sysinfo_serial="auto", group="libvirt")
real_exists = os.path.exists
real_open = builtins.open
with test.nested(
mock.patch.object(os.path, "exists"),
mock.patch.object(builtins, "open"),
) as (mock_exists, mock_open):
def fake_exists(filename):
if filename == "/etc/machine-id":
return True
return real_exists(filename)
mock_exists.side_effect = fake_exists
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
def fake_open(filename, *args, **kwargs):
if filename == "/etc/machine-id":
h = mock.MagicMock()
h.read.return_value = theuuid
h.__enter__.return_value = h
return h
return real_open(filename, *args, **kwargs)
mock_open.side_effect = fake_open
self._test_get_guest_config_sysinfo_serial(theuuid)
def _create_fake_service_compute(self):
service_info = {
'id': 1729,
'host': 'fake',
'report_count': 0
}
service_ref = objects.Service(**service_info)
compute_info = {
'id': 1729,
'vcpus': 2,
'memory_mb': 1024,
'local_gb': 2048,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'free_ram_mb': 1024,
'free_disk_gb': 2048,
'hypervisor_type': 'xen',
'hypervisor_version': 1,
'running_vms': 0,
'cpu_info': '',
'current_workload': 0,
'service_id': service_ref['id'],
'host': service_ref['host']
}
compute_ref = objects.ComputeNode(**compute_info)
return (service_ref, compute_ref)
def test_get_guest_config_with_pci_passthrough_kvm(self):
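        """A passed-through PCI device is managed by libvirt under KVM."""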
self.flags(virt_type='kvm', group='libvirt')
service_ref, compute_ref = self._create_fake_service_compute()
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status=fields.PciDeviceStatus.ALLOCATED,
address='0000:00:00.1',
compute_id=compute_ref.id,
instance_uuid=instance.uuid,
request_id=None,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
cfg = drvr._get_guest_config(instance, [],
image_meta, disk_info)
had_pci = 0
# care only about the PCI devices
for dev in cfg.devices:
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
had_pci += 1
self.assertEqual(dev.type, 'pci')
self.assertEqual(dev.managed, 'yes')
self.assertEqual(dev.mode, 'subsystem')
self.assertEqual(dev.domain, "0000")
self.assertEqual(dev.bus, "00")
self.assertEqual(dev.slot, "00")
self.assertEqual(dev.function, "1")
self.assertEqual(had_pci, 1)
def test_get_guest_config_with_pci_passthrough_xen(self):
self.flags(virt_type='xen', group='libvirt')
service_ref, compute_ref = self._create_fake_service_compute()
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status=fields.PciDeviceStatus.ALLOCATED,
address='0000:00:00.2',
compute_id=compute_ref.id,
instance_uuid=instance.uuid,
request_id=None,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
cfg = drvr._get_guest_config(instance, [],
image_meta, disk_info)
had_pci = 0
# care only about the PCI devices
for dev in cfg.devices:
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
had_pci += 1
self.assertEqual(dev.type, 'pci')
self.assertEqual(dev.managed, 'no')
self.assertEqual(dev.mode, 'subsystem')
self.assertEqual(dev.domain, "0000")
self.assertEqual(dev.bus, "00")
self.assertEqual(dev.slot, "00")
self.assertEqual(dev.function, "2")
self.assertEqual(had_pci, 1)
def test_get_guest_config_os_command_line_through_image_meta(self):
self.flags(virt_type="kvm",
cpu_mode='none',
group='libvirt')
self.test_instance['kernel_id'] = "fake_kernel_id"
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"os_command_line":
"fake_os_command_line"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
def test_get_guest_config_os_command_line_without_kernel_id(self):
self.flags(virt_type="kvm",
cpu_mode='none',
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"os_command_line":
"fake_os_command_line"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsNone(cfg.os_cmdline)
def test_get_guest_config_os_command_empty(self):
self.flags(virt_type="kvm",
cpu_mode='none',
group='libvirt')
self.test_instance['kernel_id'] = "fake_kernel_id"
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"os_command_line": ""}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
# the instance has 'root=/dev/vda console=tty0 console=ttyS0 console=hvc0'
# set by default, so an empty (or missing) os_command_line image property
# must not result in an empty kernel command line
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertNotEqual(cfg.os_cmdline, "")
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
def test_get_guest_config_armv7(self, mock_numa, mock_storage):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = fields.Architecture.ARMV7
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.flags(virt_type="kvm",
group="libvirt")
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
get_host_capabilities_stub)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "vexpress-a15")
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
@mock.patch('os.path.exists', return_value=True)
def test_get_guest_config_aarch64(self, mock_path_exists,
mock_numa, mock_storage):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = fields.Architecture.AARCH64
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
TEST_AMOUNT_OF_PCIE_SLOTS = 8
CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
group='libvirt')
self.flags(virt_type="kvm",
group="libvirt")
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
get_host_capabilities_stub)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertTrue(mock_path_exists.called)
mock_path_exists.assert_called_with(
libvirt_driver.DEFAULT_UEFI_LOADER_PATH['aarch64'])
self.assertEqual(cfg.os_mach_type, "virt")
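# Count the pcie-root-port controllers; their number should match the
# configured num_pcie_ports value.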
num_ports = 0
for device in cfg.devices:
try:
if (device.root_name == 'controller' and
device.model == 'pcie-root-port'):
num_ports += 1
except AttributeError:
pass
self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
@mock.patch('os.path.exists', return_value=True)
def test_get_guest_config_aarch64_with_graphics(self, mock_path_exists,
mock_numa, mock_storage):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = fields.Architecture.AARCH64
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
get_host_capabilities_stub)
self.flags(enabled=True,
server_listen='10.0.0.1',
keymap='en-ie',
group='vnc')
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=False, group='spice')
cfg = self._get_guest_config_with_graphics()
self.assertTrue(mock_path_exists.called)
mock_path_exists.assert_called_with(
libvirt_driver.DEFAULT_UEFI_LOADER_PATH['aarch64'])
self.assertEqual(cfg.os_mach_type, "virt")
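# An AArch64 guest with graphics enabled should also get a USB host
# controller and a USB keyboard.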
usbhost_exists = False
keyboard_exists = False
for device in cfg.devices:
if device.root_name == 'controller' and device.type == 'usb':
usbhost_exists = True
if device.root_name == 'input' and device.type == 'keyboard':
keyboard_exists = True
self.assertTrue(usbhost_exists)
self.assertTrue(keyboard_exists)
def test_get_guest_config_machine_type_s390(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigGuestCPU()
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
host_cpu_archs = (fields.Architecture.S390, fields.Architecture.S390X)
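# Both s390 and s390x hosts should default to the s390-ccw-virtio
# machine type.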
for host_cpu_arch in host_cpu_archs:
caps.host.cpu.arch = host_cpu_arch
os_mach_type = drvr._get_machine_type(image_meta, caps)
self.assertEqual('s390-ccw-virtio', os_mach_type)
def test_get_guest_config_machine_type_through_image_meta(self):
self.flags(virt_type="kvm",
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_machine_type":
"fake_machine_type"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def test_get_guest_config_machine_type_from_config(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(hw_machine_type=['x86_64=fake_machine_type'],
group='libvirt')
def fake_getCapabilities():
return """
<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='2' threads='1'/>
<feature name='xtpr'/>
</cpu>
</host>
</capabilities>
"""
def fake_baselineCPU(cpu, flag):
return """<cpu mode='custom' match='exact'>
<model fallback='allow'>Penryn</model>
<vendor>Intel</vendor>
<feature policy='require' name='xtpr'/>
</cpu>
"""
# Make sure the host arch is mocked as x86_64
self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
baselineCPU=fake_baselineCPU,
getVersion=lambda: 1005001)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def _test_get_guest_config_ppc64(self, device_index):
"""Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
"""
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
expected = (fields.Architecture.PPC64, fields.Architecture.PPC)
for guestarch in expected:
with mock.patch.object(libvirt_driver.libvirt_utils,
'get_arch',
return_value=guestarch):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta,
disk_info)
self.assertIsInstance(cfg.devices[device_index],
vconfig.LibvirtConfigGuestVideo)
self.assertEqual(cfg.devices[device_index].type, 'vga')
def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
self.flags(enabled=True, group='vnc')
self._test_get_guest_config_ppc64(6)
def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
self.flags(enabled=True,
agent_enabled=True,
group='spice')
self._test_get_guest_config_ppc64(8)
def _test_get_guest_config_bootmenu(self, image_meta, extra_specs):
self.flags(virt_type='kvm', group='libvirt')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = extra_specs
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref, image_meta)
conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertTrue(conf.os_bootmenu)
def test_get_guest_config_bootmenu_via_image_meta(self):
image_meta = objects.ImageMeta.from_dict(
{"disk_format": "raw",
"properties": {"hw_boot_menu": "True"}})
self._test_get_guest_config_bootmenu(image_meta, {})
def test_get_guest_config_bootmenu_via_extra_specs(self):
image_meta = objects.ImageMeta.from_dict(
self.test_image_meta)
self._test_get_guest_config_bootmenu(image_meta,
{'hw:boot_menu': 'True'})
def test_get_guest_cpu_config_none(self):
self.flags(cpu_mode="none", group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertIsNone(conf.cpu.mode)
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_default_kvm(self):
self.flags(virt_type="kvm",
cpu_mode='none',
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertIsNone(conf.cpu.mode)
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_default_uml(self):
self.flags(virt_type="uml",
cpu_mode='none',
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsNone(conf.cpu)
def test_get_guest_cpu_config_default_lxc(self):
self.flags(virt_type="lxc",
cpu_mode='none',
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsNone(conf.cpu)
def test_get_guest_cpu_config_host_passthrough(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="host-passthrough", group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-passthrough")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_host_passthrough_aarch64(self):
expected = {
fields.Architecture.X86_64: "host-model",
fields.Architecture.I686: "host-model",
fields.Architecture.PPC: "host-model",
fields.Architecture.PPC64: "host-model",
fields.Architecture.ARMV7: "host-model",
fields.Architecture.AARCH64: "host-passthrough",
}
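# AArch64 guests lack 'host-model' support, so that architecture must
# fall back to 'host-passthrough'; every other arch keeps 'host-model'.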
for guestarch, expect_mode in expected.items():
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = guestarch
with mock.patch.object(host.Host, "get_capabilities",
return_value=caps):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
if caps.host.cpu.arch == fields.Architecture.AARCH64:
drvr._has_uefi_support = mock.Mock(return_value=True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, expect_mode)
def test_get_guest_cpu_config_host_model(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="host-model", group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_custom(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="custom",
cpu_model="Penryn",
group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "Penryn")
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_get_guest_cpu_config_custom_with_extra_flags(self,
mock_warn):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="custom",
cpu_model="IvyBridge",
cpu_model_extra_flags="pcid",
group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "IvyBridge")
self.assertEqual("pcid", conf.cpu.features.pop().name)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
self.assertFalse(mock_warn.called)
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_get_guest_cpu_config_custom_with_extra_flags_upper_case(self,
mock_warn):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="custom",
cpu_model="IvyBridge",
cpu_model_extra_flags="PCID",
group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual("custom", conf.cpu.mode)
self.assertEqual("IvyBridge", conf.cpu.model)
# At this point the upper case CPU flag is normalized to lower
# case, so assert for that
self.assertEqual("pcid", conf.cpu.features.pop().name)
self.assertEqual(instance_ref.flavor.vcpus, conf.cpu.sockets)
self.assertEqual(1, conf.cpu.cores)
self.assertEqual(1, conf.cpu.threads)
mock_warn.assert_not_called()
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_get_guest_cpu_config_custom_with_multiple_extra_flags(self,
mock_warn):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="custom",
cpu_model="IvyBridge",
cpu_model_extra_flags=['pcid', 'vmx'],
group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
features = [feature.name for feature in conf.cpu.features]
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "IvyBridge")
self.assertIn("pcid", features)
self.assertIn("vmx", features)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
mock_warn.assert_not_called()
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_get_guest_cpu_config_host_model_with_extra_flags(self,
mock_warn):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="host-model",
cpu_model_extra_flags="pdpe1gb",
group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
features = [feature.name for feature in conf.cpu.features]
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertIn("pdpe1gb", features)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
# For 'host-model', it is now valid to use 'extra_flags';
# assert that no warning is thrown
mock_warn.assert_not_called()
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_get_guest_cpu_config_host_passthrough_with_extra_flags(self,
mock_warn):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="host-passthrough",
cpu_model_extra_flags="invtsc",
group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
features = [feature.name for feature in conf.cpu.features]
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-passthrough")
self.assertIn("invtsc", features)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
# We have lifted the restriction for 'host-passthrough' as well;
# so here too, assert that no warning is thrown
mock_warn.assert_not_called()
def test_get_guest_cpu_topology(self):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.vcpus = 8
instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertEqual(conf.cpu.sockets, 4)
self.assertEqual(conf.cpu.cores, 2)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_memory_balloon_config_by_default(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_disable(self):
self.flags(mem_stats_period_seconds=0, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
no_exist = True
for device in cfg.devices:
if device.root_name == 'memballoon':
no_exist = False
break
self.assertTrue(no_exist)
def test_get_guest_memory_balloon_config_period_value(self):
self.flags(mem_stats_period_seconds=21, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(21, device.period)
def test_get_guest_memory_balloon_config_qemu(self):
self.flags(virt_type='qemu', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_xen(self):
self.flags(virt_type='xen', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('xen', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_lxc(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
no_exist = True
for device in cfg.devices:
if device.root_name == 'memballoon':
no_exist = False
break
self.assertTrue(no_exist)
@mock.patch('nova.virt.libvirt.driver.LOG.warning')
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(host.Host, "get_capabilities")
def test_get_supported_perf_events_foo(self, mock_get_caps,
mock_min_version,
mock_warn):
self.flags(enabled_perf_events=['foo'], group='libvirt')
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
mock_get_caps.return_value = caps
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
events = drvr._get_supported_perf_events()
self.assertTrue(mock_warn.called)
self.assertEqual([], events)
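# Helper: build a guest config against the mocked host capabilities and
# assert that exactly the expected perf events end up in the config.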
@mock.patch.object(host.Host, "get_capabilities")
def _test_get_guest_with_perf(self, caps, events, mock_get_caps):
mock_get_caps.return_value = caps
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host('test_perf')
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(events, cfg.perf_events)
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_with_perf_libvirt_unsupported(self, mock_min_version):
def fake_has_min_version(lv_ver=None, hv_ver=None, hv_type=None):
if lv_ver == libvirt_driver.MIN_LIBVIRT_PERF_VERSION:
return False
return True
mock_min_version.side_effect = fake_has_min_version
self.flags(enabled_perf_events=['cmt'], group='libvirt')
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
self._test_get_guest_with_perf(caps, [])
@mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_CMT', True,
create=True)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_get_guest_with_perf_host_unsupported(self,
mock_min_version):
self.flags(enabled_perf_events=['cmt'], group='libvirt')
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
self._test_get_guest_with_perf(caps, [])
@mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_CMT', True,
create=True)
@mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_MBMT', True,
create=True)
@mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_MBML', True,
create=True)
@mock.patch.object(libvirt_driver.LOG, 'warning')
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_intel_cmt_perf_deprecation_warning(self,
mock_min_version,
mock_warn):
perf_events = ['cmt', 'mbml', 'mbmt']
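# Each of the deprecated cmt/mbml/mbmt perf events should trigger its
# own deprecation warning.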
self.flags(enabled_perf_events=['cmt', 'mbml', 'mbmt'],
group='libvirt')
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
features = []
for f in ('cmt', 'mbm_local', 'mbm_total'):
feature = vconfig.LibvirtConfigGuestCPUFeature()
feature.name = f
feature.policy = fields.CPUFeaturePolicy.REQUIRE
features.append(feature)
caps.host.cpu.features = set(features)
self._test_get_guest_with_perf(caps, ['cmt', 'mbml', 'mbmt'])
warning_count = 0
call_args_list = mock_warn.call_args_list
for call in call_args_list:
# Each call unpacks into (args, kwargs); check the positional args for
# the deprecated event name and the 'Monitoring Intel CMT' message
if (len(call) == 2 and len(call[0]) == 2 and
call[0][1] in perf_events and
'Monitoring Intel CMT' in call[0][0]):
warning_count += 1
self.assertEqual(3, warning_count)
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': fields.VMMode.HVM})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=True)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': fields.VMMode.XEN})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=False,
xen_only=True)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=False, rescue=instance_data)
def test_xml_uuid(self):
self._check_xml_and_uuid(self.test_image_meta)
def test_lxc_container_and_uri(self):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
def test_xml_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data, None)
def test_xml_user_specified_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data, 'sd')
def test_xml_disk_driver(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_driver(instance_data)
def test_xml_disk_bus_virtio(self):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self._check_xml_and_disk_bus(image_meta,
None,
(("disk", "virtio", "vda"),))
def test_xml_disk_bus_ide(self):
# It's necessary to check the architecture because Power (and AArch64)
# has no IDE support, so libvirt translates all IDE requests to SCSI
expected = {fields.Architecture.PPC: ("cdrom", "scsi", "sda"),
fields.Architecture.PPC64: ("cdrom", "scsi", "sda"),
fields.Architecture.PPC64LE: ("cdrom", "scsi", "sda"),
fields.Architecture.AARCH64: ("cdrom", "scsi", "sda")}
expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
("cdrom", "ide", "hda"))
image_meta = objects.ImageMeta.from_dict({
"disk_format": "iso"})
self._check_xml_and_disk_bus(image_meta,
None,
(expec_val,))
def test_xml_disk_bus_ide_and_virtio(self):
# It's necessary to check the architecture because Power (and AArch64)
# has no IDE support, so libvirt translates all IDE requests to SCSI
expected = {fields.Architecture.PPC: ("cdrom", "scsi", "sda"),
fields.Architecture.PPC64: ("cdrom", "scsi", "sda"),
fields.Architecture.PPC64LE: ("cdrom", "scsi", "sda"),
fields.Architecture.AARCH64: ("cdrom", "scsi", "sda")}
swap = {'device_name': '/dev/vdc',
'swap_size': 1}
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
block_device_info = {
'swap': swap,
'ephemerals': ephemerals}
expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
("cdrom", "ide", "hda"))
image_meta = objects.ImageMeta.from_dict({
"disk_format": "iso"})
self._check_xml_and_disk_bus(image_meta,
block_device_info,
(expec_val,
("disk", "virtio", "vdb"),
("disk", "virtio", "vdc")))
@mock.patch.object(host.Host, 'get_guest')
def test_instance_exists(self, mock_get_guest):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertTrue(drvr.instance_exists(None))
mock_get_guest.side_effect = exception.InstanceNotFound(
instance_id='something')
self.assertFalse(drvr.instance_exists(None))
mock_get_guest.side_effect = exception.InternalError(err='something')
self.assertFalse(drvr.instance_exists(None))
def test_estimate_instance_overhead_spawn(self):
# test the method when called with an instance ref
instance_topology = objects.InstanceNUMATopology(
emulator_threads_policy=(
fields.CPUEmulatorThreadsPolicy.ISOLATE),
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024)])
instance_info = objects.Instance(**self.test_instance)
instance_info.numa_topology = instance_topology
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
overhead = drvr.estimate_instance_overhead(instance_info)
self.assertEqual(1, overhead['vcpus'])
def test_estimate_instance_overhead_spawn_no_overhead(self):
# test the method when called with an instance ref, expecting no overhead
instance_topology = objects.InstanceNUMATopology(
emulator_threads_policy=(
fields.CPUEmulatorThreadsPolicy.SHARE),
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024)])
instance_info = objects.Instance(**self.test_instance)
instance_info.numa_topology = instance_topology
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
overhead = drvr.estimate_instance_overhead(instance_info)
self.assertEqual(0, overhead['vcpus'])
def test_estimate_instance_overhead_migrate(self):
# test the method when called with a flavor ref
instance_info = objects.Flavor(extra_specs={
'hw:emulator_threads_policy': (
fields.CPUEmulatorThreadsPolicy.ISOLATE),
'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
overhead = drvr.estimate_instance_overhead(instance_info)
self.assertEqual(1, overhead['vcpus'])
def test_estimate_instance_overhead_migrate_no_overhead(self):
# test the method when called with a flavor ref, expecting no overhead
instance_info = objects.Flavor(extra_specs={
'hw:emulator_threads_policy': (
fields.CPUEmulatorThreadsPolicy.SHARE),
'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
overhead = drvr.estimate_instance_overhead(instance_info)
self.assertEqual(0, overhead['vcpus'])
def test_estimate_instance_overhead_usage(self):
# test the method on the usage-reporting path (a flavor ref is passed here)
instance_info = objects.Flavor(extra_specs={
'hw:emulator_threads_policy': (
fields.CPUEmulatorThreadsPolicy.ISOLATE),
'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
overhead = drvr.estimate_instance_overhead(instance_info)
self.assertEqual(1, overhead['vcpus'])
def test_estimate_instance_overhead_usage_no_overhead(self):
# test the method on the usage-reporting path, expecting no overhead
instance_info = objects.Flavor(extra_specs={
'hw:emulator_threads_policy': (
fields.CPUEmulatorThreadsPolicy.SHARE),
'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
overhead = drvr.estimate_instance_overhead(instance_info)
self.assertEqual(0, overhead['vcpus'])
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instances(self, mock_list):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
mock_list.return_value = [vm1, vm2, vm3, vm4]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
names = drvr.list_instances()
self.assertEqual(names[0], vm1.name())
self.assertEqual(names[1], vm2.name())
self.assertEqual(names[2], vm3.name())
self.assertEqual(names[3], vm4.name())
mock_list.assert_called_with(only_guests=True, only_running=False)
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instance_uuids(self, mock_list):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
mock_list.return_value = [vm1, vm2, vm3, vm4]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
uuids = drvr.list_instance_uuids()
self.assertEqual(len(uuids), 4)
self.assertEqual(uuids[0], vm1.UUIDString())
self.assertEqual(uuids[1], vm2.UUIDString())
self.assertEqual(uuids[2], vm3.UUIDString())
self.assertEqual(uuids[3], vm4.UUIDString())
mock_list.assert_called_with(only_guests=True, only_running=False)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus',
return_value=None)
@mock.patch('nova.virt.libvirt.host.Host.get_cpu_count',
return_value=4)
def test_get_host_vcpus_is_empty(self, get_cpu_count, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.flags(vcpu_pin_set="")
vcpus = drvr._get_vcpu_total()
self.assertEqual(4, vcpus)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.flags(vcpu_pin_set="4-5")
get_online_cpus.return_value = set([4, 5, 6])
expected_vcpus = 2
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_out_of_range(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.flags(vcpu_pin_set="4-6")
get_online_cpus.return_value = set([4, 5])
self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virNodeNumOfDevices',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
self.flags(vcpu_pin_set="4-6")
get_online_cpus.side_effect = not_supported_exc
self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virNodeNumOfDevices',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
self.flags(vcpu_pin_set="1")
get_online_cpus.side_effect = not_supported_exc
expected_vcpus = 1
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
@mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
get_cpu_count.return_value = 2
expected_vcpus = 2
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
get_cpu_count.return_value = 3
expected_vcpus = 3
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_quiesce(self, mock_has_min_version):
self.create_fake_libvirt_mock(lookupByUUIDString=self.fake_lookup)
with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(
{"properties": {"hw_qemu_guest_agent": "yes"}})
self.assertIsNone(drvr.quiesce(self.context, instance, image_meta))
mock_fsfreeze.assert_called_once_with()
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_unquiesce(self, mock_has_min_version):
self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
lookupByUUIDString=self.fake_lookup)
with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(
{"properties": {"hw_qemu_guest_agent": "yes"}})
self.assertIsNone(drvr.unquiesce(self.context, instance,
image_meta))
mock_fsthaw.assert_called_once_with()
def test_create_snapshot_metadata(self):
base = objects.ImageMeta.from_dict(
{'disk_format': 'raw'})
instance_data = {'kernel_id': 'kernel',
'project_id': 'prj_id',
'ramdisk_id': 'ram_id',
'os_type': None}
instance = objects.Instance(**instance_data)
img_fmt = 'raw'
snp_name = 'snapshot_name'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
expected = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
},
'disk_format': img_fmt,
'container_format': 'bare',
}
self.assertEqual(ret, expected)
# simulate an instance with the os_type field set, a disk format of 'ami'
# and a container format other than 'bare'
instance['os_type'] = 'linux'
base = objects.ImageMeta.from_dict(
{'disk_format': 'ami',
'container_format': 'test_container'})
expected['properties']['os_type'] = instance['os_type']
expected['disk_format'] = base.disk_format
expected['container_format'] = base.container_format
ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
self.assertEqual(ret, expected)
def test_get_volume_driver(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw'}}
driver = conn._get_volume_driver(connection_info)
result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver)
self.assertTrue(result)
def test_get_volume_driver_unknown(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'unknown',
'data': {'device_path': '/fake',
'access_mode': 'rw'}}
self.assertRaises(
exception.VolumeDriverNotFound,
conn._get_volume_driver,
connection_info
)
def _fake_libvirt_config_guest_disk(self):
fake_config = vconfig.LibvirtConfigGuestDisk()
fake_config.source_type = "network"
fake_config.source_device = "fake-type"
fake_config.driver_name = "qemu"
fake_config.driver_format = "raw"
fake_config.driver_cache = "none"
fake_config.source_protocol = "fake"
fake_config.source_name = "fake"
fake_config.target_bus = "fake-bus"
fake_config.target_dev = "vdb"
return fake_config
@mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_set_cache_mode')
def test_get_volume_config(self, _set_cache_mode, get_config):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw'}}
disk_info = {'bus': 'fake-bus', 'type': 'fake-type',
'dev': 'vdb'}
config_guest_disk = self._fake_libvirt_config_guest_disk()
get_config.return_value = copy.deepcopy(config_guest_disk)
config = drvr._get_volume_config(connection_info, disk_info)
get_config.assert_called_once_with(connection_info, disk_info)
_set_cache_mode.assert_called_once_with(config)
self.assertEqual(config_guest_disk.to_xml(), config.to_xml())
@mock.patch.object(key_manager, 'API')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_use_native_luks')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
@mock.patch('nova.virt.libvirt.host.Host')
@mock.patch('os_brick.encryptors.luks.is_luks')
def test_connect_volume_native_luks(self, mock_is_luks, mock_host,
mock_get_volume_encryptor, mock_use_native_luks,
mock_get_volume_encryption, mock_get_key_mgr):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw',
'volume_id': uuids.volume_id}}
encryption = {'provider': encryptors.LUKS,
'encryption_key_id': uuids.encryption_key_id}
instance = mock.sentinel.instance
# Mock out the encryptors
mock_encryptor = mock.Mock()
mock_get_volume_encryptor.return_value = mock_encryptor
mock_is_luks.return_value = True
# Mock out the key manager
key = u'3734363537333734'
key_encoded = binascii.unhexlify(key)
mock_key = mock.Mock()
mock_key_mgr = mock.Mock()
mock_get_key_mgr.return_value = mock_key_mgr
mock_key_mgr.get.return_value = mock_key
mock_key.get_encoded.return_value = key_encoded
# assert that the secret is created for the encrypted volume during
# _connect_volume when use_native_luks is True
mock_get_volume_encryption.return_value = encryption
mock_use_native_luks.return_value = True
drvr._connect_volume(self.context, connection_info, instance,
encryption=encryption)
drvr._host.create_secret.assert_called_once_with('volume',
uuids.volume_id, password=key)
mock_encryptor.attach_volume.assert_not_called()
# assert that the encryptor is used if use_native_luks is False
drvr._host.create_secret.reset_mock()
mock_get_volume_encryption.reset_mock()
mock_use_native_luks.return_value = False
drvr._connect_volume(self.context, connection_info, instance,
encryption=encryption)
drvr._host.create_secret.assert_not_called()
mock_encryptor.attach_volume.assert_called_once_with(self.context,
**encryption)
# assert that we format the volume if is_luks is False
mock_use_native_luks.return_value = True
mock_is_luks.return_value = False
drvr._connect_volume(self.context, connection_info, instance,
encryption=encryption)
mock_encryptor._format_volume.assert_called_once_with(key,
**encryption)
# assert that os-brick is used when allow_native_luks is False
mock_encryptor.attach_volume.reset_mock()
mock_is_luks.return_value = True
drvr._connect_volume(self.context, connection_info, instance,
encryption=encryption, allow_native_luks=False)
mock_encryptor.attach_volume.assert_called_once_with(self.context,
**encryption)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
def test_disconnect_volume_native_luks(self, mock_get_volume_encryptor):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host = mock.Mock()
drvr._host.find_secret.return_value = mock.Mock()
connection_info = {'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw',
'volume_id': uuids.volume_id}}
encryption = {'provider': encryptors.LUKS,
'encryption_key_id': uuids.encryption_key_id}
instance = mock.sentinel.instance
# Mock out the encryptors
mock_encryptor = mock.Mock()
mock_get_volume_encryptor.return_value = mock_encryptor
# assert that a secret is deleted if found
drvr._disconnect_volume(self.context, connection_info, instance)
drvr._host.delete_secret.assert_called_once_with('volume',
uuids.volume_id)
mock_encryptor.detach_volume.assert_not_called()
# assert that the encryptor is used if no secret is found
drvr._host.find_secret.reset_mock()
drvr._host.delete_secret.reset_mock()
drvr._host.find_secret.return_value = None
drvr._disconnect_volume(self.context, connection_info, instance,
encryption=encryption)
drvr._host.delete_secret.assert_not_called()
mock_encryptor.detach_volume.assert_called_once_with(self.context,
**encryption)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
@mock.patch('nova.volume.cinder.API.get')
def test_disconnect_multiattach_single_connection(
self, mock_volume_get, mock_get_volume_driver,
mock_get_instances, mock_detach_encryptor):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_volume_driver = mock.MagicMock(
spec=volume_drivers.LibvirtBaseVolumeDriver)
mock_get_volume_driver.return_value = mock_volume_driver
attachments = (
[('70ab645f-6ffc-406a-b3d2-5007a0c01b82',
{'mountpoint': u'/dev/vdb',
'attachment_id': u'9402c249-99df-4f72-89e7-fd611493ee5d'}),
('00803490-f768-4049-aa7d-151f54e6311e',
{'mountpoint': u'/dev/vdb',
'attachment_id': u'd6128a7b-19c8-4a3e-8036-011396df95ac'})])
mock_volume_get.return_value = (
{'attachments': OrderedDict(attachments), 'multiattach': True,
'id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'})
fake_connection_info = {
'multiattach': True,
'volume_id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'}
fake_instance_1 = fake_instance.fake_instance_obj(
self.context,
host='fake-host-1')
mock_get_instances.return_value = (
['00803490-f768-4049-aa7d-151f54e6311e'])
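# Only one of the two attachments belongs to an instance on this host,
# so disconnecting should actually tear down the volume connection.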
drvr._disconnect_volume(
self.context, fake_connection_info, fake_instance_1)
mock_volume_driver.disconnect_volume.assert_called_once_with(
fake_connection_info, fake_instance_1)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
@mock.patch('nova.volume.cinder.API.get')
def test_disconnect_multiattach_multi_connection(
self, mock_volume_get, mock_get_volume_driver,
mock_get_instances, mock_detach_encryptor):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_volume_driver = mock.MagicMock(
spec=volume_drivers.LibvirtBaseVolumeDriver)
mock_get_volume_driver.return_value = mock_volume_driver
attachments = (
[('70ab645f-6ffc-406a-b3d2-5007a0c01b82',
{'mountpoint': u'/dev/vdb',
'attachment_id': u'9402c249-99df-4f72-89e7-fd611493ee5d'}),
('00803490-f768-4049-aa7d-151f54e6311e',
{'mountpoint': u'/dev/vdb',
'attachment_id': u'd6128a7b-19c8-4a3e-8036-011396df95ac'})])
mock_volume_get.return_value = (
{'attachments': OrderedDict(attachments), 'multiattach': True,
'id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'})
fake_connection_info = {
'multiattach': True,
'volume_id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'}
fake_instance_1 = fake_instance.fake_instance_obj(
self.context,
host='fake-host-1')
mock_get_instances.return_value = (
['00803490-f768-4049-aa7d-151f54e6311e',
'70ab645f-6ffc-406a-b3d2-5007a0c01b82'])
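# Both attachments map to instances on this host, so the host-level
# volume connection must be left in place.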
drvr._disconnect_volume(
self.context, fake_connection_info, fake_instance_1)
mock_volume_driver.disconnect_volume.assert_not_called()
def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByUUIDString \
= self.fake_lookup
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.VolumeDriverNotFound,
drvr.attach_volume, None,
{"driver_volume_type": "badtype"},
instance,
"/dev/sda")
def test_attach_blockio_invalid_hypervisor(self):
self.flags(virt_type='lxc', group='libvirt')
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByUUIDString \
= self.fake_lookup
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InvalidHypervisorType,
drvr.attach_volume, None,
{"driver_volume_type": "fake",
"data": {"logical_block_size": "4096",
"physical_block_size": "4096"}
},
instance,
"/dev/sda")
def _test_check_discard(self, mock_log, driver_discard=None,
bus=None, should_log=False):
mock_config = mock.Mock()
mock_config.driver_discard = driver_discard
mock_config.target_bus = bus
mock_instance = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._check_discard_for_attach_volume(mock_config, mock_instance)
self.assertEqual(should_log, mock_log.called)
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_no_unmap(self, mock_log):
self._test_check_discard(mock_log, driver_discard=None,
bus='scsi', should_log=False)
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_blk_controller(self, mock_log):
self._test_check_discard(mock_log, driver_discard='unmap',
bus='virtio', should_log=True)
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_valid_controller(self, mock_log):
self._test_check_discard(mock_log, driver_discard='unmap',
bus='scsi', should_log=False)
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_blk_controller_no_unmap(self,
mock_log):
self._test_check_discard(mock_log, driver_discard=None,
bus='virtio', should_log=False)
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
def test_attach_volume_with_libvirt_bug_breadcrumb(self, mock_get_info):
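        # When attach_device fails with the known QEMU 'Incorrect number of
        # padding bytes' internal error a warning breadcrumb is logged and
        # the libvirtError is re-raised.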
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
connection_info = {"driver_volume_type": "fake",
"data": {"device_path": "/fake",
"access_mode": "rw"}}
bdm = {'device_name': 'vdb',
'disk_bus': 'fake-bus',
'device_type': 'fake-type'}
disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
'dev': 'vdb'}
libvirt_exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
"unable to execute QEMU command 'object-add': Incorrect number"
" of padding bytes (56) found on decrypted data",
error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
with test.nested(
mock.patch.object(drvr._host, 'get_guest'),
mock.patch('nova.virt.libvirt.driver.LOG'),
mock.patch.object(drvr, '_connect_volume'),
mock.patch.object(drvr, '_get_volume_config'),
mock.patch.object(drvr, '_check_discard_for_attach_volume'),
mock.patch.object(drvr, '_build_device_metadata'),
) as (mock_get_guest, mock_log, mock_connect_volume,
mock_get_volume_config, mock_check_discard, mock_build_metadata):
mock_conf = mock.MagicMock()
mock_guest = mock.MagicMock()
mock_guest.attach_device.side_effect = libvirt_exc
mock_get_volume_config.return_value = mock_conf
mock_get_guest.return_value = mock_guest
mock_get_info.return_value = disk_info
mock_build_metadata.return_value = objects.InstanceDeviceMetadata()
self.assertRaises(fakelibvirt.libvirtError, drvr.attach_volume,
self.context, connection_info, instance, "/dev/vdb",
disk_bus=bdm['disk_bus'], device_type=bdm['device_type'])
mock_log.warning.assert_called_once()
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
def test_attach_volume_with_vir_domain_affect_live_flag(self,
mock_get_domain, mock_get_info, get_image):
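        # The device should be attached with both VIR_DOMAIN_AFFECT_CONFIG
        # and VIR_DOMAIN_AFFECT_LIVE while the guest is running or paused.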
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
image_meta = {}
get_image.return_value = image_meta
mock_dom = mock.MagicMock()
mock_get_domain.return_value = mock_dom
connection_info = {"driver_volume_type": "fake",
"data": {"device_path": "/fake",
"access_mode": "rw"}}
bdm = {'device_name': 'vdb',
'disk_bus': 'fake-bus',
'device_type': 'fake-type'}
disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
'dev': 'vdb'}
mock_get_info.return_value = disk_info
mock_conf = mock.MagicMock()
flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
with test.nested(
mock.patch.object(drvr, '_connect_volume'),
mock.patch.object(drvr, '_get_volume_config',
return_value=mock_conf),
mock.patch.object(drvr, '_check_discard_for_attach_volume'),
mock.patch.object(drvr, '_build_device_metadata'),
mock.patch.object(objects.Instance, 'save')
) as (mock_connect_volume, mock_get_volume_config, mock_check_discard,
mock_build_metadata, mock_save):
for state in (power_state.RUNNING, power_state.PAUSED):
mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
mock_build_metadata.return_value = \
objects.InstanceDeviceMetadata()
drvr.attach_volume(self.context, connection_info, instance,
"/dev/vdb", disk_bus=bdm['disk_bus'],
device_type=bdm['device_type'])
mock_get_domain.assert_called_with(instance)
mock_get_info.assert_called_with(
instance,
CONF.libvirt.virt_type,
test.MatchType(objects.ImageMeta),
bdm)
mock_connect_volume.assert_called_with(
self.context, connection_info, instance, encryption=None)
mock_get_volume_config.assert_called_with(
connection_info, disk_info)
mock_dom.attachDeviceFlags.assert_called_with(
mock_conf.to_xml(), flags=flags)
mock_check_discard.assert_called_with(mock_conf, instance)
mock_build_metadata.assert_called_with(self.context, instance)
mock_save.assert_called_with()
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
def test_detach_volume_with_vir_domain_affect_live_flag(self,
mock_get_domain):
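        # The device should be detached with both VIR_DOMAIN_AFFECT_CONFIG
        # and VIR_DOMAIN_AFFECT_LIVE while the guest is running or paused.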
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
mock_xml_with_disk = """<domain>
<devices>
<disk type='file'>
<source file='/path/to/fake-volume'/>
<target dev='vdc' bus='virtio'/>
</disk>
</devices>
</domain>"""
mock_xml_without_disk = """<domain>
<devices>
</devices>
</domain>"""
mock_dom = mock.MagicMock()
        # The second time, return nothing about disk vdc so it looks removed
return_list = [mock_xml_with_disk, mock_xml_without_disk,
mock_xml_without_disk]
        # Double the return list because we test with two guest power states
mock_dom.XMLDesc.side_effect = return_list + return_list
connection_info = {"driver_volume_type": "fake",
"data": {"device_path": "/fake",
"access_mode": "rw"}}
flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
with mock.patch.object(drvr, '_disconnect_volume') as \
mock_disconnect_volume:
for state in (power_state.RUNNING, power_state.PAUSED):
mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
mock_get_domain.return_value = mock_dom
drvr.detach_volume(
self.context, connection_info, instance, '/dev/vdc')
mock_get_domain.assert_called_with(instance)
mock_dom.detachDeviceFlags.assert_called_with(
"""<disk type="file" device="disk">
<source file="/path/to/fake-volume"/>
<target bus="virtio" dev="vdc"/>
</disk>
""", flags=flags)
mock_disconnect_volume.assert_called_with(
self.context, connection_info, instance, encryption=None)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
def test_detach_volume_disk_not_found(self, mock_get_domain,
mock_disconnect_volume):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
mock_xml_without_disk = """<domain>
<devices>
</devices>
</domain>"""
mock_dom = mock.MagicMock(return_value=mock_xml_without_disk)
connection_info = {"driver_volume_type": "fake",
"data": {"device_path": "/fake",
"access_mode": "rw"}}
mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234,
5678]
mock_get_domain.return_value = mock_dom
drvr.detach_volume(
self.context, connection_info, instance, '/dev/vdc')
mock_get_domain.assert_called_once_with(instance)
mock_disconnect_volume.assert_called_once_with(
self.context, connection_info, instance, encryption=None)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
def test_detach_volume_disk_not_found_encryption(self, mock_get_domain,
mock_disconnect_volume,
mock_get_encryptor):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
mock_xml_without_disk = """<domain>
<devices>
</devices>
</domain>"""
mock_dom = mock.MagicMock(return_value=mock_xml_without_disk)
encryption = mock.MagicMock()
connection_info = {"driver_volume_type": "fake",
"data": {"device_path": "/fake",
"access_mode": "rw"}}
mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234,
5678]
mock_get_domain.return_value = mock_dom
drvr.detach_volume(self.context, connection_info, instance,
'/dev/vdc', encryption)
mock_disconnect_volume.assert_called_once_with(
self.context, connection_info, instance, encryption=encryption)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_detach_volume_order_with_encryptors(self, mock_get_guest,
mock_get_encryptor, mock_get_volume_driver):
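        # The guest device is detached first, then the encryptor, and only
        # then is the volume disconnected from the host.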
mock_volume_driver = mock.MagicMock(
spec=volume_drivers.LibvirtBaseVolumeDriver)
mock_get_volume_driver.return_value = mock_volume_driver
mock_guest = mock.MagicMock(spec=libvirt_guest.Guest)
mock_guest.get_power_state.return_value = power_state.RUNNING
mock_get_guest.return_value = mock_guest
mock_encryptor = mock.MagicMock(
spec=encryptors.nop.NoOpEncryptor)
mock_get_encryptor.return_value = mock_encryptor
mock_order = mock.Mock()
mock_order.attach_mock(mock_volume_driver.disconnect_volume,
'disconnect_volume')
mock_order.attach_mock(mock_guest.detach_device_with_retry(),
'detach_volume')
mock_order.attach_mock(mock_encryptor.detach_volume,
'detach_encryptor')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
connection_info = {"driver_volume_type": "fake",
"data": {"device_path": "/fake",
"access_mode": "rw"}}
encryption = {"provider": "NoOpEncryptor"}
drvr.detach_volume(
self.context, connection_info, instance, '/dev/vdc',
encryption=encryption)
mock_order.assert_has_calls([
mock.call.detach_volume(),
mock.call.detach_encryptor(**encryption),
mock.call.disconnect_volume(connection_info, instance)])
def test_extend_volume(self):
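        # The guest block device should be resized once _extend_volume
        # reports the new size, for both running and paused guests.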
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
connection_info = {
'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw'}
}
new_size_in_kb = 20 * 1024 * 1024
guest = mock.Mock(spec='nova.virt.libvirt.guest.Guest')
# block_device
block_device = mock.Mock(
spec='nova.virt.libvirt.guest.BlockDevice')
block_device.resize = mock.Mock()
guest.get_block_device = mock.Mock(return_value=block_device)
drvr._host.get_guest = mock.Mock(return_value=guest)
drvr._extend_volume = mock.Mock(return_value=new_size_in_kb)
for state in (power_state.RUNNING, power_state.PAUSED):
guest.get_power_state = mock.Mock(return_value=state)
drvr.extend_volume(connection_info, instance)
drvr._extend_volume.assert_called_with(connection_info,
instance)
guest.get_block_device.assert_called_with('/fake')
block_device.resize.assert_called_with(20480)
def test_extend_volume_with_volume_driver_without_support(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
with mock.patch.object(drvr, '_extend_volume',
side_effect=NotImplementedError()):
connection_info = {'driver_volume_type': 'fake'}
self.assertRaises(exception.ExtendVolumeNotSupported,
drvr.extend_volume,
connection_info, instance)
def test_extend_volume_disk_not_found(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
connection_info = {
'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw'}
}
new_size_in_kb = 20 * 1024 * 1024
xml_no_disk = "<domain><devices></devices></domain>"
dom = fakelibvirt.Domain(drvr._get_connection(), xml_no_disk, False)
guest = libvirt_guest.Guest(dom)
guest.get_power_state = mock.Mock(return_value=power_state.RUNNING)
drvr._host.get_guest = mock.Mock(return_value=guest)
drvr._extend_volume = mock.Mock(return_value=new_size_in_kb)
drvr.extend_volume(connection_info, instance)
def test_extend_volume_with_instance_not_found(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
with test.nested(
mock.patch.object(host.Host, '_get_domain',
side_effect=exception.InstanceNotFound(
instance_id=instance.uuid)),
mock.patch.object(drvr, '_extend_volume')
) as (_get_domain, _extend_volume):
connection_info = {'driver_volume_type': 'fake'}
self.assertRaises(exception.InstanceNotFound,
drvr.extend_volume,
connection_info, instance)
def test_extend_volume_with_libvirt_error(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
connection_info = {
'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw'}
}
new_size_in_kb = 20 * 1024 * 1024
guest = mock.Mock(spec='nova.virt.libvirt.guest.Guest')
guest.get_power_state = mock.Mock(return_value=power_state.RUNNING)
# block_device
block_device = mock.Mock(
spec='nova.virt.libvirt.guest.BlockDevice')
block_device.resize = mock.Mock(
side_effect=fakelibvirt.libvirtError('ERR'))
guest.get_block_device = mock.Mock(return_value=block_device)
drvr._host.get_guest = mock.Mock(return_value=guest)
drvr._extend_volume = mock.Mock(return_value=new_size_in_kb)
self.assertRaises(fakelibvirt.libvirtError,
drvr.extend_volume,
connection_info, instance)
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_use_encryptor_connection_info_incomplete(self,
mock_get_encryptor, mock_get_metadata):
"""Assert no attach attempt is made given incomplete connection_info.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'data': {}}
drvr._attach_encryptor(self.context, connection_info, None, False)
mock_get_metadata.assert_not_called()
mock_get_encryptor.assert_not_called()
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_attach_encryptor_unencrypted_volume_meta_missing(self,
mock_get_encryptor, mock_get_metadata):
"""Assert that if not provided encryption metadata is fetched even
if the volume is ultimately unencrypted and no attempt to attach
is made.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
encryption = {}
connection_info = {'data': {'volume_id': uuids.volume_id}}
mock_get_metadata.return_value = encryption
drvr._attach_encryptor(self.context, connection_info, None, False)
mock_get_metadata.assert_called_once_with(self.context,
drvr._volume_api, uuids.volume_id, connection_info)
mock_get_encryptor.assert_not_called()
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_attach_encryptor_unencrypted_volume_meta_provided(self,
mock_get_encryptor, mock_get_metadata):
"""Assert that if an empty encryption metadata dict is provided that
there is no additional attempt to lookup the metadata or attach the
encryptor.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
encryption = {}
connection_info = {'data': {'volume_id': uuids.volume_id}}
drvr._attach_encryptor(self.context, connection_info, encryption,
False)
mock_get_metadata.assert_not_called()
mock_get_encryptor.assert_not_called()
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_attach_encryptor_encrypted_volume_meta_missing(self,
mock_get_encryptor, mock_get_metadata):
"""Assert that if missing the encryption metadata of an encrypted
volume is fetched and then used to attach the encryptor for the volume.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_encryptor = mock.MagicMock()
mock_get_encryptor.return_value = mock_encryptor
encryption = {'provider': 'luks', 'control_location': 'front-end'}
mock_get_metadata.return_value = encryption
connection_info = {'data': {'volume_id': uuids.volume_id}}
drvr._attach_encryptor(self.context, connection_info, None, False)
mock_get_metadata.assert_called_once_with(self.context,
drvr._volume_api, uuids.volume_id, connection_info)
mock_get_encryptor.assert_called_once_with(connection_info,
encryption)
mock_encryptor.attach_volume.assert_called_once_with(self.context,
**encryption)
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_attach_encryptor_encrypted_volume_meta_provided(self,
mock_get_encryptor, mock_get_metadata):
"""Assert that when provided there are no further attempts to fetch the
encryption metadata for the volume and that the provided metadata is
then used to attach the volume.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_encryptor = mock.MagicMock()
mock_get_encryptor.return_value = mock_encryptor
encryption = {'provider': 'luks', 'control_location': 'front-end'}
connection_info = {'data': {'volume_id': uuids.volume_id}}
drvr._attach_encryptor(self.context, connection_info,
encryption, False)
mock_get_metadata.assert_not_called()
mock_get_encryptor.assert_called_once_with(connection_info,
encryption)
mock_encryptor.attach_volume.assert_called_once_with(self.context,
**encryption)
@mock.patch.object(key_manager, 'API')
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_attach_encryptor_encrypted_native_luks_serial(self,
mock_get_encryptor, mock_get_metadata, mock_get_key_mgr):
"""Uses native luks encryption with a provider encryptor and the
connection_info has a serial but not volume_id in the 'data'
sub-dict.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_encryptor = mock.MagicMock()
mock_get_encryptor.return_value = mock_encryptor
encryption = {'provider': 'luks', 'control_location': 'front-end',
'encryption_key_id': uuids.encryption_key_id}
connection_info = {'serial': uuids.serial, 'data': {}}
# Mock out the key manager
key = u'3734363537333734'
key_encoded = binascii.unhexlify(key)
mock_key = mock.Mock()
mock_key_mgr = mock.Mock()
mock_get_key_mgr.return_value = mock_key_mgr
mock_key_mgr.get.return_value = mock_key
mock_key.get_encoded.return_value = key_encoded
with mock.patch.object(drvr, '_use_native_luks', return_value=True):
with mock.patch.object(drvr._host, 'create_secret') as crt_scrt:
drvr._attach_encryptor(self.context, connection_info,
encryption, allow_native_luks=True)
mock_get_metadata.assert_not_called()
mock_get_encryptor.assert_not_called()
crt_scrt.assert_called_once_with(
'volume', uuids.serial, password=key)
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_detach_encryptor_connection_info_incomplete(self,
mock_get_encryptor, mock_get_metadata):
"""Assert no detach attempt is made given incomplete connection_info.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'data': {}}
drvr._detach_encryptor(self.context, connection_info, None)
mock_get_metadata.assert_not_called()
mock_get_encryptor.assert_not_called()
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_detach_encryptor_unencrypted_volume_meta_missing(self,
mock_get_encryptor, mock_get_metadata):
"""Assert that if not provided encryption metadata is fetched even
if the volume is ultimately unencrypted and no attempt to detach
is made.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
encryption = {}
connection_info = {'data': {'volume_id': uuids.volume_id}}
mock_get_metadata.return_value = encryption
drvr._detach_encryptor(self.context, connection_info, None)
mock_get_metadata.assert_called_once_with(self.context,
drvr._volume_api, uuids.volume_id, connection_info)
mock_get_encryptor.assert_not_called()
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_detach_encryptor_unencrypted_volume_meta_provided(self,
mock_get_encryptor, mock_get_metadata):
"""Assert that if an empty encryption metadata dict is provided that
there is no additional attempt to lookup the metadata or detach the
encryptor.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
encryption = {}
connection_info = {'data': {'volume_id': uuids.volume_id}}
drvr._detach_encryptor(self.context, connection_info, encryption)
mock_get_metadata.assert_not_called()
mock_get_encryptor.assert_not_called()
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_detach_encryptor_encrypted_volume_meta_missing(self,
mock_get_encryptor, mock_get_metadata):
"""Assert that if missing the encryption metadata of an encrypted
volume is fetched and then used to detach the encryptor for the volume.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_encryptor = mock.MagicMock()
mock_get_encryptor.return_value = mock_encryptor
encryption = {'provider': 'luks', 'control_location': 'front-end'}
mock_get_metadata.return_value = encryption
connection_info = {'data': {'volume_id': uuids.volume_id}}
drvr._detach_encryptor(self.context, connection_info, None)
mock_get_metadata.assert_called_once_with(self.context,
drvr._volume_api, uuids.volume_id, connection_info)
mock_get_encryptor.assert_called_once_with(connection_info,
encryption)
mock_encryptor.detach_volume.assert_called_once_with(**encryption)
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_detach_encryptor_encrypted_volume_meta_provided(self,
mock_get_encryptor, mock_get_metadata):
"""Assert that when provided there are no further attempts to fetch the
encryption metadata for the volume and that the provided metadata is
then used to detach the volume.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_encryptor = mock.MagicMock()
mock_get_encryptor.return_value = mock_encryptor
encryption = {'provider': 'luks', 'control_location': 'front-end'}
connection_info = {'data': {'volume_id': uuids.volume_id}}
drvr._detach_encryptor(self.context, connection_info, encryption)
mock_get_metadata.assert_not_called()
mock_get_encryptor.assert_called_once_with(connection_info,
encryption)
mock_encryptor.detach_volume.assert_called_once_with(**encryption)
@mock.patch.object(host.Host, "has_min_version")
def test_use_native_luks(self, mock_has_min_version):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# True only when the required QEMU and Libvirt versions are available
# on the host and a valid LUKS provider is present within the
# encryption metadata dict.
mock_has_min_version.return_value = True
self.assertFalse(drvr._use_native_luks({}))
self.assertFalse(drvr._use_native_luks({
'provider': 'nova.volume.encryptors.cryptsetup.CryptSetupEncryptor'
}))
self.assertFalse(drvr._use_native_luks({
'provider': 'CryptSetupEncryptor'}))
self.assertFalse(drvr._use_native_luks({
'provider': encryptors.PLAIN}))
self.assertTrue(drvr._use_native_luks({
'provider': 'nova.volume.encryptors.luks.LuksEncryptor'}))
self.assertTrue(drvr._use_native_luks({
'provider': 'LuksEncryptor'}))
self.assertTrue(drvr._use_native_luks({
'provider': encryptors.LUKS}))
# Always False when the required QEMU and Libvirt versions are not
# available on the host.
mock_has_min_version.return_value = False
self.assertFalse(drvr._use_native_luks({}))
self.assertFalse(drvr._use_native_luks({
'provider': 'nova.volume.encryptors.cryptsetup.CryptSetupEncryptor'
}))
self.assertFalse(drvr._use_native_luks({
'provider': 'CryptSetupEncryptor'}))
self.assertFalse(drvr._use_native_luks({
'provider': encryptors.PLAIN}))
self.assertFalse(drvr._use_native_luks({
'provider': 'nova.volume.encryptors.luks.LuksEncryptor'}))
self.assertFalse(drvr._use_native_luks({
'provider': 'LuksEncryptor'}))
self.assertFalse(drvr._use_native_luks({
'provider': encryptors.LUKS}))
def test_multi_nic(self):
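        # Two network interfaces should result in two <interface> elements
        # in the guest XML, the first being a bridge interface.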
network_info = _fake_network_info(self, 2)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta)
tree = etree.fromstring(xml)
interfaces = tree.findall("./devices/interface")
self.assertEqual(len(interfaces), 2)
self.assertEqual(interfaces[0].get('type'), 'bridge')
def _check_xml_and_container(self, instance):
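        # The lxc virt_type should use the lxc:/// URI and generate an 'exe'
        # type guest with a filesystem device instead of a disk.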
instance_ref = objects.Instance(**instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(drvr._uri(), 'lxc:///')
network_info = _fake_network_info(self, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta)
tree = etree.fromstring(xml)
        checks = [
            (lambda t: t.find('.').get('type'), 'lxc'),
            (lambda t: t.find('./os/type').text, 'exe'),
            (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
        for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))
target = tree.find('./devices/filesystem/source').get('dir')
self.assertGreater(len(target), 0)
def _check_xml_and_disk_prefix(self, instance, prefix):
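        # Each virt_type should default to its own disk device prefix unless
        # an explicit disk_prefix is configured.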
instance_ref = objects.Instance(**instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
def _get_prefix(p, default):
if p:
return p + 'a'
return default
type_disk_map = {
'qemu': [
(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'xen': [
(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'xvda'))],
'kvm': [
(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'uml': [
(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'ubda'))]
}
for (virt_type, checks) in type_disk_map.items():
self.flags(virt_type=virt_type, group='libvirt')
if prefix:
self.flags(disk_prefix=prefix, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed check %d' %
(check(tree), expected_result, i))
def _check_xml_and_disk_driver(self, image_meta):
os_open = os.open
directio_supported = True
def os_open_stub(path, flags, *args, **kwargs):
if flags & os.O_DIRECT:
if not directio_supported:
raise OSError(errno.EINVAL,
'%s: %s' % (os.strerror(errno.EINVAL), path))
flags &= ~os.O_DIRECT
return os_open(path, flags, *args, **kwargs)
self.stub_out('os.open', os_open_stub)
def connection_supports_direct_io_stub(dirpath):
return directio_supported
self.stub_out('nova.privsep.utils.supports_direct_io',
connection_supports_direct_io_stub)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
network_info = _fake_network_info(self, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for guest_disk in disks:
self.assertEqual(guest_disk.get("cache"), "none")
directio_supported = False
# The O_DIRECT availability is cached on first use in
# LibvirtDriver, hence we re-create it here
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for guest_disk in disks:
self.assertEqual(guest_disk.get("cache"), "writethrough")
def _check_xml_and_disk_bus(self, image_meta,
block_device_info, wantConfig):
instance_ref = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
block_device_info)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta,
block_device_info=block_device_info)
tree = etree.fromstring(xml)
got_disks = tree.findall('./devices/disk')
got_disk_targets = tree.findall('./devices/disk/target')
for i in range(len(wantConfig)):
want_device_type = wantConfig[i][0]
want_device_bus = wantConfig[i][1]
want_device_dev = wantConfig[i][2]
got_device_type = got_disks[i].get('device')
got_device_bus = got_disk_targets[i].get('bus')
got_device_dev = got_disk_targets[i].get('dev')
self.assertEqual(got_device_type, want_device_type)
self.assertEqual(got_device_bus, want_device_bus)
self.assertEqual(got_device_dev, want_device_dev)
def _check_xml_and_uuid(self, image_meta):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
network_info = _fake_network_info(self, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_host_sysinfo_serial_hardware",)
def _check_xml_and_uri(self, instance, mock_serial,
expect_ramdisk=False, expect_kernel=False,
rescue=None, expect_xen_hvm=False, xen_only=False):
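        # Build guest XML for each hypervisor type in turn and verify the
        # connection URI together with the expected kernel/ramdisk, sysinfo,
        # console and disk elements.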
mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
instance_ref = objects.Instance(**instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
xen_vm_mode = fields.VMMode.XEN
if expect_xen_hvm:
xen_vm_mode = fields.VMMode.HVM
type_uri_map = {'qemu': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./os/type').text,
fields.VMMode.HVM),
(lambda t: t.find('./devices/emulator'), None)]),
'kvm': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./os/type').text,
fields.VMMode.HVM),
(lambda t: t.find('./devices/emulator'), None)]),
'uml': ('uml:///system',
[(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./os/type').text,
fields.VMMode.UML)]),
'xen': ('xen:///',
[(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./os/type').text,
xen_vm_mode)])}
if expect_xen_hvm or xen_only:
hypervisors_to_check = ['xen']
else:
hypervisors_to_check = ['qemu', 'kvm', 'xen']
for hypervisor_type in hypervisors_to_check:
check_list = type_uri_map[hypervisor_type][1]
if rescue:
suffix = '.rescue'
else:
suffix = ''
if expect_kernel:
check = (lambda t: self.relpath(t.find('./os/kernel').text).
split('/')[1], 'kernel' + suffix)
else:
check = (lambda t: t.find('./os/kernel'), None)
check_list.append(check)
if expect_kernel:
check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
text, hypervisor_type == "qemu")
check_list.append(check)
# Hypervisors that only support vm_mode.HVM should not produce
# configuration that results in kernel arguments
if not expect_kernel and (hypervisor_type in
['qemu', 'kvm']):
check = (lambda t: t.find('./os/root'), None)
check_list.append(check)
check = (lambda t: t.find('./os/cmdline'), None)
check_list.append(check)
if expect_ramdisk:
check = (lambda t: self.relpath(t.find('./os/initrd').text).
split('/')[1], 'ramdisk' + suffix)
else:
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
if hypervisor_type in ['qemu', 'kvm']:
xpath = "./sysinfo/system/entry"
check = (lambda t: t.findall(xpath)[0].get("name"),
"manufacturer")
check_list.append(check)
check = (lambda t: t.findall(xpath)[0].text,
version.vendor_string())
check_list.append(check)
check = (lambda t: t.findall(xpath)[1].get("name"),
"product")
check_list.append(check)
check = (lambda t: t.findall(xpath)[1].text,
version.product_string())
check_list.append(check)
check = (lambda t: t.findall(xpath)[2].get("name"),
"version")
check_list.append(check)
# NOTE(sirp): empty strings don't roundtrip in lxml (they are
# converted to None), so we need an `or ''` to correct for that
check = (lambda t: t.findall(xpath)[2].text or '',
version.version_string_with_package())
check_list.append(check)
check = (lambda t: t.findall(xpath)[3].get("name"),
"serial")
check_list.append(check)
check = (lambda t: t.findall(xpath)[3].text,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
check_list.append(check)
check = (lambda t: t.findall(xpath)[4].get("name"),
"uuid")
check_list.append(check)
check = (lambda t: t.findall(xpath)[4].text,
instance['uuid'])
check_list.append(check)
if hypervisor_type in ['qemu', 'kvm']:
check = (lambda t: t.findall('./devices/serial')[0].get(
'type'), 'file')
check_list.append(check)
check = (lambda t: t.findall('./devices/serial')[1].get(
'type'), 'pty')
check_list.append(check)
check = (lambda t: self.relpath(t.findall(
'./devices/serial/source')[0].get('path')).
split('/')[1], 'console.log')
check_list.append(check)
else:
check = (lambda t: t.find('./devices/console').get(
'type'), 'pty')
check_list.append(check)
common_checks = [
(lambda t: t.find('.').tag, 'domain'),
(lambda t: t.find('./memory').text, '2097152')]
if rescue:
common_checks += [
(lambda t: self.relpath(t.findall('./devices/disk/source')[0].
get('file')).split('/')[1], 'disk.rescue'),
(lambda t: self.relpath(t.findall('./devices/disk/source')[1].
get('file')).split('/')[1], 'disk')]
else:
common_checks += [(lambda t: self.relpath(t.findall(
'./devices/disk/source')[0].get('file')).split('/')[1],
'disk')]
common_checks += [(lambda t: self.relpath(t.findall(
'./devices/disk/source')[1].get('file')).split('/')[1],
'disk.local')]
for virt_type in hypervisors_to_check:
expected_uri = type_uri_map[virt_type][0]
checks = type_uri_map[virt_type][1]
self.flags(virt_type=virt_type, group='libvirt')
with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(drvr._uri(), expected_uri)
network_info = _fake_network_info(self, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
rescue=rescue)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta,
rescue=rescue)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed check %d' %
(check(tree), expected_result, i))
for i, (check, expected_result) in enumerate(common_checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed common check %d' %
(check(tree), expected_result, i))
filterref = './devices/interface/filterref'
vif = network_info[0]
nic_id = vif['address'].lower().replace(':', '')
fw = firewall.NWFilterFirewall(drvr)
instance_filter_name = fw._instance_filter_name(instance_ref,
nic_id)
self.assertEqual(tree.find(filterref).get('filter'),
instance_filter_name)
# This test is supposed to make sure we don't
# override a specifically set uri
#
# Deliberately not just assigning this string to CONF.connection_uri
# and checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the CONF.
testuri = 'something completely different'
self.flags(connection_uri=testuri, group='libvirt')
for virt_type in type_uri_map:
self.flags(virt_type=virt_type, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(drvr._uri(), testuri)
def test_ensure_filtering_rules_for_instance_timeout(self):
        # ensure_filtering_rules_for_instance() finishes with timeout.
# Preparing mocks
class FakeTime(object):
def __init__(self):
self.counter = 0
def sleep(self, t):
self.counter += t
fake_timer = FakeTime()
# _fake_network_info must be called before create_fake_libvirt_mock(),
# as _fake_network_info calls importutils.import_class() and
# create_fake_libvirt_mock() mocks importutils.import_class().
network_info = _fake_network_info(self, 1)
self.create_fake_libvirt_mock()
instance_ref = objects.Instance(**self.test_instance)
# Start test
try:
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stub_out('nova.virt.libvirt.firewall.IptablesFirewallDriver.'
'setup_basic_filtering', lambda *a: None)
self.stub_out('nova.virt.libvirt.firewall.IptablesFirewallDriver.'
'prepare_instance_filter', lambda *a: None)
self.stub_out('nova.virt.libvirt.firewall.IptablesFirewallDriver.'
'instance_filter_exists', lambda *a: None)
self.stub_out('eventlet.greenthread.sleep',
lambda t: fake_timer.sleep(t))
drvr.ensure_filtering_rules_for_instance(instance_ref,
network_info)
except exception.NovaException as e:
msg = ('The firewall filter for %s does not exist' %
instance_ref['name'])
            self.assertIn(msg, six.text_type(e))
self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
"amount of time")
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
self, mock_cpu, mock_test_file, mock_svc):
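        # With block migration requested and enough disk on the destination
        # the returned migrate data carries the shared storage test filename
        # and the available disk space in MB.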
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
filename = "file"
# _check_cpu_match
mock_cpu.return_value = 1
# mounted_on_same_shared_storage
mock_test_file.return_value = filename
# No need for the src_compute_info
return_value = drvr.check_can_live_migrate_destination(self.context,
instance_ref, None, compute_info, True)
return_value.is_volume_backed = False
self.assertThat({"filename": "file",
'image_type': 'default',
'disk_available_mb': 409600,
"disk_over_commit": False,
"block_migration": True,
"is_volume_backed": False,
"dst_wants_file_backed_memory": False,
"file_backed_memory_discard": False},
matchers.DictMatches(return_value.to_legacy_dict()))
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_all_pass_with_over_commit(
self, mock_cpu, mock_test_file, mock_svc):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': -1000,
'free_disk_gb': 50,
'cpu_info': 'asdf',
}
filename = "file"
# _check_cpu_match
mock_cpu.return_value = 1
# mounted_on_same_shared_storage
mock_test_file.return_value = filename
# No need for the src_compute_info
return_value = drvr.check_can_live_migrate_destination(self.context,
instance_ref, None, compute_info, True, True)
return_value.is_volume_backed = False
self.assertThat({"filename": "file",
'image_type': 'default',
'disk_available_mb': 51200,
"disk_over_commit": True,
"block_migration": True,
"is_volume_backed": False,
"dst_wants_file_backed_memory": False,
"file_backed_memory_discard": False},
matchers.DictMatches(return_value.to_legacy_dict()))
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
self, mock_cpu, mock_test_file, mock_svc):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
filename = "file"
# _check_cpu_match
mock_cpu.return_value = 1
# mounted_on_same_shared_storage
mock_test_file.return_value = filename
# No need for the src_compute_info
return_value = drvr.check_can_live_migrate_destination(self.context,
instance_ref, None, compute_info, False)
return_value.is_volume_backed = False
self.assertThat({"filename": "file",
"image_type": 'default',
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 409600,
"is_volume_backed": False,
"dst_wants_file_backed_memory": False,
"file_backed_memory_discard": False},
matchers.DictMatches(return_value.to_legacy_dict()))
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_fills_listen_addrs(
self, mock_cpu, mock_test_file, mock_svc):
# Tests that check_can_live_migrate_destination returns the listen
# addresses required by check_can_live_migrate_source.
self.flags(server_listen='192.0.2.12', group='vnc')
self.flags(server_listen='198.51.100.34', group='spice')
self.flags(proxyclient_address='203.0.113.56', group='serial_console')
self.flags(enabled=True, group='serial_console')
mock_cpu.return_value = 1
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
result = drvr.check_can_live_migrate_destination(
self.context, instance_ref, compute_info, compute_info)
self.assertEqual('192.0.2.12',
str(result.graphics_listen_addr_vnc))
self.assertEqual('198.51.100.34',
str(result.graphics_listen_addr_spice))
self.assertEqual('203.0.113.56',
str(result.serial_listen_addr))
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU',
return_value=1)
def test_check_can_live_migrate_dest_ensure_serial_adds_not_set(
self, mock_cpu, mock_test_file, mock_svc):
self.flags(proxyclient_address='127.0.0.1', group='serial_console')
self.flags(enabled=False, group='serial_console')
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
result = drvr.check_can_live_migrate_destination(
self.context, instance_ref, compute_info, compute_info)
self.assertIsNone(result.serial_listen_addr)
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
def test_check_can_live_migrate_guest_cpu_none_model(
self, mock_cpu, mock_test_file):
# Tests that when instance.vcpu_model.model is None, the host cpu
# model is used for live migration.
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
instance_ref.vcpu_model.model = None
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
result = drvr.check_can_live_migrate_destination(
self.context, instance_ref, compute_info, compute_info)
result.is_volume_backed = False
mock_cpu.assert_called_once_with(None, 'asdf', instance_ref)
expected_result = {"filename": 'fake',
"image_type": CONF.libvirt.images_type,
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": False,
"dst_wants_file_backed_memory": False,
"file_backed_memory_discard": False}
self.assertEqual(expected_result, result.to_legacy_dict())
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
self, mock_cpu, mock_test_file, mock_svc):
instance_ref = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': jsonutils.dumps({
"vendor": "AMD",
"arch": fields.Architecture.I686,
"features": ["sse3"],
"model": "Opteron_G3",
"topology": {"cores": 2, "threads": 1, "sockets": 4}
}), 'disk_available_least': 1}
filename = "file"
# _check_cpu_match
mock_cpu.return_value = 1
# mounted_on_same_shared_storage
mock_test_file.return_value = filename
return_value = drvr.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, False)
# NOTE(danms): Compute manager would have set this, so set it here
return_value.is_volume_backed = False
self.assertThat({"filename": "file",
"image_type": 'default',
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": False,
"dst_wants_file_backed_memory": False,
"file_backed_memory_discard": False},
matchers.DictMatches(return_value.to_legacy_dict()))
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_file_backed(
self, mock_cpu, mock_test_file, mock_svc):
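        # With file_backed_memory enabled the destination should report that
        # it wants file-backed memory in the returned migrate data.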
self.flags(file_backed_memory=1024, group='libvirt')
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
filename = "file"
svc = objects.Service()
svc.version = 32
mock_svc.return_value = svc
# _check_cpu_match
mock_cpu.return_value = 1
# mounted_on_same_shared_storage
mock_test_file.return_value = filename
# No need for the src_compute_info
return_value = drvr.check_can_live_migrate_destination(self.context,
instance_ref, None, compute_info, False)
self.assertTrue(return_value.dst_wants_file_backed_memory)
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def _test_check_can_live_migrate_dest_file_backed_discard(
self, libvirt_version, qemu_version, mock_cpu, mock_test_file,
mock_svc, mock_lib_version, mock_version):
self.flags(file_backed_memory=1024, group='libvirt')
mock_lib_version.return_value = libvirt_version
mock_version.return_value = qemu_version
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
filename = "file"
svc = objects.Service()
svc.version = 32
mock_svc.return_value = svc
# _check_cpu_match
mock_cpu.return_value = 1
# mounted_on_same_shared_storage
mock_test_file.return_value = filename
# No need for the src_compute_info
return_value = drvr.check_can_live_migrate_destination(self.context,
instance_ref, None, compute_info, False)
return return_value
def test_check_can_live_migrate_dest_file_backed_discard(self):
libvirt_version = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
qemu_version = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)
data = self._test_check_can_live_migrate_dest_file_backed_discard(
libvirt_version, qemu_version)
self.assertTrue(data.dst_wants_file_backed_memory)
self.assertTrue(data.file_backed_memory_discard)
def test_check_can_live_migrate_dest_file_backed_discard_bad_libvirt(self):
libvirt_version = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION) - 1
qemu_version = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)
data = self._test_check_can_live_migrate_dest_file_backed_discard(
libvirt_version, qemu_version)
self.assertTrue(data.dst_wants_file_backed_memory)
self.assertFalse(data.file_backed_memory_discard)
def test_check_can_live_migrate_dest_file_backed_discard_bad_qemu(self):
libvirt_version = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
qemu_version = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION) - 1
data = self._test_check_can_live_migrate_dest_file_backed_discard(
libvirt_version, qemu_version)
self.assertTrue(data.dst_wants_file_backed_memory)
self.assertFalse(data.file_backed_memory_discard)
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
self, mock_cpu, mock_svc):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
svc = objects.Service(host="old")
svc.version = 32
mock_svc.return_value = svc
mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
self.assertRaises(exception.InvalidCPUInfo,
drvr.check_can_live_migrate_destination,
self.context, instance_ref,
compute_info, compute_info, False)
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
@mock.patch('nova.objects.Service.version', 30)
def test_check_can_live_migrate_dest_incompatible_file_backed(
self, mock_cpu, mock_svc):
self.flags(file_backed_memory=1024, group='libvirt')
instance_ref = objects.Instance(**self.test_instance)
# _check_cpu_match
mock_cpu.return_value = 1
svc = objects.Service(host="old")
svc.version = 31
mock_svc.return_value = svc
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
self.assertRaises(exception.MigrationPreCheckError,
drvr.check_can_live_migrate_destination,
self.context, instance_ref,
compute_info, compute_info, False)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
instance = objects.Instance(**self.test_instance)
mock_compare.return_value = 5
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info),
instance)
self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
mock_vconfig,
mock_compare):
instance = objects.Instance(**self.test_instance)
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virCompareCPU',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_compare.side_effect = not_supported_exc
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info),
instance)
self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
'_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
mock_compare):
instance = objects.Instance(**self.test_instance)
mock_compare.return_value = 6
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None,
instance)
self.assertIsNone(ret)
def test_compare_cpu_virt_type_xen(self):
instance = objects.Instance(**self.test_instance)
self.flags(virt_type='xen', group='libvirt')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, None, instance)
self.assertIsNone(ret)
def test_compare_cpu_virt_type_qemu(self):
instance = objects.Instance(**self.test_instance)
self.flags(virt_type='qemu', group='libvirt')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, None, instance)
self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
instance = objects.Instance(**self.test_instance)
mock_compare.return_value = 0
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InvalidCPUInfo,
conn._compare_cpu, None,
jsonutils.dumps(_fake_cpu_info),
instance)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
instance = objects.Instance(**self.test_instance)
mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationPreCheckError,
conn._compare_cpu, None,
jsonutils.dumps(_fake_cpu_info),
instance)
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_cleanup_shared_storage_test_file')
def test_check_can_live_migrate_dest_cleanup_works_correctly(
self, mock_clean):
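        # The shared storage test file created during the destination check
        # should be removed by the cleanup call.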
objects.Instance(**self.test_instance)
dest_check_data = objects.LibvirtLiveMigrateData(
filename="file",
block_migration=True,
disk_over_commit=False,
disk_available_mb=1024)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.cleanup_live_migration_destination_check(self.context,
dest_check_data)
mock_clean.assert_called_once_with('file')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.utime')
def test_check_shared_storage_test_file_exists(self, mock_utime,
mock_path_exists):
tmpfile_path = os.path.join(CONF.instances_path, 'tmp123')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertTrue(drvr._check_shared_storage_test_file(
'tmp123', mock.sentinel.instance))
mock_utime.assert_called_once_with(CONF.instances_path, None)
mock_path_exists.assert_called_once_with(tmpfile_path)
@mock.patch('os.path.exists', return_value=False)
@mock.patch('os.utime')
def test_check_shared_storage_test_file_does_not_exist(self, mock_utime,
mock_path_exists):
tmpfile_path = os.path.join(CONF.instances_path, 'tmp123')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._check_shared_storage_test_file(
'tmp123', mock.sentinel.instance))
mock_utime.assert_called_once_with(CONF.instances_path, None)
mock_path_exists.assert_called_once_with(tmpfile_path)
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_check_shared_storage_test_file')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_is_shared_block_storage')
def _test_can_live_migrate_source(self, mock_is_shared, mock_check_shared,
block_migration=False,
is_shared_block_storage=False,
is_shared_instance_path=False,
disk_available_mb=1024,
exception=None):
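        # Helper exercising check_can_live_migrate_source for the given
        # shared storage / instance path combination, optionally asserting
        # that a specific exception is raised.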
instance = objects.Instance(**self.test_instance)
dest_check_data = objects.LibvirtLiveMigrateData(
filename='file',
image_type='default',
block_migration=block_migration,
disk_over_commit=False,
disk_available_mb=disk_available_mb)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_is_shared.return_value = is_shared_block_storage
mock_check_shared.return_value = is_shared_instance_path
if exception:
self.assertRaises(exception, drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data)
else:
ret = drvr.check_can_live_migrate_source(self.context, instance,
dest_check_data)
mock_is_shared.assert_called_once_with(instance, dest_check_data, None)
mock_check_shared.assert_called_once_with('file', instance)
if exception:
return (instance, dest_check_data)
if block_migration:
self.assertIsInstance(ret, objects.LibvirtLiveMigrateData)
self.assertIn('is_shared_block_storage', ret)
self.assertFalse(ret.is_shared_block_storage)
self.assertIn('is_shared_instance_path', ret)
self.assertFalse(ret.is_shared_instance_path)
if is_shared_block_storage:
self.assertTrue(ret.is_shared_block_storage)
if is_shared_instance_path:
self.assertTrue(ret.is_shared_instance_path)
return (instance, dest_check_data)
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_assert_dest_node_has_enough_disk')
def test_check_can_live_migrate_source_block_migration(
self, mock_assert_dest):
instance, dest_check_data = self._test_can_live_migrate_source(
block_migration=True)
mock_assert_dest.assert_called_once_with(
self.context, instance, dest_check_data.disk_available_mb,
False, None)
def test_check_can_live_migrate_source_shared_block_storage(self):
self._test_can_live_migrate_source(is_shared_block_storage=True)
def test_check_can_live_migrate_source_shared_instance_path(self):
self._test_can_live_migrate_source(is_shared_instance_path=True)
def test_check_can_live_migrate_source_non_shared_fails(self):
self._test_can_live_migrate_source(
exception=exception.InvalidSharedStorage)
def test_check_can_live_migrate_source_shared_block_migration_fails(self):
self._test_can_live_migrate_source(
block_migration=True, is_shared_block_storage=True,
exception=exception.InvalidLocalStorage)
def test_check_can_live_migrate_shared_path_block_migration_fails(self):
self._test_can_live_migrate_source(
block_migration=True, is_shared_instance_path=True,
exception=exception.InvalidLocalStorage)
def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
self._test_can_live_migrate_source(
exception=exception.InvalidSharedStorage)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_get_instance_disk_info')
def test_check_can_live_migrate_source_with_dest_not_enough_disk(
self, mock_get_bdi):
mock_get_bdi.return_value = [{"virt_disk_size": 2}]
instance, _ = self._test_can_live_migrate_source(
block_migration=True, disk_available_mb=0,
exception=exception.MigrationError)
mock_get_bdi.assert_called_once_with(instance, None)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage', return_value=False)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file', return_value=False)
def test_check_can_live_migrate_source_bm_with_bdm_tunnelled_error(
self, mock_check, mock_shared_block, mock_enough,
mock_min_version):
self.flags(live_migration_tunnelled=True,
group='libvirt')
bdi = {'block_device_mapping': ['bdm']}
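        # Tunnelled block migration cannot pass a selective migrate_disks
        # list to libvirt, so the pre-check below is expected to reject a
        # block migration that has volumes attached (block_device_mapping).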
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
dest_check_data = objects.LibvirtLiveMigrateData(
filename='file',
image_type='default',
block_migration=True,
disk_over_commit=False,
disk_available_mb=100)
drvr._parse_migration_flags()
self.assertRaises(exception.MigrationPreCheckError,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data,
block_device_info=bdi)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file')
def _test_check_can_live_migrate_source_block_migration_none(
self, block_migrate, is_shared_instance_path, is_share_block,
            mock_check, mock_shared_block, mock_enough, mock_version):
mock_check.return_value = is_shared_instance_path
mock_shared_block.return_value = is_share_block
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
dest_check_data = objects.LibvirtLiveMigrateData(
filename='file',
image_type='default',
disk_over_commit=False,
disk_available_mb=100)
dest_check_data_ret = drvr.check_can_live_migrate_source(
self.context, instance, dest_check_data)
self.assertEqual(block_migrate, dest_check_data_ret.block_migration)
def test_check_can_live_migrate_source_block_migration_none_shared1(self):
self._test_check_can_live_migrate_source_block_migration_none(
False,
True,
False)
def test_check_can_live_migrate_source_block_migration_none_shared2(self):
self._test_check_can_live_migrate_source_block_migration_none(
False,
False,
True)
def test_check_can_live_migrate_source_block_migration_none_no_share(self):
self._test_check_can_live_migrate_source_block_migration_none(
True,
False,
False)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file')
def test_check_can_live_migration_source_disk_over_commit_none(self,
mock_check, mock_shared_block, mock_enough, mock_disk_check):
mock_check.return_value = False
mock_shared_block.return_value = False
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
dest_check_data = objects.LibvirtLiveMigrateData(
filename='file',
image_type='default',
disk_available_mb=100)
drvr.check_can_live_migrate_source(
self.context, instance, dest_check_data)
self.assertFalse(mock_disk_check.called)
def _is_shared_block_storage_test_create_mocks(self, disks):
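        # Builds a minimal domain XML containing the given disks and returns
        # the qemu-img info / domain lookup mocks used by the volume-backed
        # _is_shared_block_storage() tests below.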
# Test data
instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>{}</devices></domain>")
disks_xml = ''
for dsk in disks:
            if dsk['type'] != 'network':
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source {source}='{source_path}'/>"
"<target dev='{target_dev}' bus='virtio'/>"
"</disk>".format(**dsk)])
else:
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source protocol='{source_proto}'"
"name='{source_image}' >"
"<host name='hostname' port='7000'/>"
"<config file='/path/to/file'/>"
"</source>"
"<target dev='{target_dev}'"
"bus='ide'/>".format(**dsk)])
# Preparing mocks
mock_virDomain = mock.Mock(fakelibvirt.virDomain)
mock_virDomain.XMLDesc = mock.Mock()
mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml))
mock_lookup = mock.Mock()
def mock_lookup_side_effect(name):
return mock_virDomain
mock_lookup.side_effect = mock_lookup_side_effect
mock_qemu_img_info = mock.Mock(disk_size=10737418240,
virtual_size=10737418240)
return (mock_qemu_img_info, mock_lookup)
def test_is_shared_block_storage_rbd(self):
self.flags(images_type='rbd', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_instance_disk_info = mock.Mock()
data = objects.LibvirtLiveMigrateData(image_type='rbd')
with mock.patch.object(drvr, '_get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertTrue(drvr._is_shared_block_storage(instance, data,
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
self.assertTrue(drvr._is_storage_shared_with('foo', 'bar'))
def test_is_shared_block_storage_lvm(self):
self.flags(images_type='lvm', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
mock_get_instance_disk_info = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
data = objects.LibvirtLiveMigrateData(image_type='lvm',
is_volume_backed=False,
is_shared_instance_path=False)
with mock.patch.object(drvr, '_get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._is_shared_block_storage(
instance, data,
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_qcow2(self):
self.flags(images_type='qcow2', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
mock_get_instance_disk_info = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
data = objects.LibvirtLiveMigrateData(image_type='qcow2',
is_volume_backed=False,
is_shared_instance_path=False)
with mock.patch.object(drvr, '_get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._is_shared_block_storage(
instance, data,
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_rbd_only_source(self):
self.flags(images_type='rbd', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
mock_get_instance_disk_info = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
data = objects.LibvirtLiveMigrateData(is_shared_instance_path=False,
is_volume_backed=False)
with mock.patch.object(drvr, '_get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._is_shared_block_storage(
instance, data,
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_rbd_only_dest(self):
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
mock_get_instance_disk_info = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
data = objects.LibvirtLiveMigrateData(image_type='rbd',
is_volume_backed=False,
is_shared_instance_path=False)
with mock.patch.object(drvr, '_get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._is_shared_block_storage(
instance, data,
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_volume_backed(self):
disks = [{'type': 'block',
'driver': 'raw',
'source': 'dev',
'source_path': '/dev/disk',
'target_dev': 'vda'}]
bdi = {'block_device_mapping': [
{'connection_info': 'info', 'mount_device': '/dev/vda'}]}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
(mock_qemu_img_info, mock_lookup) =\
self._is_shared_block_storage_test_create_mocks(disks)
data = objects.LibvirtLiveMigrateData(is_volume_backed=True,
is_shared_instance_path=False)
with mock.patch.object(host.Host, '_get_domain', mock_lookup):
self.assertTrue(drvr._is_shared_block_storage(instance, data,
                            block_device_info=bdi))
mock_lookup.assert_called_once_with(instance)
def test_is_shared_block_storage_volume_backed_with_disk(self):
disks = [{'type': 'block',
'driver': 'raw',
'source': 'dev',
'source_path': '/dev/disk',
'target_dev': 'vda'},
{'type': 'file',
'driver': 'raw',
'source': 'file',
'source_path': '/instance/disk.local',
'target_dev': 'vdb'}]
bdi = {'block_device_mapping': [
{'connection_info': 'info', 'mount_device': '/dev/vda'}]}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
(mock_qemu_img_info, mock_lookup) =\
self._is_shared_block_storage_test_create_mocks(disks)
data = objects.LibvirtLiveMigrateData(is_volume_backed=True,
is_shared_instance_path=False)
with test.nested(
mock.patch.object(libvirt_driver.disk_api,
'get_disk_info', mock_qemu_img_info),
mock.patch.object(host.Host, '_get_domain', mock_lookup)):
self.assertFalse(drvr._is_shared_block_storage(
instance, data,
                block_device_info=bdi))
mock_qemu_img_info.assert_called_once_with('/instance/disk.local')
mock_lookup.assert_called_once_with(instance)
def test_is_shared_block_storage_nfs(self):
bdi = {'block_device_mapping': []}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_image_backend = mock.MagicMock()
drvr.image_backend = mock_image_backend
mock_backend = mock.MagicMock()
mock_image_backend.backend.return_value = mock_backend
mock_backend.is_file_in_instance_path.return_value = True
mock_get_instance_disk_info = mock.Mock()
data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=True,
image_type='foo')
with mock.patch.object(drvr, '_get_instance_disk_info',
mock_get_instance_disk_info):
self.assertTrue(drvr._is_shared_block_storage(
'instance', data, block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
def test_live_migration_update_graphics_xml(self, mock_xml,
mock_migrateToURI3,
mock_min_version):
self.compute = manager.ComputeManager()
instance_ref = self.test_instance
target_connection = '127.0.0.2'
xml_tmpl = ("<domain type='kvm'>"
"<devices>"
"<graphics type='vnc' listen='{vnc}'>"
"<listen address='{vnc}'/>"
"</graphics>"
"<graphics type='spice' listen='{spice}'>"
"<listen address='{spice}'/>"
"</graphics>"
"</devices>"
"</domain>")
initial_xml = xml_tmpl.format(vnc='1.2.3.4',
spice='5.6.7.8')
target_xml = xml_tmpl.format(vnc='10.0.0.1',
spice='10.0.0.2')
target_xml = etree.tostring(etree.fromstring(target_xml),
encoding='unicode')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Preparing mocks
mock_xml.return_value = initial_xml
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
disk_paths = ['vda', 'vdb']
_bandwidth = CONF.libvirt.live_migration_bandwidth
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': disk_paths,
'bandwidth': _bandwidth,
'destination_xml': target_xml,
}
# start test
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
serial_listen_addr='127.0.0.1',
target_connect_addr=target_connection,
bdms=[],
block_migration=False)
dom = fakelibvirt.virDomain
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, target_connection,
False, migrate_data, guest, disk_paths)
mock_xml.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
def test_live_migration_parallels_no_new_xml(self):
self.flags(virt_type='parallels', group='libvirt')
self.flags(enabled=False, group='vnc')
target_connection = None
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance_dict = dict(self.test_instance)
instance_dict.update({'host': 'fake',
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE})
instance = objects.Instance(**instance_dict)
params = {
'bandwidth': CONF.libvirt.live_migration_bandwidth,
}
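        # With virt_type=parallels and graphics disabled no updated guest XML
        # is generated, so only 'bandwidth' is expected in the migration
        # params passed to migrateToURI3.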
migrate_data = objects.LibvirtLiveMigrateData(
target_connect_addr=target_connection,
block_migration=False)
dom_mock = mock.MagicMock()
guest = libvirt_guest.Guest(dom_mock)
drvr._live_migration_operation(self.context, instance,
target_connection, False,
migrate_data, guest, None)
dom_mock.migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
@mock.patch.object(utils, 'spawn')
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(fakelibvirt.Connection, '_mark_running')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_live_migration_monitor')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_live_migration_copy_disk_paths')
def test_live_migration_parallels_no_migrate_disks(self,
mock_copy_disk_paths,
mock_monitor,
mock_running,
mock_guest,
mock_thread):
self.flags(virt_type='parallels', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance_dict = dict(self.test_instance)
instance_dict.update({'host': 'fake',
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE})
instance = objects.Instance(**instance_dict)
migrate_data = objects.LibvirtLiveMigrateData(
block_migration=True)
dom = fakelibvirt.Domain(drvr._get_connection(), '<domain/>', True)
guest = libvirt_guest.Guest(dom)
mock_guest.return_value = guest
drvr._live_migration(self.context, instance, 'dest',
lambda: None, lambda: None, True,
migrate_data)
self.assertFalse(mock_copy_disk_paths.called)
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, 'dest', True,
migrate_data, guest, [])
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(nova.virt.libvirt.migration,
'get_updated_guest_xml', return_value='')
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
def test_live_migration_update_volume_xml(self, mock_xml,
mock_updated_guest_xml,
mock_migrateToURI3):
self.compute = manager.ComputeManager()
instance_ref = self.test_instance
target_connection = '127.0.0.2'
target_xml = self.device_xml_tmpl.format(
device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'cde.67890.opst-lun-Z')
# Prepare mocks
mock_xml.return_value = target_xml
disk_paths = ['vda', 'vdb']
params = {
'migrate_disks': disk_paths,
'migrate_uri': 'tcp://127.0.0.2',
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': target_xml
}
# Start test
connection_info = {
u'driver_volume_type': u'iscsi',
u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'data': {
u'access_mode': u'rw', u'target_discovered': False,
u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
},
}
bdm = objects.LibvirtLiveMigrateBDMInfo(
serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
bus='virtio', type='disk', dev='vdb',
connection_info=connection_info)
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='',
target_connect_addr=target_connection,
bdms=[bdm],
block_migration=False)
dom = fakelibvirt.virDomain
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_updated_guest_xml.return_value = target_xml
drvr._live_migration_operation(
self.context, instance_ref, target_connection,
False, migrate_data, guest, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
mock_updated_guest_xml.assert_called_once_with(
guest, migrate_data, mock.ANY, get_vif_config=None)
def test_live_migration_update_vifs_xml(self):
"""Tests that when migrate_data.vifs is populated, the destination
guest xml is updated with the migrate_data.vifs configuration.
"""
instance = objects.Instance(**self.test_instance)
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='',
target_connect_addr=None,
bdms=[],
block_migration=False,
vifs=[objects.VIFMigrateData(port_id=uuids.port_id)])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest(mock.MagicMock())
fake_xml = '<domain type="qemu"/>'
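        # Stand-in for migration.get_updated_guest_xml: it only verifies that
        # a get_vif_config callback is passed through when migrate_data.vifs
        # is populated, and returns a trivial domain XML.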
def fake_get_updated_guest_xml(guest, migrate_data, get_volume_config,
get_vif_config=None):
self.assertIsNotNone(get_vif_config)
return fake_xml
@mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
side_effect=fake_get_updated_guest_xml)
@mock.patch.object(drvr._host, 'has_min_version', return_value=True)
@mock.patch.object(guest, 'migrate')
def _test(migrate, has_min_version, get_updated_guest_xml):
drvr._live_migration_operation(
self.context, instance, 'dest.host', False,
migrate_data, guest, [])
self.assertEqual(1, get_updated_guest_xml.call_count)
migrate.assert_called()
_test()
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(nova.virt.libvirt.migration,
'get_updated_guest_xml', return_value='')
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
def test_live_migration_with_valid_target_connect_addr(self, mock_xml,
mock_updated_guest_xml,
mock_migrateToURI3,
mock_min_version):
self.compute = manager.ComputeManager()
instance_ref = self.test_instance
target_connection = '127.0.0.2'
target_xml = self.device_xml_tmpl.format(
device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'cde.67890.opst-lun-Z')
# Prepare mocks
mock_xml.return_value = target_xml
disk_paths = ['vda', 'vdb']
params = {
'migrate_disks': disk_paths,
'migrate_uri': 'tcp://127.0.0.2',
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': target_xml,
}
# start test
connection_info = {
u'driver_volume_type': u'iscsi',
u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'data': {
u'access_mode': u'rw', u'target_discovered': False,
u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
},
}
bdm = objects.LibvirtLiveMigrateBDMInfo(
serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
bus='virtio', type='disk', dev='vdb',
connection_info=connection_info)
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='',
target_connect_addr=target_connection,
bdms=[bdm],
block_migration=False)
dom = fakelibvirt.virDomain
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_updated_guest_xml.return_value = target_xml
drvr._live_migration_operation(self.context, instance_ref,
target_connection, False, migrate_data,
guest, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
def test_update_volume_xml(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
initial_xml = self.device_xml_tmpl.format(
device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'abc.12345.opst-lun-X')
target_xml = self.device_xml_tmpl.format(
device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'cde.67890.opst-lun-Z')
target_xml = etree.tostring(etree.fromstring(target_xml),
encoding='unicode')
serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
bus='virtio',
type='disk',
dev='vdb')
bdmi.connection_info = {u'driver_volume_type': u'iscsi',
'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'data': {u'access_mode': u'rw', u'target_discovered': False,
u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
conf = vconfig.LibvirtConfigGuestDisk()
conf.source_device = bdmi.type
conf.driver_name = "qemu"
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = bdmi.dev
conf.target_bus = bdmi.bus
conf.serial = bdmi.connection_info.get('serial')
conf.source_type = "block"
conf.source_path = bdmi.connection_info['data'].get('device_path')
guest = libvirt_guest.Guest(mock.MagicMock())
with test.nested(
mock.patch.object(drvr, '_get_volume_config',
return_value=conf),
mock.patch.object(guest, 'get_xml_desc',
return_value=initial_xml)):
config = libvirt_migrate.get_updated_guest_xml(guest,
objects.LibvirtLiveMigrateData(bdms=[bdmi]),
drvr._get_volume_config)
parser = etree.XMLParser(remove_blank_text=True)
config = etree.fromstring(config, parser)
target_xml = etree.fromstring(target_xml, parser)
self.assertEqual(etree.tostring(target_xml, encoding='unicode'),
etree.tostring(config, encoding='unicode'))
def test_live_migration_uri(self):
addresses = ('127.0.0.1', '127.0.0.1:4444', '[::1]:4444',
'[0:0:0:0:0:0:0:1]:4444', u'127.0.0.1', u'destination',
)
hypervisor_uri_map = (
('xen', 'xenmigr://%s/system'),
('kvm', 'qemu+tcp://%s/system'),
('qemu', 'qemu+tcp://%s/system'),
('parallels', 'parallels+tcp://%s/system'),
            # anything else (mapped to None) is expected to raise
            # LiveMigrationURINotAvailable
('lxc', None),
)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
for dest in addresses:
for hyperv, uri in hypervisor_uri_map:
self.flags(virt_type=hyperv, group='libvirt')
if uri is not None:
uri = uri % dest
self.assertEqual(uri, drvr._live_migration_uri(dest))
else:
self.assertRaises(exception.LiveMigrationURINotAvailable,
drvr._live_migration_uri,
dest)
def test_live_migration_uri_ipv6(self):
addresses = ('::1', '0:0:0:0:0:0:0:1', u'::1')
hypervisor_uri_map = (
('xen', 'xenmigr://[%s]/system'),
('kvm', 'qemu+tcp://[%s]/system'),
('qemu', 'qemu+tcp://[%s]/system'),
('parallels', 'parallels+tcp://[%s]/system'),
            # anything else (mapped to None) is expected to raise
            # LiveMigrationURINotAvailable
('lxc', None),
)
for dest in addresses:
for hyperv, uri in hypervisor_uri_map:
self.flags(virt_type=hyperv, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
if uri is not None:
uri = uri % dest
self.assertEqual(uri, drvr._live_migration_uri(dest))
else:
self.assertRaises(exception.LiveMigrationURINotAvailable,
drvr._live_migration_uri,
dest)
def test_live_migration_uri_forced(self):
dest = 'destination'
for hyperv in ('kvm', 'xen'):
self.flags(virt_type=hyperv, group='libvirt')
forced_uri = 'foo://%s/bar'
self.flags(live_migration_uri=forced_uri, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(forced_uri % dest, drvr._live_migration_uri(dest))
def test_live_migration_scheme(self):
self.flags(live_migration_scheme='ssh', group='libvirt')
dest = 'destination'
uri = 'qemu+ssh://%s/system'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(uri % dest, drvr._live_migration_uri(dest))
def test_live_migration_scheme_does_not_override_uri(self):
forced_uri = 'qemu+ssh://%s/system'
self.flags(live_migration_uri=forced_uri, group='libvirt')
self.flags(live_migration_scheme='tcp', group='libvirt')
dest = 'destination'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(forced_uri % dest, drvr._live_migration_uri(dest))
def test_migrate_uri(self):
hypervisor_uri_map = (
('xen', None),
('kvm', 'tcp://%s'),
('qemu', 'tcp://%s'),
)
dest = 'destination'
for hyperv, uri in hypervisor_uri_map:
self.flags(virt_type=hyperv, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
if uri is not None:
uri = uri % dest
self.assertEqual(uri, drvr._migrate_uri(dest))
def test_migrate_uri_forced_live_migration_uri(self):
dest = 'destination'
self.flags(virt_type='kvm', group='libvirt')
forced_uri = 'qemu+tcp://user:pass@%s/system'
self.flags(live_migration_uri=forced_uri, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual('tcp://%s' % dest, drvr._migrate_uri(dest))
    def test_migrate_uri_forced_live_migration_inbound_addr(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
addresses = ('127.0.0.1', '127.0.0.1:4444', '[::1]:4444',
'[0:0:0:0:0:0:0:1]:4444', u'127.0.0.1', u'destination',
)
for dest in addresses:
uri = 'tcp://%s'
result = drvr._migrate_uri(dest)
self.assertEqual(uri % dest, result)
self.assertIsInstance(result, str)
    def test_migrate_uri_forced_live_migration_inbound_addr_ipv6(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
addresses = ('::1', '0:0:0:0:0:0:0:1', u'::1')
for dest in addresses:
uri = 'tcp://[%s]'
result = drvr._migrate_uri(dest)
self.assertEqual(uri % dest, result)
self.assertIsInstance(result, str)
def test_update_volume_xml_no_serial(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
xml_tmpl = """
<domain type='kvm'>
<devices>
<disk type='block' device='disk'>
<driver name='qemu' type='raw' cache='none'/>
<source dev='{device_path}'/>
<target bus='virtio' dev='vdb'/>
<serial></serial>
<address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
</disk>
</devices>
</domain>
"""
initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'abc.12345.opst-lun-X')
target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'abc.12345.opst-lun-X')
target_xml = etree.tostring(etree.fromstring(target_xml),
encoding='unicode')
serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
connection_info = {
u'driver_volume_type': u'iscsi',
'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'data': {
u'access_mode': u'rw', u'target_discovered': False,
u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
u'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
},
}
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
bus='virtio',
dev='vdb',
type='disk')
bdmi.connection_info = connection_info
conf = vconfig.LibvirtConfigGuestDisk()
conf.source_device = bdmi.type
conf.driver_name = "qemu"
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = bdmi.dev
conf.target_bus = bdmi.bus
conf.serial = bdmi.connection_info.get('serial')
conf.source_type = "block"
conf.source_path = bdmi.connection_info['data'].get('device_path')
guest = libvirt_guest.Guest(mock.MagicMock())
with test.nested(
mock.patch.object(drvr, '_get_volume_config',
return_value=conf),
mock.patch.object(guest, 'get_xml_desc',
return_value=initial_xml)):
config = libvirt_migrate.get_updated_guest_xml(guest,
objects.LibvirtLiveMigrateData(bdms=[bdmi]),
drvr._get_volume_config)
self.assertEqual(target_xml, config)
def test_update_volume_xml_no_connection_info(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
initial_xml = self.device_xml_tmpl.format(
device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'abc.12345.opst-lun-X')
target_xml = self.device_xml_tmpl.format(
device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'abc.12345.opst-lun-X')
target_xml = etree.tostring(etree.fromstring(target_xml),
encoding='unicode')
serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
dev='vdb',
type='disk',
bus='scsi',
format='qcow')
bdmi.connection_info = {}
conf = vconfig.LibvirtConfigGuestDisk()
guest = libvirt_guest.Guest(mock.MagicMock())
with test.nested(
mock.patch.object(drvr, '_get_volume_config',
return_value=conf),
mock.patch.object(guest, 'get_xml_desc',
return_value=initial_xml)):
config = libvirt_migrate.get_updated_guest_xml(
guest,
objects.LibvirtLiveMigrateData(bdms=[bdmi]),
drvr._get_volume_config)
self.assertEqual(target_xml, config)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
def test_live_migration_update_serial_console_xml(self, mock_xml,
mock_migrateToURI3,
mock_get,
mock_min_version):
self.compute = manager.ComputeManager()
instance_ref = self.test_instance
target_connection = '127.0.0.2'
xml_tmpl = ("<domain type='kvm'>"
"<devices>"
"<console type='tcp'>"
"<source mode='bind' host='{addr}' service='{port}'/>"
"<target type='serial' port='0'/>"
"</console>"
"</devices>"
"</domain>")
initial_xml = xml_tmpl.format(addr='9.0.0.1', port='10100')
target_xml = xml_tmpl.format(addr='9.0.0.12', port='10200')
target_xml = etree.tostring(etree.fromstring(target_xml),
encoding='unicode')
# Preparing mocks
mock_xml.return_value = initial_xml
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
disk_paths = ['vda', 'vdb']
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': ['vda', 'vdb'],
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': target_xml,
}
# start test
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
serial_listen_addr='9.0.0.12',
target_connect_addr=target_connection,
bdms=[],
block_migration=False,
serial_listen_ports=[10200])
dom = fakelibvirt.virDomain
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, target_connection,
False, migrate_data, guest, disk_paths)
mock_xml.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
def test_live_migration_fails_without_serial_console_address(self):
self.compute = manager.ComputeManager()
self.flags(enabled=True, group='serial_console')
self.flags(proxyclient_address='', group='serial_console')
instance_dict = dict(self.test_instance)
instance_dict.update({'host': 'fake',
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE})
instance_ref = objects.Instance(**instance_dict)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Preparing mocks
dom = fakelibvirt.virDomain
guest = libvirt_guest.Guest(dom)
# start test
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='',
target_connect_addr=None,
bdms=[],
block_migration=False)
self.assertRaises(exception.MigrationError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, guest, [])
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
return_value='')
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc',
return_value='<xml></xml>')
def test_live_migration_uses_migrateToURI3(
self, mock_old_xml, mock_new_xml, mock_migrateToURI3,
mock_min_version):
target_connection = '127.0.0.2'
# Preparing mocks
disk_paths = ['vda', 'vdb']
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': ['vda', 'vdb'],
'bandwidth': CONF.libvirt.live_migration_bandwidth,
}
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
# Start test
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='0.0.0.0',
graphics_listen_addr_spice='0.0.0.0',
serial_listen_addr='127.0.0.1',
target_connect_addr=target_connection,
bdms=[],
block_migration=False)
dom = fakelibvirt.virDomain
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance, target_connection,
False, migrate_data, guest, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc',
return_value='<xml/>')
def _test_live_migration_block_migration_flags(self,
device_names, expected_flags,
mock_old_xml, mock_min_version, mock_migrateToURI3):
target_connection = '127.0.0.2'
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='0.0.0.0',
graphics_listen_addr_spice='0.0.0.0',
serial_listen_addr='127.0.0.1',
target_connect_addr=target_connection,
bdms=[],
block_migration=True)
dom = fakelibvirt.virDomain
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._parse_migration_flags()
instance = objects.Instance(**self.test_instance)
drvr._live_migration_operation(self.context, instance,
target_connection,
True, migrate_data, guest,
device_names)
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': device_names,
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': '<xml/>',
}
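        # The driver omits 'migrate_disks' from the params dict entirely when
        # every device has been filtered out, so mirror that here before
        # asserting the migrateToURI3 call.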
if not params['migrate_disks']:
del params['migrate_disks']
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection), params=params,
flags=expected_flags)
def test_live_migration_block_migration_with_devices(self):
device_names = ['vda']
expected_flags = (fakelibvirt.VIR_MIGRATE_NON_SHARED_INC |
fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
fakelibvirt.VIR_MIGRATE_PEER2PEER |
fakelibvirt.VIR_MIGRATE_LIVE)
self._test_live_migration_block_migration_flags(device_names,
expected_flags)
def test_live_migration_block_migration_all_filtered(self):
device_names = []
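        # With no devices left to copy the driver is expected to drop
        # VIR_MIGRATE_NON_SHARED_INC from the flags.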
expected_flags = (fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
fakelibvirt.VIR_MIGRATE_PEER2PEER |
fakelibvirt.VIR_MIGRATE_LIVE)
self._test_live_migration_block_migration_flags(device_names,
expected_flags)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
return_value='')
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='')
def test_block_live_migration_tunnelled_migrateToURI3(
self, mock_old_xml, mock_new_xml,
mock_migrateToURI3, mock_min_version):
self.flags(live_migration_tunnelled=True, group='libvirt')
target_connection = None
device_names = ['disk1', 'disk2']
# Preparing mocks
# Since we are passing the VIR_MIGRATE_TUNNELLED flag, the
        # 'params' dict will not (as expected) contain 'migrate_disks'
params = {
'bandwidth': CONF.libvirt.live_migration_bandwidth
}
# Start test
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='0.0.0.0',
graphics_listen_addr_spice='0.0.0.0',
serial_listen_addr='127.0.0.1',
target_connect_addr=target_connection,
bdms=[],
block_migration=True)
dom = fakelibvirt.virDomain
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._parse_migration_flags()
instance = objects.Instance(**self.test_instance)
drvr._live_migration_operation(self.context, instance,
target_connection, True, migrate_data,
guest, device_names)
expected_flags = (fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
fakelibvirt.VIR_MIGRATE_TUNNELLED |
fakelibvirt.VIR_MIGRATE_PEER2PEER |
fakelibvirt.VIR_MIGRATE_LIVE)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=expected_flags)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc',
return_value='<xml/>')
def test_live_migration_raises_exception(self, mock_xml,
mock_migrateToURI3,
mock_min_version):
# Prepare data
self.compute = manager.ComputeManager()
instance_ref = self.test_instance
target_connection = '127.0.0.2'
disk_paths = ['vda', 'vdb']
params = {
'migrate_uri': 'tcp://127.0.0.2',
'migrate_disks': disk_paths,
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': '<xml/>',
}
# Prepare mocks
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
# Start test
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
serial_listen_addr='127.0.0.1',
target_connect_addr=target_connection,
bdms=[],
block_migration=False)
dom = fakelibvirt.virDomain
guest = libvirt_guest.Guest(dom)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, target_connection,
False, migrate_data, guest, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri(target_connection),
params=params, flags=0)
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy,
mock_get_instance_path,
mock_exist,
mock_shutil
):
        # The destroy method may raise InstanceTerminationFailure or
        # InstancePowerOffFailure, so here we use their base class, Invalid.
mock_destroy.side_effect = exception.Invalid(reason='just test')
        fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                          'fake_instance_uuid')
mock_get_instance_path.return_value = fake_instance_path
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=False,
instance_relative_path=False)
self.assertRaises(exception.Invalid,
drvr.rollback_live_migration_at_destination,
"context", "instance", [], None, True, migrate_data)
mock_exist.assert_called_once_with(fake_instance_path)
mock_shutil.assert_called_once_with(fake_instance_path)
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_shared(self, mock_destroy,
mock_get_instance_path,
mock_exist,
mock_shutil
):
def fake_destroy(ctxt, instance, network_info,
block_device_info=None, destroy_disks=True):
# This is just here to test the signature. Seems there should
# be a better way to do this with mock and autospec.
pass
mock_destroy.side_effect = fake_destroy
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=True,
instance_relative_path=False)
drvr.rollback_live_migration_at_destination("context", "instance", [],
None, True, migrate_data)
mock_destroy.assert_called_once_with("context", "instance", [],
None, True)
self.assertFalse(mock_get_instance_path.called)
self.assertFalse(mock_exist.called)
self.assertFalse(mock_shutil.called)
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths_tunnelled(self, mock_xml):
self.flags(live_migration_tunnelled=True, group='libvirt')
xml = """
<domain>
<name>dummy</name>
<uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
<devices>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.root"/>
<target dev="vda"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.shared"/>
<target dev="vdb"/>
<shareable/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.config"/>
<target dev="vdc"/>
<readonly/>
</disk>
<disk type="block">
<source dev="/dev/mapper/somevol"/>
<target dev="vdd"/>
</disk>
<disk type="network">
<source protocol="https" name="url_path">
<host name="hostname" port="443"/>
</source>
</disk>
</devices>
</domain>"""
mock_xml.return_value = xml
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._parse_migration_flags()
dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
guest = libvirt_guest.Guest(dom)
paths = drvr._live_migration_copy_disk_paths(None, None, guest)
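        # Shareable (vdb), read-only (vdc) and network-backed disks are
        # expected to be skipped; only the root file disk and the local block
        # device should be copied.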
self.assertEqual((["/var/lib/nova/instance/123/disk.root",
"/dev/mapper/somevol"], ['vda', 'vdd']), paths)
@mock.patch.object(host.Host, "get_connection")
@mock.patch.object(host.Host, "has_min_version", return_value=True)
@mock.patch('nova.virt.driver.get_block_device_info')
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths_selective_block_migration(
self, mock_xml, mock_get_instance,
mock_block_device_info, mock_version, mock_conn):
xml = """
<domain>
<name>dummy</name>
<uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
<devices>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.root"/>
<target dev="vda"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.shared"/>
<target dev="vdb"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.config"/>
<target dev="vdc"/>
</disk>
<disk type="block">
<source dev="/dev/mapper/somevol"/>
<target dev="vdd"/>
</disk>
<disk type="network">
<source protocol="https" name="url_path">
<host name="hostname" port="443"/>
</source>
</disk>
</devices>
</domain>"""
mock_xml.return_value = xml
instance = objects.Instance(**self.test_instance)
instance.root_device_name = '/dev/vda'
block_device_info = {
'swap': {
'disk_bus': u'virtio',
'swap_size': 10,
'device_name': u'/dev/vdc'
},
'root_device_name': u'/dev/vda',
'ephemerals': [{
'guest_format': u'ext3',
'device_name': u'/dev/vdb',
'disk_bus': u'virtio',
'device_type': u'disk',
'size': 1
}],
'block_device_mapping': [{
'guest_format': None,
'boot_index': None,
'mount_device': u'/dev/vdd',
'connection_info': {
u'driver_volume_type': u'iscsi',
'serial': u'147df29f-aec2-4851-b3fe-f68dad151834',
u'data': {
u'access_mode': u'rw',
u'target_discovered': False,
u'encrypted': False,
u'qos_specs': None,
u'target_iqn': u'iqn.2010-10.org.openstack:'
u'volume-147df29f-aec2-4851-b3fe-'
u'f68dad151834',
u'target_portal': u'10.102.44.141:3260', u'volume_id':
u'147df29f-aec2-4851-b3fe-f68dad151834',
u'target_lun': 1,
u'auth_password': u'cXELT66FngwzTwpf',
u'auth_username': u'QbQQjj445uWgeQkFKcVw',
u'auth_method': u'CHAP'
}
},
'disk_bus': None,
'device_type': None,
'delete_on_termination': False
}]
}
mock_block_device_info.return_value = block_device_info
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
guest = libvirt_guest.Guest(dom)
return_value = drvr._live_migration_copy_disk_paths(self.context,
instance,
guest)
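        # vdd is an attached volume present in block_device_mapping, so it is
        # expected to be excluded; only the local file-backed disks remain.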
expected = (['/var/lib/nova/instance/123/disk.root',
'/var/lib/nova/instance/123/disk.shared',
'/var/lib/nova/instance/123/disk.config'],
['vda', 'vdb', 'vdc'])
self.assertEqual(expected, return_value)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_live_migration_copy_disk_paths")
def test_live_migration_data_gb_plain(self, mock_paths):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
data_gb = drvr._live_migration_data_gb(instance, [])
self.assertEqual(2, data_gb)
self.assertEqual(0, mock_paths.call_count)
def test_live_migration_data_gb_block(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
def fake_stat(path):
class StatResult(object):
def __init__(self, size):
self._size = size
@property
def st_size(self):
return self._size
if path == "/var/lib/nova/instance/123/disk.root":
return StatResult(10 * units.Gi)
elif path == "/dev/mapper/somevol":
return StatResult(1.5 * units.Gi)
else:
raise Exception("Should not be reached")
disk_paths = ["/var/lib/nova/instance/123/disk.root",
"/dev/mapper/somevol"]
with mock.patch.object(os, "stat") as mock_stat:
mock_stat.side_effect = fake_stat
data_gb = drvr._live_migration_data_gb(instance, disk_paths)
# Expecting 2 GB for RAM, plus 10 GB for disk.root
# and 1.5 GB rounded to 2 GB for somevol, so 14 GB
self.assertEqual(14, data_gb)
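    # Sentinels consumed by _test_live_migration_monitoring() to select which
    # set of post-run assertions applies.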
EXPECT_SUCCESS = 1
EXPECT_FAILURE = 2
EXPECT_ABORT = 3
@mock.patch.object(libvirt_guest.Guest, "migrate_start_postcopy")
@mock.patch.object(time, "time")
@mock.patch.object(time, "sleep",
side_effect=lambda x: eventlet.sleep(0))
@mock.patch.object(host.Host, "get_connection")
@mock.patch.object(libvirt_guest.Guest, "get_job_info")
@mock.patch.object(objects.Instance, "save")
@mock.patch.object(objects.Migration, "save")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(fakelibvirt.virDomain, "abortJob")
@mock.patch.object(libvirt_guest.Guest, "pause")
def _test_live_migration_monitoring(self,
job_info_records,
time_records,
expect_result,
mock_pause,
mock_abort,
mock_running,
mock_save,
mock_mig_save,
mock_job_info,
mock_conn,
mock_sleep,
mock_time,
mock_postcopy_switch,
current_mig_status=None,
expected_mig_status=None,
scheduled_action=None,
scheduled_action_executed=False,
block_migration=False,
expected_switch=False):
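        # Harness for the live-migration monitor loop: job_info_records is a
        # script of JobInfo objects interleaved with string markers
        # ("thread-finish", "domain-stop", "force_complete") that drive the
        # fake get_job_info() below, while expect_result selects the
        # success/failure/abort assertions performed afterwards.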
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
drvr.active_migrations[instance.uuid] = deque()
dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
guest = libvirt_guest.Guest(dom)
finish_event = eventlet.event.Event()
def fake_job_info():
while True:
self.assertGreater(len(job_info_records), 0)
rec = job_info_records.pop(0)
                if isinstance(rec, str):
if rec == "thread-finish":
finish_event.send()
elif rec == "domain-stop":
dom.destroy()
elif rec == "force_complete":
drvr.active_migrations[instance.uuid].append(
"force-complete")
else:
if len(time_records) > 0:
time_records.pop(0)
return rec
return rec
def fake_time():
if len(time_records) > 0:
return time_records[0]
else:
return int(
datetime.datetime(2001, 1, 20, 20, 1, 0)
.strftime('%s'))
mock_job_info.side_effect = fake_job_info
mock_time.side_effect = fake_time
dest = mock.sentinel.migrate_dest
migration = objects.Migration(context=self.context, id=1)
migrate_data = objects.LibvirtLiveMigrateData(
migration=migration, block_migration=block_migration)
if current_mig_status:
migrate_data.migration.status = current_mig_status
else:
migrate_data.migration.status = "unset"
migrate_data.migration.save()
fake_post_method = mock.MagicMock()
fake_recover_method = mock.MagicMock()
drvr._live_migration_monitor(self.context, instance,
guest, dest,
fake_post_method,
fake_recover_method,
False,
migrate_data,
finish_event,
[])
if scheduled_action_executed:
if scheduled_action == 'pause':
self.assertTrue(mock_pause.called)
if scheduled_action == 'postcopy_switch':
self.assertTrue(mock_postcopy_switch.called)
else:
if scheduled_action == 'pause':
self.assertFalse(mock_pause.called)
if scheduled_action == 'postcopy_switch':
self.assertFalse(mock_postcopy_switch.called)
mock_mig_save.assert_called_with()
if expect_result == self.EXPECT_SUCCESS:
self.assertFalse(fake_recover_method.called,
'Recover method called when success expected')
self.assertFalse(mock_abort.called,
'abortJob not called when success expected')
if expected_switch:
self.assertTrue(mock_postcopy_switch.called)
fake_post_method.assert_called_once_with(
self.context, instance, dest, False, migrate_data)
else:
if expect_result == self.EXPECT_ABORT:
self.assertTrue(mock_abort.called,
'abortJob called when abort expected')
else:
self.assertFalse(mock_abort.called,
'abortJob not called when failure expected')
self.assertFalse(fake_post_method.called,
'Post method called when success not expected')
if expected_mig_status:
fake_recover_method.assert_called_once_with(
self.context, instance, dest, migrate_data,
migration_status=expected_mig_status)
else:
fake_recover_method.assert_called_once_with(
self.context, instance, dest, migrate_data)
self.assertNotIn(instance.uuid, drvr.active_migrations)
def test_live_migration_monitor_success(self):
        # A normal sequence where we see all the normal job states.
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS)
def test_live_migration_handle_pause_normal(self):
        # A normal sequence where we see all the normal job states, with a
        # pause scheduled in between VIR_DOMAIN_JOB_UNBOUNDED updates.
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS,
current_mig_status="running",
scheduled_action="pause",
scheduled_action_executed=True)
def test_live_migration_handle_pause_on_start(self):
        # A normal sequence where we see all the normal job states, with a
        # pause scheduled while the job type is VIR_DOMAIN_JOB_NONE and
        # finish_event is not ready yet.
domain_info_records = [
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS,
current_mig_status="preparing",
scheduled_action="pause",
scheduled_action_executed=True)
def test_live_migration_handle_pause_on_finish(self):
        # A normal sequence where we see all the normal job states, with a
        # pause scheduled while the job type is VIR_DOMAIN_JOB_NONE and
        # finish_event is already ready.
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS,
current_mig_status="completed",
scheduled_action="pause",
scheduled_action_executed=False)
def test_live_migration_handle_pause_on_cancel(self):
        # A normal sequence where we see all the normal job states, with a
        # pause scheduled in the case of job type VIR_DOMAIN_JOB_CANCELLED.
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_FAILURE,
current_mig_status="cancelled",
expected_mig_status='cancelled',
scheduled_action="pause",
scheduled_action_executed=False)
def test_live_migration_handle_pause_on_failure(self):
        # A normal sequence where we see all the normal job states, with a
        # pause scheduled in the case of job type VIR_DOMAIN_JOB_FAILED.
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_FAILURE,
scheduled_action="pause",
scheduled_action_executed=False)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_is_post_copy_enabled")
def test_live_migration_handle_postcopy_normal(self,
mock_postcopy_enabled):
        # A normal sequence where we see all the normal job states, with a
        # post-copy switch scheduled in between VIR_DOMAIN_JOB_UNBOUNDED
        # updates.
mock_postcopy_enabled.return_value = True
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS,
current_mig_status="running",
scheduled_action="postcopy_switch",
scheduled_action_executed=True)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_start(self,
mock_postcopy_enabled):
        # A normal sequence where we see all the normal job states, with a
        # post-copy switch scheduled while the job type is VIR_DOMAIN_JOB_NONE
        # and finish_event is not ready yet.
mock_postcopy_enabled.return_value = True
domain_info_records = [
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS,
current_mig_status="preparing",
scheduled_action="postcopy_switch",
scheduled_action_executed=True)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_finish(self,
mock_postcopy_enabled):
        # A normal sequence where we see all the normal job states, with a
        # post-copy switch scheduled when the job type is VIR_DOMAIN_JOB_NONE
        # and finish_event is ready.
mock_postcopy_enabled.return_value = True
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS,
current_mig_status="completed",
scheduled_action="postcopy_switch",
scheduled_action_executed=False)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_cancel(self,
mock_postcopy_enabled):
        # A normal sequence where we see all the normal job states, and
        # postcopy scheduled in case of job type VIR_DOMAIN_JOB_CANCELLED
mock_postcopy_enabled.return_value = True
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_FAILURE,
current_mig_status="cancelled",
expected_mig_status='cancelled',
scheduled_action="postcopy_switch",
scheduled_action_executed=False)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_is_post_copy_enabled")
def test_live_migration_handle_pause_on_postcopy(self,
mock_postcopy_enabled):
        # A normal sequence where we see all the normal job states, and
        # pause scheduled after migration switched to postcopy
mock_postcopy_enabled.return_value = True
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS,
current_mig_status="running (post-copy)",
scheduled_action="pause",
scheduled_action_executed=False)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_postcopy(self,
mock_postcopy_enabled):
        # A normal sequence where we see all the normal job states, and a
        # postcopy switch scheduled after migration already switched to
        # postcopy
mock_postcopy_enabled.return_value = True
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS,
current_mig_status="running (post-copy)",
scheduled_action="postcopy_switch",
scheduled_action_executed=False)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_failure(self,
mock_postcopy_enabled):
        # A normal sequence where we see all the normal job states, and
        # postcopy scheduled in case of job type VIR_DOMAIN_JOB_FAILED
mock_postcopy_enabled.return_value = True
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
"force_complete",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_FAILURE,
scheduled_action="postcopy_switch",
scheduled_action_executed=False)
def test_live_migration_monitor_success_race(self):
# A normalish sequence but we're too slow to see the
# completed job state
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS)
def test_live_migration_monitor_failed(self):
# A failed sequence where we see all the expected events
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_FAILURE)
def test_live_migration_monitor_failed_race(self):
# A failed sequence where we are too slow to see the
# failed event
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_FAILURE)
def test_live_migration_monitor_cancelled(self):
# A cancelled sequence where we see all the events
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_FAILURE,
expected_mig_status='cancelled')
@mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime")
@mock.patch("nova.virt.libvirt.migration.downtime_steps")
def test_live_migration_monitor_downtime(self, mock_downtime_steps,
mock_set_downtime):
self.flags(live_migration_completion_timeout=1000000,
live_migration_progress_timeout=1000000,
group='libvirt')
        # We've set up 4 fake downtime steps - the first value is the
        # time delay, the second is the downtime value
downtime_steps = [
(90, 10),
(180, 50),
(270, 200),
(500, 300),
]
mock_downtime_steps.return_value = downtime_steps
# Each one of these fake times is used for time.time()
# when a new domain_info_records entry is consumed.
# Times are chosen so that only the first 3 downtime
# steps are needed.
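        # Only the 10, 50 and 200 downtime values should be applied before
        # the migration completes, which is what the assertion at the end of
        # this test checks; the 500s step is never reached.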
fake_times = [0, 1, 30, 95, 150, 200, 300]
        # A normal sequence where we see all the normal job states
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records,
fake_times, self.EXPECT_SUCCESS)
mock_set_downtime.assert_has_calls([mock.call(10),
mock.call(50),
mock.call(200)])
def test_live_migration_monitor_completion(self):
self.flags(live_migration_completion_timeout=100,
live_migration_progress_timeout=1000000,
group='libvirt')
# Each one of these fake times is used for time.time()
# when a new domain_info_records entry is consumed.
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320]
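        # With live_migration_completion_timeout=100, the monitor is
        # expected to abort the job once the elapsed time passes 100s, hence
        # the EXPECT_ABORT / 'cancelled' expectations below.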
        # A normal sequence where we see all the normal job states
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
]
self._test_live_migration_monitoring(domain_info_records,
fake_times, self.EXPECT_ABORT,
expected_mig_status='cancelled')
def test_live_migration_monitor_progress(self):
self.flags(live_migration_completion_timeout=1000000,
live_migration_progress_timeout=150,
group='libvirt')
# Each one of these fake times is used for time.time()
# when a new domain_info_records entry is consumed.
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320]
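        # data_remaining stays at 90 in every sample below, so no progress
        # is ever observed and the 150s progress timeout is expected to
        # abort the migration.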
        # A normal sequence where we see all the normal job states
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
]
self._test_live_migration_monitoring(domain_info_records,
fake_times, self.EXPECT_ABORT,
expected_mig_status='cancelled')
def test_live_migration_monitor_progress_zero_data_remaining(self):
self.flags(live_migration_completion_timeout=1000000,
live_migration_progress_timeout=150,
group='libvirt')
# Each one of these fake times is used for time.time()
# when a new domain_info_records entry is consumed.
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320]
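        # data_remaining changes between the samples below, so the progress
        # timeout never fires; the expected failure comes from the final
        # VIR_DOMAIN_JOB_FAILED record instead.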
        # A normal sequence where we see all the normal job states
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=0),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=70),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=50),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=30),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=10),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=0),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
]
self._test_live_migration_monitoring(domain_info_records,
fake_times, self.EXPECT_FAILURE)
@mock.patch('nova.virt.libvirt.migration.should_switch_to_postcopy')
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_is_post_copy_enabled")
def test_live_migration_monitor_postcopy_switch(self,
mock_postcopy_enabled, mock_should_switch):
# A normal sequence where migration is switched to postcopy mode
mock_postcopy_enabled.return_value = True
        switch_values = [False, False, True]
        # Use side_effect rather than return_value so that each successive
        # should_switch_to_postcopy() check gets the next value and the
        # switch is only triggered once True is returned.
        mock_should_switch.side_effect = switch_values
domain_info_records = [
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, [],
self.EXPECT_SUCCESS,
expected_switch=True)
@mock.patch.object(host.Host, "get_connection")
@mock.patch.object(utils, "spawn")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_live_migration_copy_disk_paths")
def test_live_migration_main(self, mock_copy_disk_path, mock_running,
mock_guest, mock_monitor, mock_thread,
mock_conn):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
dom = fakelibvirt.Domain(drvr._get_connection(),
"<domain><name>demo</name></domain>", True)
guest = libvirt_guest.Guest(dom)
migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
disks_to_copy = (['/some/path/one', '/test/path/two'],
['vda', 'vdb'])
mock_copy_disk_path.return_value = disks_to_copy
mock_guest.return_value = guest
def fake_post():
pass
def fake_recover():
pass
drvr._live_migration(self.context, instance, "fakehost",
fake_post, fake_recover, True,
migrate_data)
mock_copy_disk_path.assert_called_once_with(self.context, instance,
guest)
class AnyEventletEvent(object):
def __eq__(self, other):
return type(other) == eventlet.event.Event
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, "fakehost", True,
migrate_data, guest, disks_to_copy[1])
mock_monitor.assert_called_once_with(
self.context, instance, guest, "fakehost",
fake_post, fake_recover, True,
migrate_data, AnyEventletEvent(), disks_to_copy[0])
@mock.patch('os.path.exists', return_value=False)
@mock.patch.object(fake_libvirt_utils, 'create_image')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_fetch_instance_kernel_ramdisk')
def _do_test_create_images_and_backing(self, disk_type, mock_fetch,
mock_create, mock_exists):
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
disk_info = {'path': 'foo', 'type': disk_type,
'disk_size': 1 * 1024 ** 3,
'virt_disk_size': 20 * 1024 ** 3,
'backing_file': None}
drvr._create_images_and_backing(self.context, instance,
"/fake/instance/dir", [disk_info])
mock_fetch.assert_called_once_with(self.context, instance,
fallback_from_host=None)
mock_create.assert_called_once_with(
disk_info['type'], mock.ANY, disk_info['virt_disk_size'])
mock_exists.assert_called_once_with('/fake/instance/dir/foo')
def test_create_images_and_backing_qcow2(self):
self._do_test_create_images_and_backing('qcow2')
def test_create_images_and_backing_raw(self):
self._do_test_create_images_and_backing('raw')
def test_create_images_and_backing_images_not_exist_no_fallback(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.test_instance.update({'user_id': 'fake-user',
'os_type': None,
'project_id': 'fake-project'})
instance = objects.Instance(**self.test_instance)
backing_file = imagecache.get_cache_fname(instance.image_ref)
disk_info = [
{u'backing_file': backing_file,
u'disk_size': 10747904,
u'path': u'disk_path',
u'type': u'qcow2',
u'virt_disk_size': 25165824}]
with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
side_effect=exception.ImageNotFound(
image_id="fake_id")):
self.assertRaises(exception.ImageNotFound,
conn._create_images_and_backing,
self.context, instance,
"/fake/instance/dir", disk_info)
@mock.patch('nova.privsep.path.utime')
def test_create_images_and_backing_images_not_exist_fallback(self,
mock_utime):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
base_dir = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
trusted_certs = objects.TrustedCerts(
ids=['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8',
'674736e3-f25c-405c-8362-bbf991e0ce0a'])
self.test_instance.update({'user_id': 'fake-user',
'os_type': None,
'trusted_certs': trusted_certs,
'kernel_id': uuids.kernel_id,
'ramdisk_id': uuids.ramdisk_id,
'project_id': 'fake-project'})
instance = objects.Instance(**self.test_instance)
backing_file = imagecache.get_cache_fname(instance.image_ref)
disk_info = [
{u'backing_file': backing_file,
u'disk_size': 10747904,
u'path': u'disk_path',
u'type': u'qcow2',
u'virt_disk_size': 25165824}]
with test.nested(
mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
side_effect=exception.ImageNotFound(
image_id=uuids.fake_id)),
) as (copy_image_mock, fetch_image_mock):
conn._create_images_and_backing(self.context, instance,
"/fake/instance/dir", disk_info,
fallback_from_host="fake_host")
backfile_path = os.path.join(base_dir, backing_file)
kernel_path = os.path.join(CONF.instances_path,
self.test_instance['uuid'],
'kernel')
ramdisk_path = os.path.join(CONF.instances_path,
self.test_instance['uuid'],
'ramdisk')
copy_image_mock.assert_has_calls([
mock.call(dest=backfile_path, src=backfile_path,
host='fake_host', receive=True),
mock.call(dest=kernel_path, src=kernel_path,
host='fake_host', receive=True),
mock.call(dest=ramdisk_path, src=ramdisk_path,
host='fake_host', receive=True)
])
fetch_image_mock.assert_has_calls([
mock.call(context=self.context,
target=backfile_path,
image_id=self.test_instance['image_ref'],
trusted_certs=trusted_certs),
mock.call(self.context, kernel_path, instance.kernel_id,
trusted_certs),
mock.call(self.context, ramdisk_path, instance.ramdisk_id,
trusted_certs)
])
mock_utime.assert_called()
@mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image')
def test_create_images_and_backing_images_exist(self, mock_fetch_image):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.test_instance.update({'user_id': 'fake-user',
'os_type': None,
'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'project_id': 'fake-project'})
instance = objects.Instance(**self.test_instance)
disk_info = [
{u'backing_file': imagecache.get_cache_fname(instance.image_ref),
u'disk_size': 10747904,
u'path': u'disk_path',
u'type': u'qcow2',
u'virt_disk_size': 25165824}]
with test.nested(
mock.patch.object(imagebackend.Image, 'get_disk_size',
return_value=0),
mock.patch.object(os.path, 'exists', return_value=True)
):
conn._create_images_and_backing(self.context, instance,
'/fake/instance/dir', disk_info)
self.assertFalse(mock_fetch_image.called)
@mock.patch('nova.privsep.path.utime')
def test_create_images_and_backing_ephemeral_gets_created(self,
mock_utime):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
base_dir = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
instance = objects.Instance(**self.test_instance)
disk_info_byname = fake_disk_info_byname(instance)
disk_info_byname['disk.local']['backing_file'] = 'ephemeral_foo'
disk_info_byname['disk.local']['virt_disk_size'] = 1 * units.Gi
disk_info = disk_info_byname.values()
with test.nested(
mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
mock.patch.object(drvr, '_create_ephemeral'),
mock.patch.object(imagebackend.Image, 'verify_base_size'),
mock.patch.object(imagebackend.Image, 'get_disk_size')
) as (fetch_image_mock, create_ephemeral_mock, verify_base_size_mock,
disk_size_mock):
disk_size_mock.return_value = 0
drvr._create_images_and_backing(self.context, instance,
CONF.instances_path, disk_info)
self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
root_backing, ephemeral_backing = [
os.path.join(base_dir, name)
for name in (disk_info_byname['disk']['backing_file'],
'ephemeral_foo')
]
create_ephemeral_mock.assert_called_once_with(
ephemeral_size=1, fs_label='ephemeral_foo',
os_type='linux', target=ephemeral_backing)
fetch_image_mock.assert_called_once_with(
context=self.context, image_id=instance.image_ref,
target=root_backing, trusted_certs=instance.trusted_certs)
verify_base_size_mock.assert_has_calls([
mock.call(root_backing, instance.flavor.root_gb * units.Gi),
mock.call(ephemeral_backing, 1 * units.Gi)
])
mock_utime.assert_called()
def test_create_images_and_backing_disk_info_none(self):
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
drvr._create_images_and_backing(self.context, instance,
"/fake/instance/dir", None)
# Assert that we did nothing
self.assertEqual({}, fake_backend.created_disks)
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_fetch_instance_kernel_ramdisk')
def test_create_images_and_backing_parallels(self, mock_fetch):
self.flags(virt_type='parallels', group='libvirt')
instance = objects.Instance(**self.test_instance)
instance.vm_mode = fields.VMMode.EXE
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
drvr._create_images_and_backing(self.context, instance,
'/fake/instance/dir', None)
self.assertFalse(mock_fetch.called)
def _generate_target_ret(self, target_connect_addr=None):
target_ret = {
'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
'target_connect_addr': target_connect_addr,
'serial_listen_addr': '127.0.0.1',
'volume': {
'12345': {'connection_info': {u'data': {'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
'serial': '12345'},
'disk_info': {'bus': 'scsi',
'dev': 'sda',
'type': 'disk'}},
'67890': {'connection_info': {u'data': {'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
'serial': '67890'},
'disk_info': {'bus': 'scsi',
'dev': 'sdb',
'type': 'disk'}}}}
return target_ret
def test_pre_live_migration_works_correctly_mocked(self):
self._test_pre_live_migration_works_correctly_mocked()
def test_pre_live_migration_with_transport_ip(self):
self.flags(live_migration_inbound_addr='127.0.0.2',
group='libvirt')
target_ret = self._generate_target_ret('127.0.0.2')
self._test_pre_live_migration_works_correctly_mocked(
target_ret=target_ret)
def test_pre_live_migration_only_dest_supports_native_luks(self):
# Assert that allow_native_luks is False when src_supports_native_luks
# is missing from migrate data during a P to Q LM.
self._test_pre_live_migration_works_correctly_mocked(
src_supports_native_luks=None, dest_supports_native_luks=True,
allow_native_luks=False)
def test_pre_live_migration_only_src_supports_native_luks(self):
# Assert that allow_native_luks is False when dest_supports_native_luks
# is False due to unmet QEMU and Libvirt deps on the dest compute.
self._test_pre_live_migration_works_correctly_mocked(
src_supports_native_luks=True, dest_supports_native_luks=False,
allow_native_luks=False)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'plug_vifs')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_connect_volume')
def _test_pre_live_migration_works_correctly_mocked(self,
mock_connect, mock_plug,
target_ret=None, src_supports_native_luks=True,
dest_supports_native_luks=True, allow_native_luks=True):
# Creating testdata
c = context.get_admin_context()
instance = objects.Instance(root_device_name='/dev/vda',
**self.test_instance)
bdms = objects.BlockDeviceMappingList(objects=[
fake_block_device.fake_bdm_object(c, {
'connection_info': jsonutils.dumps({
'serial': '12345',
'data': {
'device_path': '/dev/disk/by-path/ip-1.2.3.4:3260'
'-iqn.abc.12345.opst-lun-X'
}
}),
'device_name': '/dev/sda',
'volume_id': uuids.volume1,
'source_type': 'volume',
'destination_type': 'volume'
}),
fake_block_device.fake_bdm_object(c, {
'connection_info': jsonutils.dumps({
'serial': '67890',
'data': {
'device_path': '/dev/disk/by-path/ip-1.2.3.4:3260'
'-iqn.cde.67890.opst-lun-Z'
}
}),
'device_name': '/dev/sdb',
'volume_id': uuids.volume2,
'source_type': 'volume',
'destination_type': 'volume'
})
])
# We go through get_block_device_info to simulate what the
# ComputeManager sends to the driver (make sure we're using the
# correct type of BDM objects since there are many of them and
# they are super confusing).
block_device_info = driver.get_block_device_info(instance, bdms)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
class FakeNetworkInfo(object):
def fixed_ips(self):
return ["test_ip_addr"]
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
'_create_images_and_backing',
lambda *args, **kwargs: None)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_native_luks_available',
lambda self: dest_supports_native_luks)
nw_info = FakeNetworkInfo()
expected_connect_calls = []
for v in block_device_info['block_device_mapping']:
expected_connect_calls.append(
mock.call(c, v['connection_info'], instance,
allow_native_luks=allow_native_luks))
migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
block_migration=False,
instance_relative_path='foo',
is_shared_block_storage=False,
is_shared_instance_path=False,
graphics_listen_addr_vnc='127.0.0.1',
graphics_listen_addr_spice='127.0.0.1',
serial_listen_addr='127.0.0.1',
)
if src_supports_native_luks:
migrate_data.src_supports_native_luks = True
result = drvr.pre_live_migration(
c, instance, block_device_info, nw_info, None,
migrate_data=migrate_data)
if not target_ret:
target_ret = self._generate_target_ret()
self.assertEqual(
target_ret,
result.to_legacy_dict(
pre_migration_result=True)['pre_live_migration_result'])
mock_connect.assert_has_calls(expected_connect_calls)
self.assertEqual(len(expected_connect_calls), mock_connect.call_count)
mock_plug.assert_called_once_with(test.MatchType(objects.Instance),
nw_info)
@mock.patch.object(os, 'mkdir')
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.remotefs.'
'RemoteFilesystem.copy_file')
@mock.patch('nova.virt.driver.block_device_info_get_mapping')
@mock.patch('nova.virt.configdrive.required_by', return_value=True)
def test_pre_live_migration_block_with_config_drive_success(
self, mock_required_by, block_device_info_get_mapping,
mock_copy_file, mock_get_instance_path, mock_mkdir):
self.flags(config_drive_format='iso9660')
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
fake_instance_path = os.path.join(cfg.CONF.instances_path,
'/fake_instance_uuid')
mock_get_instance_path.return_value = fake_instance_path
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
migrate_data = objects.LibvirtLiveMigrateData()
migrate_data.is_shared_instance_path = False
migrate_data.is_shared_block_storage = False
migrate_data.block_migration = True
migrate_data.instance_relative_path = 'foo'
src = "%s:%s/disk.config" % (instance.host, fake_instance_path)
result = drvr.pre_live_migration(
self.context, instance, vol, [], None, migrate_data)
block_device_info_get_mapping.assert_called_once_with(
{'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
]}
)
mock_copy_file.assert_called_once_with(src, fake_instance_path)
        migrate_data.graphics_listen_addr_vnc = '127.0.0.1'
        migrate_data.graphics_listen_addr_spice = '127.0.0.1'
migrate_data.serial_listen_addr = '127.0.0.1'
self.assertEqual(migrate_data, result)
@mock.patch('nova.virt.driver.block_device_info_get_mapping',
return_value=())
def test_pre_live_migration_block_with_config_drive_mocked_with_vfat(
self, block_device_info_get_mapping):
self.flags(config_drive_format='vfat')
# Creating testdata
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance.config_drive = 'True'
migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
is_shared_instance_path=False,
is_shared_block_storage=False,
block_migration=False,
instance_relative_path='foo',
)
res_data = drvr.pre_live_migration(
self.context, instance, vol, [], None, migrate_data)
res_data = res_data.to_legacy_dict(pre_migration_result=True)
block_device_info_get_mapping.assert_called_once_with(
{'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
]}
)
self.assertEqual({'graphics_listen_addrs': {'spice': None,
'vnc': None},
'target_connect_addr': None,
'serial_listen_addr': None,
'volume': {}}, res_data['pre_live_migration_result'])
def _test_pre_live_migration_volume_backed(self, encrypted_volumes=False):
inst_ref = objects.Instance(root_device_name='/dev/vda',
**self.test_instance)
bdms = objects.BlockDeviceMappingList(objects=[
fake_block_device.fake_bdm_object(self.context, {
'connection_info': jsonutils.dumps({
'serial': uuids.vol1,
'data': {
'device_path': '/dev/disk/path/lun-X'
}
}),
'device_name': '/dev/sda',
'volume_id': uuids.vol1,
'source_type': 'volume',
'destination_type': 'volume'
}),
fake_block_device.fake_bdm_object(self.context, {
'connection_info': jsonutils.dumps({
'serial': uuids.vol2,
'data': {
'device_path': '/dev/disk/path/lun-Z'
}
}),
'device_name': '/dev/sdb',
'volume_id': uuids.vol2,
'source_type': 'volume',
'destination_type': 'volume'
})
])
# We go through get_block_device_info to simulate what the
# ComputeManager sends to the driver (make sure we're using the
# correct type of BDM objects since there are many of them and
# they are super confusing).
block_device_info = driver.get_block_device_info(inst_ref, bdms)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(drvr, '_is_native_luks_available'),
mock.patch.object(drvr._host, 'find_secret'),
mock.patch.object(drvr, '_connect_volume'),
mock.patch.object(drvr, 'plug_vifs'),
) as (mock_is_luks_available, mock_find_secret,
mock_connect_volume, mock_plug_vifs):
mock_is_luks_available.return_value = True
mock_find_secret.return_value = None
if encrypted_volumes:
secret_vol1 = mock.Mock()
secret_vol1.UUIDString.return_value = uuids.secret_vol1
secret_vol2 = mock.Mock()
secret_vol2.UUIDString.return_value = uuids.secret_vol2
mock_find_secret.side_effect = [secret_vol1, secret_vol2]
migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
is_shared_instance_path=True,
is_shared_block_storage=False,
is_volume_backed=True,
block_migration=False,
instance_relative_path=inst_ref['name'],
disk_over_commit=False,
disk_available_mb=123,
image_type='qcow2',
filename='foo',
src_supports_native_luks=True,
)
expected_migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
is_shared_instance_path=True,
is_shared_block_storage=False,
is_volume_backed=True,
block_migration=False,
instance_relative_path=inst_ref['name'],
disk_over_commit=False,
disk_available_mb=123,
image_type='qcow2',
filename='foo',
serial_listen_ports=[],
supported_perf_events=[],
target_connect_addr=None,
src_supports_native_luks=True
)
bdmi_vol1 = migrate_data_obj.LibvirtLiveMigrateBDMInfo()
bdmi_vol1.boot_index = None
bdmi_vol1.format = None
bdmi_vol1.serial = uuids.vol1
bdmi_vol1.connection_info = {
u'data': {'device_path': u'/dev/disk/path/lun-X'},
u'serial': uuids.vol1}
bdmi_vol1.bus = 'scsi'
bdmi_vol1.dev = 'sda'
bdmi_vol1.type = 'disk'
bdmi_vol2 = migrate_data_obj.LibvirtLiveMigrateBDMInfo()
bdmi_vol2.boot_index = None
bdmi_vol2.format = None
bdmi_vol2.serial = uuids.vol2
bdmi_vol2.connection_info = {
u'data': {'device_path': u'/dev/disk/path/lun-Z'},
u'serial': uuids.vol2}
bdmi_vol2.bus = 'scsi'
bdmi_vol2.dev = 'sdb'
bdmi_vol2.type = 'disk'
if encrypted_volumes:
bdmi_vol1.encryption_secret_uuid = uuids.secret_vol1
bdmi_vol2.encryption_secret_uuid = uuids.secret_vol2
expected_migrate_data.bdms = [bdmi_vol1, bdmi_vol2]
returned_migrate_data = drvr.pre_live_migration(
self.context, inst_ref, block_device_info, [], None,
migrate_data)
expected_connect_volume_calls = []
for bdm in block_device_info['block_device_mapping']:
expected_call = mock.call(self.context, bdm['connection_info'],
inst_ref, allow_native_luks=True)
expected_connect_volume_calls.append(expected_call)
mock_connect_volume.assert_has_calls(expected_connect_volume_calls)
if encrypted_volumes:
mock_find_secret.assert_has_calls(
[mock.call('volume', uuids.vol1),
mock.call('volume', uuids.vol2)])
# FIXME(lyarwood): This is taken from test_os_vif_util.py and as
# noted there should be removed if the ComparableVersionedObject
# mix-in is ever used for these objects.
expected_migrate_data.obj_reset_changes(recursive=True)
returned_migrate_data.obj_reset_changes(recursive=True)
expected = expected_migrate_data.obj_to_primitive()
returned = returned_migrate_data.obj_to_primitive()
# We have to manually deserialize the connection_info_json so
# that the equality comparison uses a dict rather than a string
# with a random hashseed sort order on the keys.
for migrate_data in (expected, returned):
for bdm_data in migrate_data['nova_object.data']['bdms']:
bdm = bdm_data['nova_object.data']
bdm['connection_info_json'] = (
jsonutils.loads(bdm['connection_info_json']))
self.assertEqual(expected, returned)
def test_pre_live_migration_volume_backed(self):
self._test_pre_live_migration_volume_backed()
def test_pre_live_migration_volume_backed_encrypted(self):
self._test_pre_live_migration_volume_backed(encrypted_volumes=True)
@mock.patch.object(eventlet.greenthread, 'sleep',
side_effect=eventlet.sleep(0))
@mock.patch.object(libvirt_driver.LibvirtDriver, 'plug_vifs',
side_effect=processutils.ProcessExecutionError)
def test_pre_live_migration_plug_vifs_retry_fails(self, mock_plug,
mock_sleep):
self.flags(live_migration_retry_count=3)
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
disk_info_json = jsonutils.dumps({})
migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
is_shared_block_storage=True,
is_shared_instance_path=True,
block_migration=False,
)
self.assertRaises(processutils.ProcessExecutionError,
drvr.pre_live_migration,
self.context, instance, block_device_info=None,
network_info=[], disk_info=disk_info_json,
migrate_data=migrate_data)
        # Called 3 times because live_migration_retry_count is 3
mock_plug.assert_has_calls([mock.call(instance, [])] * 3)
self.assertEqual(3, mock_plug.call_count)
# Called 'live_migration_retry_count - 1' times
mock_sleep.assert_has_calls([mock.call(1)] * 2)
self.assertEqual(2, mock_sleep.call_count)
@mock.patch.object(eventlet.greenthread, 'sleep',
side_effect=eventlet.sleep(0))
@mock.patch.object(libvirt_driver.LibvirtDriver, 'plug_vifs')
def test_pre_live_migration_plug_vifs_retry_works(self, mock_plug,
mock_sleep):
self.flags(live_migration_retry_count=3)
instance = objects.Instance(**self.test_instance)
mock_plug.side_effect = [processutils.ProcessExecutionError(),
processutils.ProcessExecutionError(), None]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
disk_info_json = jsonutils.dumps({})
migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
is_shared_block_storage=True,
is_shared_instance_path=True,
block_migration=False,
)
drvr.pre_live_migration(self.context, instance, block_device_info=None,
network_info=[], disk_info=disk_info_json,
migrate_data=migrate_data)
# Called 3 times
mock_plug.assert_has_calls([mock.call(instance, [])] * 3)
self.assertEqual(3, mock_plug.call_count)
# Called 2 times because the third 'plug_vifs' call is successful.
mock_sleep.assert_has_calls([mock.call(1)] * 2)
self.assertEqual(2, mock_sleep.call_count)
def test_pre_live_migration_plug_vifs_with_dest_port_bindings(self):
"""Tests that we use the LibvirtLiveMigrateData.vifs destination host
port binding details when plugging VIFs during pre_live_migration.
"""
source_vif = network_model.VIF(
id=uuids.port_id, type=network_model.VIF_TYPE_OVS,
vnic_type=network_model.VNIC_TYPE_NORMAL, details={'foo': 'bar'},
profile={'binding:host_id': 'fake-source-host'})
migrate_vifs = [objects.VIFMigrateData(
port_id=uuids.port_id, vnic_type=network_model.VNIC_TYPE_NORMAL,
vif_type=network_model.VIF_TYPE_OVS, vif_details={'bar': 'baz'},
profile={'binding:host_id': 'fake-dest-host'},
host='fake-dest-host', source_vif=source_vif)]
migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
vifs=migrate_vifs)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance()
network_info = mock.NonCallableMock()
with mock.patch.object(drvr, 'plug_vifs') as plug_vifs:
drvr._pre_live_migration_plug_vifs(
instance, network_info, migrate_data)
expected_network_info = network_model.NetworkInfo([
migrate_vifs[0].get_dest_vif()])
plug_vifs.assert_called_once_with(instance, expected_network_info)
def test_pre_live_migration_image_not_created_with_shared_storage(self):
migrate_data_set = [{'is_shared_block_storage': False,
'is_shared_instance_path': True,
'is_volume_backed': False,
'filename': 'foo',
'instance_relative_path': 'bar',
'disk_over_commit': False,
'disk_available_mb': 123,
'image_type': 'qcow2',
'block_migration': False},
{'is_shared_block_storage': True,
'is_shared_instance_path': True,
'is_volume_backed': False,
'filename': 'foo',
'instance_relative_path': 'bar',
'disk_over_commit': False,
'disk_available_mb': 123,
'image_type': 'qcow2',
'block_migration': False},
{'is_shared_block_storage': False,
'is_shared_instance_path': True,
'is_volume_backed': False,
'filename': 'foo',
'instance_relative_path': 'bar',
'disk_over_commit': False,
'disk_available_mb': 123,
'image_type': 'qcow2',
'block_migration': True}]
def _to_obj(d):
return migrate_data_obj.LibvirtLiveMigrateData(**d)
migrate_data_set = map(_to_obj, migrate_data_set)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
# creating mocks
with test.nested(
mock.patch.object(drvr,
'_create_images_and_backing'),
mock.patch.object(drvr,
'ensure_filtering_rules_for_instance'),
mock.patch.object(drvr, 'plug_vifs'),
) as (
create_image_mock,
rules_mock,
plug_mock,
):
disk_info_json = jsonutils.dumps({})
for migrate_data in migrate_data_set:
res = drvr.pre_live_migration(self.context, instance,
block_device_info=None,
network_info=[],
disk_info=disk_info_json,
migrate_data=migrate_data)
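                # The instance path is shared in every variation above, so
                # no image creation should happen on the destination.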
self.assertFalse(create_image_mock.called)
self.assertIsInstance(res,
objects.LibvirtLiveMigrateData)
def test_pre_live_migration_with_not_shared_instance_path(self):
migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
is_shared_block_storage=False,
is_shared_instance_path=False,
block_migration=False,
instance_relative_path='foo',
)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
def check_instance_dir(context, instance,
instance_dir, disk_info,
fallback_from_host=False):
self.assertTrue(instance_dir)
# creating mocks
with test.nested(
mock.patch.object(drvr,
'_create_images_and_backing',
side_effect=check_instance_dir),
mock.patch.object(drvr,
'ensure_filtering_rules_for_instance'),
mock.patch.object(drvr, 'plug_vifs'),
) as (
create_image_mock,
rules_mock,
plug_mock,
):
disk_info_json = jsonutils.dumps({})
res = drvr.pre_live_migration(self.context, instance,
block_device_info=None,
network_info=[],
disk_info=disk_info_json,
migrate_data=migrate_data)
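        # With a non-shared instance path the disks must be recreated on the
        # destination, falling back to fetching them from the source host.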
create_image_mock.assert_has_calls(
[mock.call(self.context, instance, mock.ANY, {},
fallback_from_host=instance.host)])
self.assertIsInstance(res, objects.LibvirtLiveMigrateData)
def test_pre_live_migration_recreate_disk_info(self):
migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
is_shared_block_storage=False,
is_shared_instance_path=False,
block_migration=True,
instance_relative_path='/some/path/',
)
disk_info = [{'disk_size': 5368709120, 'type': 'raw',
'virt_disk_size': 5368709120,
'path': '/some/path/disk',
'backing_file': '', 'over_committed_disk_size': 0},
{'disk_size': 1073741824, 'type': 'raw',
'virt_disk_size': 1073741824,
'path': '/some/path/disk.eph0',
'backing_file': '', 'over_committed_disk_size': 0}]
image_disk_info = {'/some/path/disk': 'raw',
'/some/path/disk.eph0': 'raw'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance_path = os.path.dirname(disk_info[0]['path'])
disk_info_path = os.path.join(instance_path, 'disk.info')
with test.nested(
mock.patch.object(os, 'mkdir'),
mock.patch.object(fake_libvirt_utils, 'write_to_file'),
mock.patch.object(drvr, '_create_images_and_backing')
) as (
mkdir, write_to_file, create_images_and_backing
):
drvr.pre_live_migration(self.context, instance,
block_device_info=None,
network_info=[],
disk_info=jsonutils.dumps(disk_info),
migrate_data=migrate_data)
write_to_file.assert_called_with(disk_info_path,
jsonutils.dumps(image_disk_info))
def test_pre_live_migration_with_perf_events(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._supported_perf_events = ['cmt']
migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
is_shared_block_storage=False,
is_shared_instance_path=False,
block_migration=False,
instance_relative_path='foo',
)
instance = objects.Instance(**self.test_instance)
res = drvr.pre_live_migration(self.context, instance,
block_device_info=None,
network_info=[],
disk_info=None,
migrate_data=migrate_data)
self.assertEqual(['cmt'], res.supported_perf_events)
@mock.patch('nova.virt.disk.api.get_disk_info')
def test_get_instance_disk_info_works_correctly(self, mock_qemu_img_info):
# Test data
instance = objects.Instance(**self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
# Preparing mocks
vdmock = mock.Mock(autospec=fakelibvirt.virDomain)
vdmock.XMLDesc.return_value = dummyxml
mock_qemu_img_info.side_effect = [
mock.Mock(disk_size=10737418240, virtual_size=10737418240),
mock.Mock(disk_size=3328599655, virtual_size=21474836480)
]
def fake_lookup(_uuid):
if _uuid == instance.uuid:
return vdmock
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = drvr.get_instance_disk_info(instance)
info = jsonutils.loads(info)
self.assertEqual(info[0]['type'], 'raw')
self.assertEqual(info[0]['path'], '/test/disk')
self.assertEqual(info[0]['disk_size'], 10737418240)
self.assertEqual(info[0]['backing_file'], "")
self.assertEqual(info[0]['over_committed_disk_size'], 0)
self.assertEqual(info[1]['type'], 'qcow2')
self.assertEqual(info[1]['path'], '/test/disk.local')
self.assertEqual(info[1]['virt_disk_size'], 21474836480)
self.assertEqual(info[1]['backing_file'], "file")
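        # over_committed_disk_size = virt_disk_size - disk_size
        #                          = 21474836480 - 3328599655 = 18146236825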
self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
vdmock.XMLDesc.assert_called_once_with(0)
mock_qemu_img_info.assert_has_calls([mock.call('/test/disk'),
mock.call('/test/disk.local')])
self.assertEqual(2, mock_qemu_img_info.call_count)
def test_post_live_migration(self):
vol = {'block_device_mapping': [
{'attachment_id': None,
'connection_info': {
'data': {'multipath_id': 'dummy1'},
'serial': 'fake_serial1'},
'mount_device': '/dev/sda',
},
{'attachment_id': None,
'connection_info': {
'data': {},
'serial': 'fake_serial2'},
'mount_device': '/dev/sdb', }]}
def fake_initialize_connection(context, volume_id, connector):
return {'data': {}}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_connector = {'host': 'fake'}
inst_ref = {'id': 'foo'}
cntx = context.get_admin_context()
# Set up the mock expectations
with test.nested(
mock.patch.object(driver, 'block_device_info_get_mapping',
return_value=vol['block_device_mapping']),
mock.patch.object(drvr, "get_volume_connector",
return_value=fake_connector),
mock.patch.object(drvr._volume_api, "initialize_connection",
side_effect=fake_initialize_connection),
mock.patch.object(drvr, '_disconnect_volume')
) as (block_device_info_get_mapping, get_volume_connector,
initialize_connection, _disconnect_volume):
drvr.post_live_migration(cntx, inst_ref, vol)
block_device_info_get_mapping.assert_has_calls([
mock.call(vol)])
get_volume_connector.assert_has_calls([
mock.call(inst_ref)])
_disconnect_volume.assert_has_calls([
mock.call(cntx, {'data': {'multipath_id': 'dummy1'}},
inst_ref),
mock.call(cntx, {'data': {}}, inst_ref)])
def test_post_live_migration_cinder_v3(self):
cntx = context.get_admin_context()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = fake_instance.fake_instance_obj(cntx,
uuid=uuids.instance)
vol_id = uuids.volume
old_attachment_id = uuids.attachment
disk_dev = 'sda'
connection_info = {
'data': {'multipath_id': 'dummy1'},
'serial': vol_id}
block_device_mapping = [
{'attachment_id': uuids.attachment,
'mount_device': '/dev/%s' % disk_dev,
'connection_info': connection_info}]
old_attachment = {
'connection_info': {
'data': {'multipath_id': 'dummy1'},
'serial': vol_id}}
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_block_storage=True,
old_vol_attachment_ids={vol_id: old_attachment_id})
@mock.patch.object(drvr, '_disconnect_volume')
@mock.patch.object(drvr._volume_api, 'attachment_get')
@mock.patch.object(driver, 'block_device_info_get_mapping')
def _test(mock_get_bdms, mock_attachment_get, mock_disconnect):
mock_get_bdms.return_value = block_device_mapping
mock_attachment_get.return_value = old_attachment
drvr.post_live_migration(cntx, instance, None,
migrate_data=migrate_data)
mock_attachment_get.assert_called_once_with(cntx,
old_attachment_id)
mock_disconnect.assert_called_once_with(cntx, connection_info,
instance)
_test()
@mock.patch('nova.virt.disk.api.get_disk_info')
def test_get_instance_disk_info_excludes_volumes(
self, mock_qemu_img_info):
# Test data
instance = objects.Instance(**self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/fake/path/to/volume1'/>"
"<target dev='vdc' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/fake/path/to/volume2'/>"
"<target dev='vdd' bus='virtio'/></disk>"
"</devices></domain>")
# Preparing mocks
vdmock = mock.Mock(autospec=fakelibvirt.virDomain)
vdmock.XMLDesc.return_value = dummyxml
mock_qemu_img_info.side_effect = [
mock.Mock(disk_size=10737418240, virtual_size=10737418240),
mock.Mock(disk_size=3328599655, virtual_size=21474836480)
]
def fake_lookup(_uuid):
if _uuid == instance.uuid:
return vdmock
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
conn_info = {'driver_volume_type': 'fake'}
info = {'block_device_mapping': [
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = drvr.get_instance_disk_info(instance,
block_device_info=info)
info = jsonutils.loads(info)
self.assertEqual(info[0]['type'], 'raw')
self.assertEqual(info[0]['path'], '/test/disk')
self.assertEqual(info[0]['disk_size'], 10737418240)
self.assertEqual(info[0]['backing_file'], "")
self.assertEqual(info[0]['over_committed_disk_size'], 0)
self.assertEqual(info[1]['type'], 'qcow2')
self.assertEqual(info[1]['path'], '/test/disk.local')
self.assertEqual(info[1]['virt_disk_size'], 21474836480)
self.assertEqual(info[1]['backing_file'], "file")
self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
vdmock.XMLDesc.assert_called_once_with(0)
mock_qemu_img_info.assert_has_calls([mock.call('/test/disk'),
mock.call('/test/disk.local')])
self.assertEqual(2, mock_qemu_img_info.call_count)
@mock.patch('nova.virt.disk.api.get_disk_info')
def test_get_instance_disk_info_no_bdinfo_passed(self, mock_qemu_img_info):
        # NOTE(ndipanov): _get_disk_over_committed_size_total calls this
        # method without access to Nova's block device information. We want
        # to make sure that we guess volumes mostly correctly in that case
        # as well
instance = objects.Instance(**self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='block'><driver name='qemu' type='raw'/>"
"<source file='/fake/path/to/volume1'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
path = '/test/disk'
size = 10737418240
# Preparing mocks
vdmock = mock.Mock(autospec=fakelibvirt.virDomain)
vdmock.XMLDesc.return_value = dummyxml
mock_qemu_img_info.return_value = mock.Mock(disk_size=10737418240,
virtual_size=10737418240)
def fake_lookup(_uuid):
if _uuid == instance.uuid:
return vdmock
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
fake_libvirt_utils.disk_sizes[path] = 10 * units.Gi
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = drvr.get_instance_disk_info(instance)
info = jsonutils.loads(info)
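        # The block-type disk backed by /fake/path/to/volume1 should be
        # guessed to be a volume and excluded, leaving a single entry for
        # /test/disk.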
self.assertEqual(1, len(info))
self.assertEqual(info[0]['type'], 'raw')
self.assertEqual(info[0]['path'], path)
self.assertEqual(info[0]['disk_size'], size)
self.assertEqual(info[0]['backing_file'], "")
self.assertEqual(info[0]['over_committed_disk_size'], 0)
vdmock.XMLDesc.assert_called_once_with(0)
mock_qemu_img_info.assert_called_once_with(path)
def test_spawn_with_network_info(self):
def fake_getLibVersion():
return fakelibvirt.FAKE_LIBVIRT_VERSION
def fake_getCapabilities():
return """
<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='2' threads='1'/>
<feature name='xtpr'/>
</cpu>
</host>
</capabilities>
"""
def fake_baselineCPU(cpu, flag):
return """<cpu mode='custom' match='exact'>
<model fallback='allow'>Penryn</model>
<vendor>Intel</vendor>
<feature policy='require' name='xtpr'/>
</cpu>
"""
# _fake_network_info must be called before create_fake_libvirt_mock(),
# as _fake_network_info calls importutils.import_class() and
# create_fake_libvirt_mock() mocks importutils.import_class().
network_info = _fake_network_info(self, 1)
self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
getCapabilities=fake_getCapabilities,
getVersion=lambda: 1005001,
baselineCPU=fake_baselineCPU)
instance = objects.Instance(**self.test_instance)
instance.image_ref = uuids.image_ref
instance.config_drive = ''
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.useFixture(fake_imagebackend.ImageBackendFixture())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
utils.tempdir(),
mock.patch('nova.virt.libvirt.driver.libvirt'),
mock.patch.object(drvr, '_build_device_metadata'),
mock.patch.object(drvr, 'get_info'),
mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter')
) as (
tmpdir,
mock_orig_libvirt,
mock_build_device_metadata,
mock_get_info,
mock_ignored, mock_ignored
):
self.flags(instances_path=tmpdir)
hw_running = hardware.InstanceInfo(state=power_state.RUNNING)
mock_get_info.return_value = hw_running
mock_build_device_metadata.return_value = None
del mock_orig_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
drvr.spawn(self.context, instance, image_meta, [], 'herp', {},
network_info=network_info)
mock_get_info.assert_called_once_with(instance)
mock_build_device_metadata.assert_called_once_with(self.context,
instance)
# Methods called directly by spawn()
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_domain_and_network')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
# Methods called by _create_configdrive via post_xml_callback
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder._make_iso9660')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
@mock.patch.object(instance_metadata, 'InstanceMetadata')
def test_spawn_with_config_drive(self, mock_instance_metadata,
mock_build_device_metadata,
mock_mkisofs, mock_get_info,
mock_create_domain_and_network,
mock_get_guest_xml):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance.config_drive = 'True'
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
instance_info = hardware.InstanceInfo(state=power_state.RUNNING)
mock_build_device_metadata.return_value = None
def fake_create_domain_and_network(
context, xml, instance, network_info,
block_device_info=None, power_on=True,
vifs_already_plugged=False, post_xml_callback=None,
destroy_disks_on_failure=False):
# The config disk should be created by this callback, so we need
# to execute it.
post_xml_callback()
fake_backend = self.useFixture(
fake_imagebackend.ImageBackendFixture(exists=lambda _: False))
mock_get_info.return_value = instance_info
mock_create_domain_and_network.side_effect = \
fake_create_domain_and_network
drvr.spawn(self.context, instance, image_meta, [], None, {})
# We should have imported 'disk.config'
config_disk = fake_backend.disks['disk.config']
config_disk.import_file.assert_called_once_with(instance, mock.ANY,
'disk.config')
def test_spawn_without_image_meta(self):
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._get_guest_xml',
lambda *a, **kw: None)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
'_create_domain_and_network',
lambda *a, **kw: None)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.get_info',
lambda self, instance: hardware.InstanceInfo(
state=power_state.RUNNING))
drvr.spawn(self.context, instance, image_meta, [], None, {})
# We should have created a root disk and an ephemeral disk
self.assertEqual(['disk', 'disk.local'],
sorted(fake_backend.created_disks.keys()))
def _test_spawn_disks(self, image_ref, block_device_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
# Volume-backed instance created without image
instance = objects.Instance(**self.test_instance)
instance.image_ref = image_ref
instance.root_device_name = '/dev/vda'
instance.uuid = uuids.instance_uuid
backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
with test.nested(
mock.patch.object(drvr, '_get_guest_xml'),
mock.patch.object(drvr, '_create_domain_and_network'),
mock.patch.object(drvr, 'get_info')
) as (
mock_get_guest_xml, mock_create_domain_and_network,
mock_get_info
):
hw_running = hardware.InstanceInfo(state=power_state.RUNNING)
mock_get_info.return_value = hw_running
drvr.spawn(self.context, instance,
image_meta, [], None, {},
block_device_info=block_device_info)
# Return a sorted list of created disks
return sorted(backend.created_disks.keys())
def test_spawn_from_volume_no_image_ref(self):
block_device_info = {'root_device_name': '/dev/vda',
'block_device_mapping': [
{'mount_device': 'vda',
'boot_index': 0}]}
disks_created = self._test_spawn_disks(None, block_device_info)
# We should have created the ephemeral disk, and nothing else
self.assertEqual(['disk.local'], disks_created)
def test_spawn_from_volume_with_image_ref(self):
block_device_info = {'root_device_name': '/dev/vda',
'block_device_mapping': [
{'mount_device': 'vda',
'boot_index': 0}]}
disks_created = self._test_spawn_disks(uuids.image_ref,
block_device_info)
# We should have created the ephemeral disk, and nothing else
self.assertEqual(['disk.local'], disks_created)
def test_spawn_from_image(self):
disks_created = self._test_spawn_disks(uuids.image_ref, None)
# We should have created the root and ephemeral disks
self.assertEqual(['disk', 'disk.local'], disks_created)
def test_start_lxc_from_volume(self):
self.flags(virt_type="lxc",
group='libvirt')
def check_setup_container(image, container_dir=None):
self.assertIsInstance(image, imgmodel.LocalBlockImage)
self.assertEqual(image.path, '/dev/path/to/dev')
return '/dev/nbd1'
bdm = {
'guest_format': None,
'boot_index': 0,
'mount_device': '/dev/sda',
'connection_info': {
'driver_volume_type': 'iscsi',
'serial': 'afc1',
'data': {
'access_mode': 'rw',
'target_discovered': False,
'encrypted': False,
'qos_specs': None,
'target_iqn': 'iqn: volume-afc1',
'target_portal': 'ip: 3260',
'volume_id': 'afc1',
'target_lun': 1,
'auth_password': 'uj',
'auth_username': '47',
'auth_method': 'CHAP'
}
},
'disk_bus': 'scsi',
'device_type': 'disk',
'delete_on_termination': False
}
def _connect_volume_side_effect(ctxt, connection_info, instance):
bdm['connection_info']['data']['device_path'] = '/dev/path/to/dev'
def _get(key, opt=None):
return bdm.get(key, opt)
def getitem(key):
return bdm[key]
def setitem(key, val):
bdm[key] = val
bdm_mock = mock.MagicMock()
bdm_mock.__getitem__.side_effect = getitem
bdm_mock.__setitem__.side_effect = setitem
bdm_mock.get = _get
disk_mock = mock.MagicMock()
disk_mock.source_path = '/dev/path/to/dev'
block_device_info = {'block_device_mapping': [bdm_mock],
'root_device_name': '/dev/sda'}
# Volume-backed instance created without image
instance_ref = self.test_instance
instance_ref['image_ref'] = ''
instance_ref['root_device_name'] = '/dev/sda'
instance_ref['ephemeral_gb'] = 0
instance_ref['uuid'] = uuids.fake
inst_obj = objects.Instance(**instance_ref)
image_meta = objects.ImageMeta.from_dict({})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(drvr, '_connect_volume',
side_effect=_connect_volume_side_effect),
mock.patch.object(drvr, '_get_volume_config',
return_value=disk_mock),
mock.patch.object(drvr, 'get_info',
return_value=hardware.InstanceInfo(
state=power_state.RUNNING)),
mock.patch('nova.virt.disk.api.setup_container',
side_effect=check_setup_container),
mock.patch('nova.virt.disk.api.teardown_container'),
mock.patch.object(objects.Instance, 'save')):
drvr.spawn(self.context, inst_obj, image_meta, [], None, {},
network_info=[],
block_device_info=block_device_info)
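            # The device returned by setup_container is recorded in the
            # instance's system_metadata as the rootfs device name.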
self.assertEqual('/dev/nbd1',
inst_obj.system_metadata.get(
'rootfs_device_name'))
def test_spawn_with_pci_devices(self):
class FakeLibvirtPciDevice(object):
def dettach(self):
return None
def reset(self):
return None
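        # Accept only well-formed PCI device names
        # (pci_<domain>_<bus>_<slot>_<function>); anything else raises
        # libvirtError.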
def fake_node_device_lookup_by_name(address):
pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
% dict(hex='[\da-f]', oct='[0-8]'))
pattern = re.compile(pattern)
if pattern.match(address) is None:
raise fakelibvirt.libvirtError()
return FakeLibvirtPciDevice()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._get_guest_xml',
lambda *args, **kwargs: None)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
'_create_domain_and_network',
lambda *args, **kwargs: None)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.get_info',
lambda self, instance: hardware.InstanceInfo(
state=power_state.RUNNING))
mock_connection = mock.MagicMock(
nodeDeviceLookupByName=fake_node_device_lookup_by_name)
instance_ref = self.test_instance
instance_ref['image_ref'] = 'my_fake_image'
instance = objects.Instance(**instance_ref)
instance['pci_devices'] = objects.PciDeviceList(
objects=[objects.PciDevice(address='0000:00:00.0')])
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.useFixture(fake_imagebackend.ImageBackendFixture())
with mock.patch.object(drvr, '_get_connection',
return_value=mock_connection):
drvr.spawn(self.context, instance, image_meta, [], None, {})
def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
gotFiles = []
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = objects.Instance(**instance_ref)
instance['os_type'] = os_type
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._get_guest_xml',
lambda *args, **kwargs: None)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
'_create_domain_and_network',
lambda *args, **kwargs: None)
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.get_info',
lambda self, instance: hardware.InstanceInfo(
state=power_state.RUNNING))
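        # Optionally register a custom mkfs command for this os_type; the
        # ephemeral disk filename then includes a hash of that command (see
        # test_create_image_plain_os_type_set_with_fs).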
if mkfs:
self.stub_out(
'nova.privsep.fs._MKFS_COMMAND',
{os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'})
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
self.useFixture(
fake_imagebackend.ImageBackendFixture(got_files=gotFiles))
drvr._create_image(self.context, instance, disk_info['mapping'])
drvr._get_guest_xml(self.context, instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * units.Gi},
{'filename': filename,
'size': 20 * units.Gi},
]
self.assertEqual(gotFiles, wantFiles)
def test_create_image_plain_os_type_blank(self):
self._test_create_image_plain(os_type='',
filename=self._EPHEMERAL_20_DEFAULT,
mkfs=False)
def test_create_image_plain_os_type_none(self):
self._test_create_image_plain(os_type=None,
filename=self._EPHEMERAL_20_DEFAULT,
mkfs=False)
def test_create_image_plain_os_type_set_no_fs(self):
self._test_create_image_plain(os_type='test',
filename=self._EPHEMERAL_20_DEFAULT,
mkfs=False)
def test_create_image_plain_os_type_set_with_fs(self):
ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str(
'mkfs.ext4 --label %(fs_label)s %(target)s')[:7])
self._test_create_image_plain(os_type='test',
filename=ephemeral_file_name,
mkfs=True)
def test_create_image_initrd(self):
kernel_id = uuids.kernel_id
ramdisk_id = uuids.ramdisk_id
kernel_fname = imagecache.get_cache_fname(kernel_id)
ramdisk_fname = imagecache.get_cache_fname(ramdisk_id)
filename = self._EPHEMERAL_20_DEFAULT
gotFiles = []
instance_ref = self.test_instance
instance_ref['image_ref'] = uuids.instance_id
instance_ref['kernel_id'] = uuids.kernel_id
instance_ref['ramdisk_id'] = uuids.ramdisk_id
instance_ref['os_type'] = 'test'
instance = objects.Instance(**instance_ref)
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_backend = self.useFixture(
fake_imagebackend.ImageBackendFixture(got_files=gotFiles))
with test.nested(
mock.patch.object(driver, '_get_guest_xml'),
mock.patch.object(driver, '_create_domain_and_network'),
mock.patch.object(driver, 'get_info',
return_value=[hardware.InstanceInfo(state=power_state.RUNNING)])
):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
driver._create_image(self.context, instance, disk_info['mapping'])
# Assert that kernel and ramdisk were fetched with fetch_raw_image
# and no size
for name, disk in fake_backend.disks.items():
cache = disk.cache
if name in ('kernel', 'ramdisk'):
cache.assert_called_once_with(
context=self.context, filename=mock.ANY, image_id=mock.ANY,
fetch_func=fake_libvirt_utils.fetch_raw_image)
wantFiles = [
{'filename': kernel_fname,
'size': None},
{'filename': ramdisk_fname,
'size': None},
{'filename': imagecache.get_cache_fname(uuids.instance_id),
'size': 10 * units.Gi},
{'filename': filename,
'size': 20 * units.Gi},
]
self.assertEqual(wantFiles, gotFiles)
def test_injection_info_is_sanitized(self):
info = get_injection_info(
network_info=mock.sentinel.network_info,
files=mock.sentinel.files,
admin_pass='verybadpass')
self.assertNotIn('verybadpass', str(info))
self.assertNotIn('verybadpass', repr(info))
@mock.patch(
'nova.virt.libvirt.driver.LibvirtDriver._build_device_metadata')
@mock.patch('nova.api.metadata.base.InstanceMetadata')
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
def test_create_configdrive(self, mock_make_drive,
mock_instance_metadata,
mock_build_device_metadata):
instance = objects.Instance(**self.test_instance)
instance.config_drive = 'True'
backend = self.useFixture(
fake_imagebackend.ImageBackendFixture(exists=lambda path: False))
mock_build_device_metadata.return_value = None
injection_info = get_injection_info(
network_info=mock.sentinel.network_info,
admin_pass=mock.sentinel.admin_pass,
files=mock.sentinel.files
)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._create_configdrive(self.context, instance, injection_info)
expected_config_drive_path = os.path.join(
CONF.instances_path, instance.uuid, 'disk.config')
mock_make_drive.assert_called_once_with(expected_config_drive_path)
mock_instance_metadata.assert_called_once_with(instance,
request_context=self.context,
network_info=mock.sentinel.network_info,
content=mock.sentinel.files,
extra_md={'admin_pass': mock.sentinel.admin_pass})
backend.disks['disk.config'].import_file.assert_called_once_with(
instance, mock.ANY, 'disk.config')
@ddt.unpack
@ddt.data({'expected': 200, 'flavor_size': 200},
{'expected': 100, 'flavor_size': 200, 'bdi_size': 100},
{'expected': 200, 'flavor_size': 200, 'bdi_size': 100,
'legacy': True})
def test_create_image_with_swap(self, expected,
flavor_size=None, bdi_size=None,
legacy=False):
# Test the precedence of swap disk size specified in both the bdm and
# the flavor.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance_ref = self.test_instance
instance_ref['image_ref'] = ''
instance = objects.Instance(**instance_ref)
if flavor_size is not None:
instance.flavor.swap = flavor_size
bdi = {'block_device_mapping': [{'boot_index': 0}]}
if bdi_size is not None:
bdi['swap'] = {'swap_size': bdi_size, 'device_name': '/dev/vdb'}
create_image_kwargs = {}
if legacy:
create_image_kwargs['ignore_bdi_for_swap'] = True
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance, image_meta,
block_device_info=bdi)
backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
drvr._create_image(self.context, instance, disk_info['mapping'],
block_device_info=bdi, **create_image_kwargs)
backend.mock_create_swap.assert_called_once_with(
target='swap_%i' % expected, swap_mb=expected,
context=self.context)
backend.disks['disk.swap'].cache.assert_called_once_with(
fetch_func=mock.ANY, filename='swap_%i' % expected,
size=expected * units.Mi, context=self.context, swap_mb=expected)
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
def test_create_vz_container_with_swap(self, mock_cache):
self.flags(virt_type='parallels', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance_ref = copy.deepcopy(self.test_instance)
instance_ref['vm_mode'] = fields.VMMode.EXE
instance_ref['flavor'].swap = 1024
instance = objects.Instance(**instance_ref)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance, image_meta)
self.assertRaises(exception.Invalid,
drvr._create_image,
self.context, instance, disk_info['mapping'])
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
side_effect=exception.ImageNotFound(image_id='fake-id'))
def test_create_image_not_exist_no_fallback(self, mock_cache):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
self.assertRaises(exception.ImageNotFound,
drvr._create_image,
self.context, instance, disk_info['mapping'])
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
def test_create_image_not_exist_fallback(self, mock_cache):
def side_effect(fetch_func, filename, size=None, *args, **kwargs):
def second_call(fetch_func, filename, size=None, *args, **kwargs):
# call copy_from_host ourselves because we mocked image.cache()
fetch_func('fake-target')
# further calls have no side effect
mock_cache.side_effect = None
mock_cache.side_effect = second_call
            # raise an error only on the first call
raise exception.ImageNotFound(image_id='fake-id')
mock_cache.side_effect = side_effect
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
with mock.patch.object(libvirt_driver.libvirt_utils,
'copy_image') as mock_copy:
drvr._create_image(self.context, instance, disk_info['mapping'],
fallback_from_host='fake-source-host')
mock_copy.assert_called_once_with(src='fake-target',
dest='fake-target',
host='fake-source-host',
receive=True)
@mock.patch('nova.privsep.fs.get_file_extension_for_os_type')
def test_create_image_with_ephemerals(self, mock_get_ext):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance_ref = self.test_instance
instance_ref['image_ref'] = ''
instance = objects.Instance(**instance_ref)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
bdi = {'ephemerals': [{'size': 100}],
'block_device_mapping': [{'boot_index': 0}]}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance, image_meta,
block_device_info=bdi)
mock_get_ext.return_value = mock.sentinel.file_ext
backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
drvr._create_image(self.context, instance, disk_info['mapping'],
block_device_info=bdi)
filename = 'ephemeral_100_%s' % mock.sentinel.file_ext
backend.mock_create_ephemeral.assert_called_once_with(
target=filename, ephemeral_size=100, fs_label='ephemeral0',
is_block_dev=mock.sentinel.is_block_dev, os_type='linux',
specified_fs=None, context=self.context, vm_mode=None)
backend.disks['disk.eph0'].cache.assert_called_once_with(
fetch_func=mock.ANY, context=self.context,
filename=filename, size=100 * units.Gi, ephemeral_size=mock.ANY,
specified_fs=None)
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
def test_create_image_resize_snap_backend(self, mock_cache):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance.task_state = task_states.RESIZE_FINISH
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
drvr._create_image(self.context, instance, disk_info['mapping'])
# Assert we called create_snap on the root disk
fake_backend.disks['disk'].create_snap.assert_called_once_with(
libvirt_utils.RESIZE_SNAPSHOT_NAME)
@mock.patch('nova.privsep.fs.mkfs')
def test_create_ephemeral_specified_fs(self, fake_mkfs):
self.flags(default_ephemeral_format='ext3')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
is_block_dev=True, specified_fs='ext4')
fake_mkfs.assert_has_calls([mock.call('ext4', '/dev/something',
'myVol')])
@mock.patch('nova.privsep.path.utime')
def test_create_ephemeral_specified_fs_not_valid(self, mock_utime):
CONF.set_override('default_ephemeral_format', 'ext4')
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'guest_format': 'dummy',
'size': 1}]
block_device_info = {
'ephemerals': ephemerals}
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
disk_info['mapping'].pop('disk.local')
with test.nested(
mock.patch.object(utils, 'execute'),
mock.patch.object(drvr, 'get_info'),
mock.patch.object(drvr, '_create_domain_and_network'),
mock.patch.object(imagebackend.Image, 'verify_base_size'),
mock.patch.object(imagebackend.Image, 'get_disk_size')
) as (execute_mock, get_info_mock,
create_mock, verify_base_size_mock, disk_size_mock):
disk_size_mock.return_value = 0
self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
                              self.context, instance, disk_info['mapping'],
block_device_info=block_device_info)
@mock.patch('nova.privsep.fs.mkfs')
def test_create_ephemeral_default(self, fake_mkfs):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
is_block_dev=True)
fake_mkfs.assert_has_calls([mock.call('ext4', '/dev/something',
'myVol')])
@mock.patch('nova.privsep.fs.mkfs')
def test_create_ephemeral_with_conf(self, fake_mkfs):
CONF.set_override('default_ephemeral_format', 'ext4')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
is_block_dev=True)
fake_mkfs.assert_has_calls([mock.call('ext4', '/dev/something',
'myVol')])
@mock.patch('nova.privsep.fs.configurable_mkfs')
def test_create_ephemeral_with_arbitrary(self, fake_mkfs):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stub_out('nova.privsep.fs._MKFS_COMMAND',
{'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
is_block_dev=True)
fake_mkfs.assert_has_calls([mock.call('linux', 'myVol',
'/dev/something', True, None,
None)])
@mock.patch('nova.privsep.fs.configurable_mkfs')
def test_create_ephemeral_with_ext3(self, fake_mkfs):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stub_out('nova.privsep.fs._MKFS_COMMAND',
{'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
is_block_dev=True)
fake_mkfs.assert_has_calls([mock.call('linux', 'myVol',
'/dev/something', True, None,
None)])
@mock.patch.object(fake_libvirt_utils, 'create_ploop_image')
def test_create_ephemeral_parallels(self, mock_create_ploop):
self.flags(virt_type='parallels', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
is_block_dev=False,
specified_fs='fs_format',
vm_mode=fields.VMMode.EXE)
mock_create_ploop.assert_called_once_with('expanded',
'/dev/something',
'20G', 'fs_format')
@mock.patch('nova.privsep.fs.unprivileged_mkfs')
def test_create_swap_default(self, fake_mkfs):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._create_swap('/dev/something', 1)
fake_mkfs.assert_has_calls([mock.call('swap', '/dev/something')])
def test_ensure_console_log_for_instance_pass(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(drvr, '_get_console_log_path'),
mock.patch.object(fake_libvirt_utils, 'file_open')
) as (mock_path, mock_open):
drvr._ensure_console_log_for_instance(mock.ANY)
mock_path.assert_called_once()
mock_open.assert_called_once()
def test_ensure_console_log_for_instance_pass_w_permissions(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(drvr, '_get_console_log_path'),
mock.patch.object(fake_libvirt_utils, 'file_open',
side_effect=IOError(errno.EACCES, 'exc'))
) as (mock_path, mock_open):
drvr._ensure_console_log_for_instance(mock.ANY)
mock_path.assert_called_once()
mock_open.assert_called_once()
def test_ensure_console_log_for_instance_fail(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(drvr, '_get_console_log_path'),
mock.patch.object(fake_libvirt_utils, 'file_open',
side_effect=IOError(errno.EREMOTE, 'exc'))
) as (mock_path, mock_open):
self.assertRaises(
IOError,
drvr._ensure_console_log_for_instance,
mock.ANY)
@mock.patch('nova.privsep.path.last_bytes',
return_value=(b'67890', 0))
def test_get_console_output_file(self, mock_last_bytes):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = objects.Instance(**instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
console_log = '%s/console.log' % (console_dir)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='file'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % console_log
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = fake_lookup
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = 5
with mock.patch('os.path.exists', return_value=True):
output = drvr.get_console_output(self.context, instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
self.assertEqual(b'67890', output)
def test_get_console_output_file_missing(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = objects.Instance(**instance_ref)
console_log = os.path.join(tmpdir, instance['name'],
'non-existent.log')
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='file'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % console_log
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = fake_lookup
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch('os.path.exists', return_value=False):
output = drvr.get_console_output(self.context, instance)
self.assertEqual('', output)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.privsep.path.last_bytes',
return_value=(b'67890', 0))
@mock.patch('nova.privsep.path.writefile')
@mock.patch('nova.privsep.libvirt.readpty')
def test_get_console_output_pty(self, mocked_readfile, mocked_writefile,
mocked_last_bytes, mocked_path_exists):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = objects.Instance(**instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
pty_file = '%s/fake_pty' % (console_dir)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='pty'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % pty_file
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
mocked_readfile.return_value = 'foo'
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = fake_lookup
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = 5
output = drvr.get_console_output(self.context, instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
self.assertEqual(b'67890', output)
def test_get_console_output_pty_not_available(self):
instance = objects.Instance(**self.test_instance)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='pty'>
<target port='0'/>
</console>
</devices>
</domain>
"""
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = fake_lookup
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ConsoleNotAvailable,
drvr.get_console_output, self.context, instance)
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_console_output_not_available(self, mock_get_xml, get_domain):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='foo'>
<source path='srcpath'/>
<target port='0'/>
</console>
</devices>
</domain>
"""
mock_get_xml.return_value = xml
get_domain.return_value = mock.MagicMock()
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ConsoleNotAvailable,
drvr.get_console_output, self.context, instance)
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
@mock.patch.object(libvirt_guest.Guest, 'get_xml_desc')
def test_get_console_output_logrotate(self, mock_get_xml, get_domain):
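        # Simulate rotated console logs: console.log is the newest file and
        # console.log.1 the oldest; concatenated oldest-to-newest they spell
        # the alphabet, which the byte-count assertions below rely on.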
fake_libvirt_utils.files['console.log'] = b'uvwxyz'
fake_libvirt_utils.files['console.log.0'] = b'klmnopqrst'
fake_libvirt_utils.files['console.log.1'] = b'abcdefghij'
def mock_path_exists(path):
return os.path.basename(path) in fake_libvirt_utils.files
def mock_last_bytes(path, count):
with fake_libvirt_utils.file_open(path) as flo:
return nova.privsep.path._last_bytes_inner(flo, count)
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='file'>
<source path='console.log'/>
<target port='0'/>
</console>
</devices>
</domain>
"""
mock_get_xml.return_value = xml
get_domain.return_value = mock.MagicMock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance = objects.Instance(**self.test_instance)
def _get_logd_output(bytes_to_read):
with utils.tempdir() as tmp_dir:
self.flags(instances_path=tmp_dir)
log_data = ""
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = bytes_to_read
with mock.patch('os.path.exists',
side_effect=mock_path_exists):
with mock.patch('nova.privsep.path.last_bytes',
side_effect=mock_last_bytes):
log_data = drvr.get_console_output(self.context,
instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
return log_data
# span across only 1 file (with remaining bytes)
self.assertEqual(b'wxyz', _get_logd_output(4))
# span across only 1 file (exact bytes)
self.assertEqual(b'uvwxyz', _get_logd_output(6))
# span across 2 files (with remaining bytes)
self.assertEqual(b'opqrstuvwxyz', _get_logd_output(12))
# span across all files (exact bytes)
self.assertEqual(b'abcdefghijklmnopqrstuvwxyz', _get_logd_output(26))
# span across all files with more bytes than available
self.assertEqual(b'abcdefghijklmnopqrstuvwxyz', _get_logd_output(30))
# files are not available
fake_libvirt_utils.files = {}
self.assertEqual('', _get_logd_output(30))
# reset the file for other tests
fake_libvirt_utils.files['console.log'] = b'01234567890'
def test_get_host_ip_addr(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = drvr.get_host_ip_addr()
self.assertEqual(ip, CONF.my_ip)
@mock.patch.object(libvirt_driver.LOG, 'warning')
@mock.patch('nova.compute.utils.get_machine_ips')
def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.get_host_ip_addr()
mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
u'not found on any of the '
u'interfaces: %(ifaces)s',
{'ifaces': '8.8.8.8, 75.75.75.75',
'my_ip': mock.ANY})
def test_conn_event_handler(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
mock.patch.object(drvr._host, "_init_events",
return_value=None),
mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
            # verify that the driver raises HypervisorUnavailable and
            # disables the compute service when it cannot connect to the host
self.assertRaises(exception.HypervisorUnavailable,
drvr.init_host,
"wibble")
self.assertTrue(service_mock.disabled)
def test_command_with_broken_connection(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
mock.patch.object(drvr._host, "_init_events",
return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
return_value=None),
mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock),
mock.patch.object(host.Host, "get_capabilities")):
self.assertRaises(exception.HypervisorUnavailable,
drvr.init_host, ("wibble",))
self.assertTrue(service_mock.disabled)
def test_service_resume_after_broken_connection(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = True
with test.nested(
mock.patch.object(drvr._host, "_connect",
return_value=mock.MagicMock()),
mock.patch.object(drvr._host, "_init_events",
return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
return_value=None),
mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock),
mock.patch.object(host.Host, "get_capabilities")):
drvr.init_host("wibble")
drvr.get_num_instances()
drvr._host._dispatch_conn_event()
self.assertFalse(service_mock.disabled)
self.assertIsNone(service_mock.disabled_reason)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(host.Host, '_get_domain',
side_effect=exception.InstanceNotFound(
instance_id=uuids.instance))
@mock.patch.object(objects.Instance, 'save')
def test_immediate_delete(self, mock_save, mock_get, mock_delete):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, {})
mock_save.assert_called_once_with()
mock_get.assert_has_calls([mock.call(instance)] * 3)
self.assertEqual(3, mock_get.call_count)
mock_delete.assert_called_once_with(instance)
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
@mock.patch.object(objects.Instance, 'save', autospec=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume')
@mock.patch.object(driver, 'block_device_info_get_mapping')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping,
mock_disconnect_volume,
mock_delete_instance_files, mock_destroy,
mock_inst_save, mock_inst_obj_load_attr,
mock_get_by_uuid, volume_fail=False):
instance = objects.Instance(self.context, **self.test_instance)
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
mock_mapping.return_value = vol['block_device_mapping']
mock_delete_instance_files.return_value = True
mock_get_by_uuid.return_value = instance
if volume_fail:
mock_disconnect_volume.return_value = (
exception.VolumeNotFound('vol'))
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.destroy(self.context, instance, [], vol)
def test_destroy_removes_disk(self):
self._test_destroy_removes_disk(volume_fail=False)
def test_destroy_removes_disk_volume_fails(self):
self._test_destroy_removes_disk(volume_fail=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy,
mock_unplug_vifs):
instance = fake_instance.fake_instance_obj(
None, name='instancename', id=1,
uuid='875a8070-d0b9-4949-8b31-104d125c9a64')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.destroy(self.context, instance, [], None, False)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(host.Host, '_get_domain')
def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
mock_teardown_container,
mock_cleanup):
self.flags(virt_type='lxc', group='libvirt')
fake_domain = FakeVirtDomain()
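        # Make destroy() flip the fake domain to SHUTDOWN so the driver's
        # wait-for-destroy loop sees the power-off and proceeds to tear down
        # the LXC container.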
def destroy_side_effect(*args, **kwargs):
fake_domain._info[0] = power_state.SHUTDOWN
with mock.patch.object(fake_domain, 'destroy',
side_effect=destroy_side_effect) as mock_domain_destroy:
mock_get_domain.return_value = fake_domain
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
network_info = []
drvr.destroy(self.context, instance, network_info, None, False)
mock_get_domain.assert_has_calls([mock.call(instance),
mock.call(instance)])
mock_domain_destroy.assert_called_once_with()
mock_teardown_container.assert_called_once_with(instance)
mock_cleanup.assert_called_once_with(self.context, instance,
network_info, None, False)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(host.Host, '_get_domain')
def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
mock_get_domain, mock_teardown_container, mock_cleanup):
self.flags(virt_type='lxc', group='libvirt')
instance = objects.Instance(**self.test_instance)
inf_exception = exception.InstanceNotFound(instance_id=instance.uuid)
mock_get_domain.side_effect = inf_exception
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
network_info = []
drvr.destroy(self.context, instance, network_info, None, False)
mock_get_domain.assert_has_calls([mock.call(instance),
mock.call(instance)])
mock_teardown_container.assert_called_once_with(instance)
mock_cleanup.assert_called_once_with(self.context, instance,
network_info, None, False)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
@mock.patch.object(host.Host, '_get_domain')
def test_reboot_different_ids(self, mock_get, mock_create):
class FakeLoopingCall(object):
def start(self, *a, **k):
return self
def wait(self):
return None
self.flags(wait_soft_reboot_seconds=1, group='libvirt')
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
# Mock domain
mock_domain = mock.create_autospec(fakelibvirt.virDomain)
mock_domain.info.side_effect = [
(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
(libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
mock_domain.ID.side_effect = ['some_fake_id', 'some_fake_id',
'some_other_fake_id',
'some_other_fake_id']
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
mock_get.return_value = mock_domain
self.stub_out('oslo_service.loopingcall.FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
self.stub_out('nova.pci.manager.get_instance_pci_devs', lambda *a: [])
drvr.reboot(None, instance, [], 'SOFT')
mock_domain.info.assert_has_calls([mock.call()] * 2)
self.assertEqual(2, mock_domain.info.call_count)
mock_domain.ID.assert_has_calls([mock.call()] * 4)
self.assertEqual(4, mock_domain.ID.call_count)
mock_domain.shutdown.assert_called_once_with()
mock_get.assert_has_calls([mock.call(instance)] * 2, any_order=True)
self.assertEqual(2, mock_get.call_count)
mock_create.assert_called_once_with(domain=mock_domain)
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, '_get_domain')
def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
mock_sleep, mock_loopingcall,
mock_get_instance_pci_devs):
class FakeLoopingCall(object):
def start(self, *a, **k):
return self
def wait(self):
return None
self.flags(wait_soft_reboot_seconds=1, group='libvirt')
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
self.reboot_hard_reboot_called = False
# Mock domain
mock_domain = mock.Mock(fakelibvirt.virDomain)
return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
(libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
mock_domain.info.side_effect = return_values
mock_domain.ID.return_value = 'some_fake_id'
mock_domain.shutdown.side_effect = mock.Mock()
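        # Record that the soft reboot path fell back to a hard reboot.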
def fake_hard_reboot(*args, **kwargs):
self.reboot_hard_reboot_called = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
mock_get_domain.return_value = mock_domain
mock_hard_reboot.side_effect = fake_hard_reboot
mock_loopingcall.return_value = FakeLoopingCall()
mock_get_instance_pci_devs.return_value = []
drvr.reboot(None, instance, [], 'SOFT')
self.assertTrue(self.reboot_hard_reboot_called)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, '_get_domain')
def test_soft_reboot_libvirt_exception(self, mock_get_domain,
mock_hard_reboot):
# Tests that a hard reboot is performed when a soft reboot results
# in raising a libvirtError.
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
# setup mocks
mock_virDomain = mock.Mock(fakelibvirt.virDomain)
mock_virDomain.info.return_value = (
(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_virDomain.ID.return_value = 'some_fake_id'
mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
context = None
instance = objects.Instance(**self.test_instance)
network_info = []
mock_get_domain.return_value = mock_virDomain
drvr.reboot(context, instance, network_info, 'SOFT')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, '_get_domain')
def _test_resume_state_on_host_boot_with_state(self, state,
mock_get_domain,
mock_hard_reboot):
mock_virDomain = mock.Mock(fakelibvirt.virDomain)
mock_virDomain.info.return_value = ([state, None, None, None, None])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_domain.return_value = mock_virDomain
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
drvr.resume_state_on_host_boot(self.context, instance, network_info,
block_device_info=None)
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
self.assertEqual(mock_hard_reboot.called, state not in ignored_states)
def test_resume_state_on_host_boot_with_running_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
def test_resume_state_on_host_boot_with_suspended_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
def test_resume_state_on_host_boot_with_paused_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
def test_resume_state_on_host_boot_with_nostate(self):
self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
def test_resume_state_on_host_boot_with_shutdown_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.SHUTDOWN)
def test_resume_state_on_host_boot_with_crashed_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, '_get_domain')
def test_resume_state_on_host_boot_with_instance_not_found_on_driver(
self, mock_get_domain, mock_hard_reboot):
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_domain.side_effect = exception.InstanceNotFound(
instance_id='fake')
drvr.resume_state_on_host_boot(self.context, instance, network_info=[],
block_device_info=None)
mock_hard_reboot.assert_called_once_with(self.context,
instance, [], None)
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
'_get_instance_disk_info_from_config')
@mock.patch('nova.virt.libvirt.LibvirtDriver.destroy')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
'_get_all_assigned_mediated_devices')
def test_hard_reboot(self, mock_get_mdev, mock_destroy, mock_get_disk_info,
mock_get_guest_xml, mock_create_domain_and_network,
mock_get_info):
self.context.auth_token = True # any non-None value will suffice
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
block_device_info = None
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
mock_get_mdev.return_value = {uuids.mdev1: uuids.inst1}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
hardware.InstanceInfo(state=power_state.RUNNING)]
mock_get_info.side_effect = return_values
mock_get_guest_xml.return_value = dummyxml
mock_get_disk_info.return_value = \
fake_disk_info_byname(instance).values()
backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
with mock.patch('os.path.exists', return_value=True):
drvr._hard_reboot(self.context, instance, network_info,
block_device_info)
disks = backend.disks
# NOTE(mdbooth): _create_images_and_backing() passes a full path in
# 'disk_name' when creating a disk. This is wrong, but happens to
# work due to handling by each individual backend. This will be
# fixed in a subsequent commit.
#
# We translate all the full paths into disk names here to make the
# test readable
disks = {os.path.basename(name): value
for name, value in disks.items()}
# We should have called cache() on the root and ephemeral disks
for name in ('disk', 'disk.local'):
self.assertTrue(disks[name].cache.called)
mock_get_mdev.assert_called_once_with(instance)
mock_destroy.assert_called_once_with(self.context, instance,
network_info, destroy_disks=False,
block_device_info=block_device_info)
mock_get_guest_xml.assert_called_once_with(self.context, instance,
network_info, mock.ANY, mock.ANY,
block_device_info=block_device_info, mdevs=[uuids.mdev1])
mock_create_domain_and_network.assert_called_once_with(self.context,
dummyxml, instance, network_info,
block_device_info=block_device_info, vifs_already_plugged=True)
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
@mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
'_get_instance_disk_info_from_config')
@mock.patch('nova.virt.libvirt.utils.write_to_file')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
'_get_all_assigned_mediated_devices')
def test_hard_reboot_does_not_call_glance_show(self,
mock_get_mdev, mock_destroy, mock_get_disk_info,
mock_get_guest_config, mock_get_instance_path, mock_write_to_file,
mock_get_instance_disk_info, mock_create_images_and_backing,
            mock_create_domain_and_network, mock_prepare_pci_devices_for_use,
mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
"""For a hard reboot, we shouldn't need an additional call to glance
to get the image metadata.
This is important for automatically spinning up instances on a
host-reboot, since we won't have a user request context that'll allow
the Glance request to go through. We have to rely on the cached image
metadata, instead.
https://bugs.launchpad.net/nova/+bug/1339386
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
mock_get_mdev.return_value = {}
network_info = mock.MagicMock()
block_device_info = mock.MagicMock()
mock_get_disk_info.return_value = {}
mock_get_guest_config.return_value = mock.MagicMock()
mock_get_instance_path.return_value = '/foo'
mock_looping_call.return_value = mock.MagicMock()
drvr._image_api = mock.MagicMock()
drvr._hard_reboot(self.context, instance, network_info,
block_device_info)
self.assertFalse(drvr._image_api.get.called)
mock_ensure_tree.assert_called_once_with('/foo')
def test_suspend(self):
guest = libvirt_guest.Guest(FakeVirtDomain(id=1))
dom = guest._domain
instance = objects.Instance(**self.test_instance)
instance.ephemeral_key_uuid = None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@mock.patch.object(dmcrypt, 'delete_volume')
@mock.patch.object(conn, '_get_instance_disk_info_from_config',
return_value=[])
@mock.patch.object(conn, '_detach_mediated_devices')
@mock.patch.object(conn, '_detach_direct_passthrough_ports')
@mock.patch.object(conn, '_detach_pci_devices')
@mock.patch.object(pci_manager, 'get_instance_pci_devs',
return_value='pci devs')
@mock.patch.object(conn._host, 'get_guest', return_value=guest)
def suspend(mock_get_guest, mock_get_instance_pci_devs,
mock_detach_pci_devices,
mock_detach_direct_passthrough_ports,
mock_detach_mediated_devices,
mock_get_instance_disk_info,
mock_delete_volume):
mock_managedSave = mock.Mock()
dom.managedSave = mock_managedSave
conn.suspend(self.context, instance)
mock_managedSave.assert_called_once_with(0)
self.assertFalse(mock_get_instance_disk_info.called)
mock_delete_volume.assert_has_calls([mock.call(disk['path'])
for disk in mock_get_instance_disk_info.return_value], False)
suspend()
@mock.patch.object(time, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
@mock.patch.object(host.Host, '_get_domain')
def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
mock_sleep, seconds_to_shutdown,
timeout, retry_interval,
shutdown_attempts, succeeds):
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
shutdown_count = []
# Mock domain
mock_domain = mock.Mock(fakelibvirt.virDomain)
return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple]
return_shutdowns = [shutdown_count.append("shutdown")]
retry_countdown = retry_interval
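        # Build the sequence of domain states the driver will observe, one
        # per second, issuing an additional shutdown request each time the
        # retry interval elapses.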
for x in range(min(seconds_to_shutdown, timeout)):
return_infos.append(
(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
if retry_countdown == 0:
return_shutdowns.append(shutdown_count.append("shutdown"))
retry_countdown = retry_interval
else:
retry_countdown -= 1
if seconds_to_shutdown < timeout:
return_infos.append(
(libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple)
mock_domain.info.side_effect = return_infos
mock_domain.shutdown.side_effect = return_shutdowns
def fake_create_domain(**kwargs):
self.reboot_create_called = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
mock_get_domain.return_value = mock_domain
mock_create_domain.side_effect = fake_create_domain
result = drvr._clean_shutdown(instance, timeout, retry_interval)
self.assertEqual(succeeds, result)
self.assertEqual(shutdown_attempts, len(shutdown_count))
def test_clean_shutdown_first_time(self):
self._test_clean_shutdown(seconds_to_shutdown=2,
timeout=5,
retry_interval=3,
shutdown_attempts=1,
succeeds=True)
def test_clean_shutdown_with_retry(self):
self._test_clean_shutdown(seconds_to_shutdown=4,
timeout=5,
retry_interval=3,
shutdown_attempts=2,
succeeds=True)
def test_clean_shutdown_failure(self):
self._test_clean_shutdown(seconds_to_shutdown=6,
timeout=5,
retry_interval=3,
shutdown_attempts=2,
succeeds=False)
def test_clean_shutdown_no_wait(self):
self._test_clean_shutdown(seconds_to_shutdown=6,
timeout=0,
retry_interval=3,
shutdown_attempts=1,
succeeds=False)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
return_value=None)
def test_attach_direct_passthrough_ports(self,
mock_get_image_metadata, mock_ID, mock_attachDevice):
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
guest = libvirt_guest.Guest(FakeVirtDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._attach_direct_passthrough_ports(
self.context, instance, guest, network_info)
mock_get_image_metadata.assert_called_once_with(
instance.system_metadata)
self.assertTrue(mock_attachDevice.called)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
return_value=None)
def test_attach_direct_physical_passthrough_ports(self,
mock_get_image_metadata, mock_ID, mock_attachDevice):
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL
guest = libvirt_guest.Guest(FakeVirtDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._attach_direct_passthrough_ports(
self.context, instance, guest, network_info)
mock_get_image_metadata.assert_called_once_with(
instance.system_metadata)
self.assertTrue(mock_attachDevice.called)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
return_value=None)
def test_attach_direct_passthrough_ports_with_info_cache(self,
mock_get_image_metadata, mock_ID, mock_attachDevice):
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
guest = libvirt_guest.Guest(FakeVirtDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._attach_direct_passthrough_ports(
self.context, instance, guest, None)
mock_get_image_metadata.assert_called_once_with(
instance.system_metadata)
self.assertTrue(mock_attachDevice.called)
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def _test_detach_direct_passthrough_ports(self,
mock_has_min_version, vif_type):
instance = objects.Instance(**self.test_instance)
        expected_pci_slot = "0000:00:00.0"
network_info = _fake_network_info(self, 1)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        # Adjust the fake network_info so that the correct get_config
        # function (the vif driver's get_config_hw_veb, as for a real
        # SR-IOV vif) is executed and, most importantly, so that the
        # pci_slot is translated to cfg.source_dev, then to
        # PciDevice.address, and finally passed to _detach_pci_devices.
        network_info[0]['profile'] = dict(pci_slot=expected_pci_slot)
network_info[0]['type'] = vif_type
network_info[0]['details'] = dict(vlan="2145")
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
        # Fill the instance's pci_devices so that
        # pci_manager.get_instance_pci_devs does not return an empty list,
        # which would eventually make the detachDeviceFlags assertion fail.
        expected_pci_device_obj = (
            objects.PciDevice(address=expected_pci_slot, request_id=None))
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [expected_pci_device_obj]
domain = FakeVirtDomain()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest(domain)
with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci:
drvr._detach_direct_passthrough_ports(
self.context, instance, guest)
mock_detach_pci.assert_called_once_with(
guest, [expected_pci_device_obj])
def test_detach_direct_passthrough_ports_interface_interface_hostdev(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestInterface
self._test_detach_direct_passthrough_ports(vif_type="hw_veb")
def test_detach_direct_passthrough_ports_interface_pci_hostdev(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestHostdevPCI
self._test_detach_direct_passthrough_ports(vif_type="ib_hostdev")
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
def test_detach_duplicate_mac_direct_passthrough_ports(
self, mock_detachDeviceFlags, mock_has_min_version):
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 2)
for network_info_inst in network_info:
network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
network_info_inst['type'] = "hw_veb"
network_info_inst['details'] = dict(vlan="2145")
network_info_inst['address'] = "fa:16:3e:96:2a:48"
network_info[0]['profile'] = dict(pci_slot="0000:00:00.0")
network_info[1]['profile'] = dict(pci_slot="0000:00:00.1")
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
        # Fill the instance's pci_devices so that
        # pci_manager.get_instance_pci_devs does not return an empty list,
        # which would eventually make the detachDeviceFlags assertion fail.
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [
objects.PciDevice(address='0000:00:00.0', request_id=None),
objects.PciDevice(address='0000:00:00.1', request_id=None)
]
domain = FakeVirtDomain()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest(domain)
drvr._detach_direct_passthrough_ports(self.context, instance, guest)
expected_xml = [
('<hostdev mode="subsystem" type="pci" managed="yes">\n'
' <source>\n'
' <address bus="0x00" domain="0x0000" \
function="0x0" slot="0x00"/>\n'
' </source>\n'
'</hostdev>\n'),
('<hostdev mode="subsystem" type="pci" managed="yes">\n'
' <source>\n'
' <address bus="0x00" domain="0x0000" \
function="0x1" slot="0x00"/>\n'
' </source>\n'
'</hostdev>\n')
]
mock_detachDeviceFlags.has_calls([
mock.call(expected_xml[0], flags=1),
mock.call(expected_xml[1], flags=1)
])
def test_resume(self):
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
block_device_info = None
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest('fake_dom')
with test.nested(
mock.patch.object(drvr, '_get_existing_domain_xml',
return_value=dummyxml),
mock.patch.object(drvr, '_create_domain_and_network',
return_value=guest),
mock.patch.object(drvr, '_attach_pci_devices'),
mock.patch.object(pci_manager, 'get_instance_pci_devs',
return_value='fake_pci_devs'),
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(guest, 'sync_guest_time'),
mock.patch.object(drvr, '_wait_for_running',
side_effect=loopingcall.LoopingCallDone()),
) as (_get_existing_domain_xml, _create_domain_and_network,
_attach_pci_devices, get_instance_pci_devs, get_image_metadata,
mock_sync_time, mock_wait):
get_image_metadata.return_value = {'bar': 234}
drvr.resume(self.context, instance, network_info,
block_device_info)
_get_existing_domain_xml.assert_has_calls([mock.call(instance,
network_info, block_device_info)])
_create_domain_and_network.assert_has_calls([mock.call(
self.context, dummyxml,
instance, network_info,
block_device_info=block_device_info,
vifs_already_plugged=True)])
self.assertTrue(mock_sync_time.called)
_attach_pci_devices.assert_has_calls([mock.call(guest,
'fake_pci_devs')])
@mock.patch.object(host.Host, '_get_domain')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines(self, mock_save, mock_delete_instance_files,
mock_get_info, mock_get_domain):
dom_mock = mock.MagicMock()
dom_mock.undefineFlags.return_value = 1
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_domain.return_value = dom_mock
mock_get_info.return_value = hardware.InstanceInfo(
state=power_state.SHUTDOWN, internal_id=-1)
mock_delete_instance_files.return_value = None
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, [])
mock_save.assert_called_once_with()
@mock.patch.object(rbd_utils.RBDDriver, '_destroy_volume')
@mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados')
@mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados')
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
def test_cleanup_rbd(self, mock_rados, mock_rbd, mock_connect,
mock_disconnect, mock_destroy_volume):
mock_connect.return_value = mock.MagicMock(), mock.MagicMock()
instance = objects.Instance(**self.test_instance)
all_volumes = [uuids.other_instance + '_disk',
uuids.other_instance + '_disk.swap',
instance.uuid + '_disk',
instance.uuid + '_disk.swap']
mock_rbd.RBD.return_value.list.return_value = all_volumes
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._cleanup_rbd(instance)
calls = [mock.call(mock.ANY, instance.uuid + '_disk'),
mock.call(mock.ANY, instance.uuid + '_disk.swap')]
mock_destroy_volume.assert_has_calls(calls)
self.assertEqual(2, mock_destroy_volume.call_count)
@mock.patch.object(rbd_utils.RBDDriver, '_destroy_volume')
@mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados')
@mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados')
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
def test_cleanup_rbd_resize_reverting(self, mock_rados, mock_rbd,
mock_connect, mock_disconnect,
mock_destroy_volume):
mock_connect.return_value = mock.MagicMock(), mock.MagicMock()
instance = objects.Instance(**self.test_instance)
instance.task_state = task_states.RESIZE_REVERTING
all_volumes = [uuids.other_instance + '_disk',
uuids.other_instance + '_disk.local',
instance.uuid + '_disk',
instance.uuid + '_disk.local']
mock_rbd.RBD.return_value.list.return_value = all_volumes
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._cleanup_rbd(instance)
mock_destroy_volume.assert_called_once_with(
mock.ANY, instance.uuid + '_disk.local')
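    # When undefineFlags() raises, the driver is expected to fall back to
    # a plain undefine(); the AttributeError variants below also cover
    # hosts whose libvirt bindings lack managed-save support.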
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_undefine_flags(self, mock_save):
mock_domain = mock.Mock(fakelibvirt.virDomain)
mock_domain.undefineFlags.side_effect = fakelibvirt.libvirtError('Err')
mock_domain.ID.return_value = 123
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host._get_domain = mock.Mock(return_value=mock_domain)
drvr._has_uefi_support = mock.Mock(return_value=False)
drvr.delete_instance_files = mock.Mock(return_value=None)
drvr.get_info = mock.Mock(return_value=
hardware.InstanceInfo(state=power_state.SHUTDOWN, internal_id=-1)
)
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, [])
self.assertEqual(2, mock_domain.ID.call_count)
mock_domain.destroy.assert_called_once_with()
mock_domain.undefineFlags.assert_called_once_with(1)
mock_domain.undefine.assert_called_once_with()
mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save):
mock_domain = mock.Mock(fakelibvirt.virDomain)
mock_domain.undefineFlags.side_effect = AttributeError()
mock_domain.ID.return_value = 123
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host._get_domain = mock.Mock(return_value=mock_domain)
drvr._has_uefi_support = mock.Mock(return_value=False)
drvr.delete_instance_files = mock.Mock(return_value=None)
drvr.get_info = mock.Mock(return_value=
hardware.InstanceInfo(state=power_state.SHUTDOWN, internal_id=-1)
)
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, [])
self.assertEqual(1, mock_domain.ID.call_count)
mock_domain.destroy.assert_called_once_with()
mock_domain.undefineFlags.assert_called_once_with(1)
mock_domain.hasManagedSaveImage.assert_has_calls([mock.call(0)])
mock_domain.managedSaveRemove.assert_called_once_with(0)
mock_domain.undefine.assert_called_once_with()
mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save):
mock_domain = mock.Mock(fakelibvirt.virDomain)
mock_domain.undefineFlags.side_effect = AttributeError()
mock_domain.hasManagedSaveImage.side_effect = AttributeError()
mock_domain.ID.return_value = 123
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host._get_domain = mock.Mock(return_value=mock_domain)
drvr._has_uefi_support = mock.Mock(return_value=False)
drvr.delete_instance_files = mock.Mock(return_value=None)
drvr.get_info = mock.Mock(return_value=
hardware.InstanceInfo(state=power_state.SHUTDOWN, internal_id=-1)
)
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, [])
self.assertEqual(1, mock_domain.ID.call_count)
mock_domain.destroy.assert_called_once_with()
mock_domain.undefineFlags.assert_called_once_with(1)
mock_domain.hasManagedSaveImage.assert_has_calls([mock.call(0)])
mock_domain.undefine.assert_called_once_with()
mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'save')
def test_destroy_removes_nvram(self, mock_save):
mock_domain = mock.Mock(fakelibvirt.virDomain)
mock_domain.ID.return_value = 123
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host._get_domain = mock.Mock(return_value=mock_domain)
drvr._has_uefi_support = mock.Mock(return_value=True)
drvr.delete_instance_files = mock.Mock(return_value=None)
drvr.get_info = mock.Mock(return_value=hardware.InstanceInfo(
state=power_state.SHUTDOWN, internal_id=-1))
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, [])
self.assertEqual(1, mock_domain.ID.call_count)
mock_domain.destroy.assert_called_once_with()
        # undefineFlags should now be called with 5
        # (VIR_DOMAIN_UNDEFINE_MANAGED_SAVE | VIR_DOMAIN_UNDEFINE_NVRAM)
        # as UEFI is supported
mock_domain.undefineFlags.assert_called_once_with(
fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE |
fakelibvirt.VIR_DOMAIN_UNDEFINE_NVRAM
)
mock_domain.undefine.assert_not_called()
mock_save.assert_called_once_with()
def test_destroy_timed_out(self):
        mock_virdomain = mock.Mock(spec=fakelibvirt.virDomain)
mock_virdomain.destroy.side_effect = fakelibvirt.libvirtError(
'timed out')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
lambda self, instance: mock_virdomain)
self.stub_out('nova.tests.unit.virt.libvirt.fakelibvirt.libvirtError.'
'get_error_code',
lambda self: fakelibvirt.VIR_ERR_OPERATION_TIMEOUT)
instance = objects.Instance(**self.test_instance)
self.assertRaises(exception.InstancePowerOffFailure,
drvr.destroy, self.context, instance, [])
mock_virdomain.ID.assert_called_once_with()
mock_virdomain.destroy.assert_called_once_with()
def test_private_destroy_not_found(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"No such domain",
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
        mock_virdomain = mock.Mock(spec=fakelibvirt.virDomain)
mock_virdomain.destroy.side_effect = ex
mock_virdomain.info.side_effect = ex
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
lambda self, instance: mock_virdomain)
instance = objects.Instance(**self.test_instance)
# NOTE(vish): verifies destroy doesn't raise if the instance disappears
drvr._destroy(instance)
mock_virdomain.ID.assert_called_once_with()
mock_virdomain.destroy.assert_called_once_with()
mock_virdomain.info.assert_called_once_with()
mock_virdomain.UUIDString.assert_called_once_with()
def test_private_destroy_lxc_processes_refused_to_die(self):
self.flags(virt_type='lxc', group='libvirt')
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, "",
error_message="internal error: Some processes refused to die",
error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn._host, '_get_domain') as mock_get_domain, \
mock.patch.object(conn, 'get_info') as mock_get_info:
mock_domain = mock.MagicMock()
mock_domain.ID.return_value = 1
mock_get_domain.return_value = mock_domain
mock_domain.destroy.side_effect = ex
mock_info = mock.MagicMock()
mock_info.internal_id = 1
mock_info.state = power_state.SHUTDOWN
mock_get_info.return_value = mock_info
instance = objects.Instance(**self.test_instance)
conn._destroy(instance)
def test_private_destroy_processes_refused_to_die_still_raises(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, "",
error_message="internal error: Some processes refused to die",
error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn._host, '_get_domain') as mock_get_domain:
mock_domain = mock.MagicMock()
mock_domain.ID.return_value = 1
mock_get_domain.return_value = mock_domain
mock_domain.destroy.side_effect = ex
instance = objects.Instance(**self.test_instance)
self.assertRaises(fakelibvirt.libvirtError, conn._destroy,
instance)
def test_private_destroy_ebusy_timeout(self):
# Tests that _destroy will retry 3 times to destroy the guest when an
# EBUSY is raised, but eventually times out and raises the libvirtError
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
("Failed to terminate process 26425 with SIGKILL: "
"Device or resource busy"),
error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
int1=errno.EBUSY)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.poweroff = mock.Mock(side_effect=ex)
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(drvr._host, 'get_guest',
return_value=mock_guest):
self.assertRaises(fakelibvirt.libvirtError, drvr._destroy,
instance)
self.assertEqual(3, mock_guest.poweroff.call_count)
def test_private_destroy_ebusy_multiple_attempt_ok(self):
# Tests that the _destroy attempt loop is broken when EBUSY is no
# longer raised.
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
("Failed to terminate process 26425 with SIGKILL: "
"Device or resource busy"),
error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
int1=errno.EBUSY)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.poweroff = mock.Mock(side_effect=[ex, None])
inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, internal_id=1)
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(drvr._host, 'get_guest',
return_value=mock_guest):
with mock.patch.object(drvr, 'get_info', return_value=inst_info):
drvr._destroy(instance)
self.assertEqual(2, mock_guest.poweroff.call_count)
@mock.patch.object(fakelibvirt.libvirtError, 'get_error_code')
@mock.patch.object(host.Host, '_get_domain',
side_effect=exception.InstanceNotFound(
instance_id=uuids.instance))
def test_undefine_domain_with_not_found_instance(self, mock_get_domain,
mock_get_error):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
# NOTE(wenjianhn): verifies undefine doesn't raise if the
# instance disappears
drvr._undefine_domain(instance)
mock_get_domain.assert_called_once_with(instance)
mock_get_error.assert_not_called()
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_uefi_support")
@mock.patch.object(host.Host, "get_guest")
def test_undefine_domain_handles_libvirt_errors(self, mock_get,
mock_has_uefi):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
fake_guest = mock.Mock()
mock_get.return_value = fake_guest
unexpected = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, "Random", error_code=1)
fake_guest.delete_configuration.side_effect = unexpected
        # an unexpected error code should be re-raised
self.assertRaises(type(unexpected), drvr._undefine_domain, instance)
ignored = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, "No such domain",
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
fake_guest.delete_configuration.side_effect = ignored
        # VIR_ERR_NO_DOMAIN should be ignored without raising
drvr._undefine_domain(instance)
@mock.patch.object(host.Host, "list_instance_domains")
@mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
@mock.patch.object(objects.InstanceList, "get_by_filters")
def test_disk_over_committed_size_total(self, mock_get, mock_bdms,
mock_list):
        # Ensure the over-committed disk size reported for each instance
        # domain is summed into a single total.
class DiagFakeDomain(object):
def __init__(self, name):
self._name = name
self._uuid = uuids.fake
def ID(self):
return 1
def name(self):
return self._name
def UUIDString(self):
return self._uuid
def XMLDesc(self, flags):
return "<domain><name>%s</name></domain>" % self._name
instance_domains = [
DiagFakeDomain("instance0000001"),
DiagFakeDomain("instance0000002")]
mock_list.return_value = instance_domains
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_disks = {'instance0000001':
[{'type': 'qcow2', 'path': '/somepath/disk1',
'virt_disk_size': '10737418240',
'backing_file': '/somepath/disk1',
'disk_size': '83886080',
'over_committed_disk_size': '10653532160'}],
'instance0000002':
[{'type': 'raw', 'path': '/somepath/disk2',
'virt_disk_size': '0',
'backing_file': '/somepath/disk2',
'disk_size': '10737418240',
'over_committed_disk_size': '0'}]}
def get_info(cfg, block_device_info):
return fake_disks.get(cfg.name)
instance_uuids = [dom.UUIDString() for dom in instance_domains]
instances = [objects.Instance(
uuid=instance_uuids[0],
root_device_name='/dev/vda'),
objects.Instance(
uuid=instance_uuids[1],
root_device_name='/dev/vdb')
]
mock_get.return_value = instances
with mock.patch.object(
drvr, "_get_instance_disk_info_from_config") as mock_info:
mock_info.side_effect = get_info
result = drvr._get_disk_over_committed_size_total()
self.assertEqual(result, 10653532160)
mock_list.assert_called_once_with(only_running=False)
self.assertEqual(2, mock_info.call_count)
filters = {'uuid': instance_uuids}
mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
mock_bdms.assert_called_with(mock.ANY, instance_uuids)
@mock.patch.object(host.Host, "list_instance_domains")
@mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
@mock.patch.object(objects.InstanceList, "get_by_filters")
def test_disk_over_committed_size_total_eperm(self, mock_get, mock_bdms,
mock_list):
        # Ensure per-domain OSErrors are tolerated and the remaining
        # domains still contribute to the over-committed total.
class DiagFakeDomain(object):
def __init__(self, name):
self._name = name
self._uuid = uuidutils.generate_uuid()
def ID(self):
return 1
def name(self):
return self._name
def UUIDString(self):
return self._uuid
def XMLDesc(self, flags):
return "<domain><name>%s</name></domain>" % self._name
instance_domains = [
DiagFakeDomain("instance0000001"),
DiagFakeDomain("instance0000002"),
DiagFakeDomain("instance0000003"),
DiagFakeDomain("instance0000004"),
DiagFakeDomain("instance0000005")]
mock_list.return_value = instance_domains
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_disks = {'instance0000001':
[{'type': 'qcow2', 'path': '/somepath/disk1',
'virt_disk_size': '10737418240',
'backing_file': '/somepath/disk1',
'disk_size': '83886080',
'over_committed_disk_size': '10653532160'}],
'instance0000002':
[{'type': 'raw', 'path': '/somepath/disk2',
'virt_disk_size': '0',
'backing_file': '/somepath/disk2',
'disk_size': '10737418240',
'over_committed_disk_size': '21474836480'}],
'instance0000003':
[{'type': 'raw', 'path': '/somepath/disk3',
'virt_disk_size': '0',
'backing_file': '/somepath/disk3',
'disk_size': '21474836480',
'over_committed_disk_size': '32212254720'}],
'instance0000004':
[{'type': 'raw', 'path': '/somepath/disk4',
'virt_disk_size': '0',
'backing_file': '/somepath/disk4',
'disk_size': '32212254720',
'over_committed_disk_size': '42949672960'}]}
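        # Each side effect below first checks that the per-instance
        # root_device_name was threaded through block_device_info, then
        # simulates a filesystem error; only instance0000004 returns real
        # disk info and therefore determines the expected total.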
def side_effect(cfg, block_device_info):
if cfg.name == 'instance0000001':
self.assertEqual('/dev/vda',
block_device_info['root_device_name'])
raise OSError(errno.ENOENT, 'No such file or directory')
if cfg.name == 'instance0000002':
self.assertEqual('/dev/vdb',
block_device_info['root_device_name'])
raise OSError(errno.ESTALE, 'Stale NFS file handle')
if cfg.name == 'instance0000003':
self.assertEqual('/dev/vdc',
block_device_info['root_device_name'])
raise OSError(errno.EACCES, 'Permission denied')
if cfg.name == 'instance0000004':
self.assertEqual('/dev/vdd',
block_device_info['root_device_name'])
return fake_disks.get(cfg.name)
get_disk_info = mock.Mock()
get_disk_info.side_effect = side_effect
drvr._get_instance_disk_info_from_config = get_disk_info
instance_uuids = [dom.UUIDString() for dom in instance_domains]
instances = [objects.Instance(
uuid=instance_uuids[0],
root_device_name='/dev/vda'),
objects.Instance(
uuid=instance_uuids[1],
root_device_name='/dev/vdb'),
objects.Instance(
uuid=instance_uuids[2],
root_device_name='/dev/vdc'),
objects.Instance(
uuid=instance_uuids[3],
root_device_name='/dev/vdd'),
]
mock_get.return_value = instances
# NOTE(danms): We need to have found bdms for our instances,
# but we don't really need them to be complete as we just need
# to make it to our side_effect above. Exclude the last domain
# to simulate the case where we have an instance with no BDMs.
mock_bdms.return_value = {uuid: [] for uuid in instance_uuids
if uuid != instance_domains[-1].UUIDString()}
result = drvr._get_disk_over_committed_size_total()
self.assertEqual(42949672960, result)
mock_list.assert_called_once_with(only_running=False)
self.assertEqual(5, get_disk_info.call_count)
filters = {'uuid': instance_uuids}
mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
mock_bdms.assert_called_with(mock.ANY, instance_uuids)
@mock.patch.object(host.Host, "list_instance_domains")
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_instance_disk_info_from_config",
side_effect=exception.VolumeBDMPathNotFound(path='bar'))
@mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
@mock.patch.object(objects.InstanceList, "get_by_filters")
def test_disk_over_committed_size_total_bdm_not_found(self,
mock_get,
mock_bdms,
mock_get_disk_info,
mock_list_domains):
mock_dom = mock.Mock()
mock_dom.XMLDesc.return_value = "<domain/>"
mock_list_domains.return_value = [mock_dom]
# Tests that we handle VolumeBDMPathNotFound gracefully.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(0, drvr._get_disk_over_committed_size_total())
@mock.patch('nova.virt.libvirt.host.Host.list_instance_domains')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_get_instance_disk_info_from_config',
side_effect=exception.DiskNotFound(location='/opt/stack/foo'))
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid',
return_value=objects.BlockDeviceMappingList())
@mock.patch('nova.objects.InstanceList.get_by_filters',
return_value=objects.InstanceList(objects=[
objects.Instance(uuid=uuids.instance,
task_state=task_states.DELETING)]))
def test_disk_over_committed_size_total_disk_not_found_ignore(
self, mock_get, mock_bdms, mock_get_disk_info, mock_list_domains):
"""Tests that we handle DiskNotFound gracefully for an instance that
is undergoing a task_state transition.
"""
mock_dom = mock.Mock()
mock_dom.XMLDesc.return_value = "<domain/>"
mock_dom.UUIDString.return_value = uuids.instance
mock_list_domains.return_value = [mock_dom]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(0, drvr._get_disk_over_committed_size_total())
@mock.patch('nova.virt.libvirt.host.Host.list_instance_domains')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_get_instance_disk_info_from_config',
side_effect=exception.DiskNotFound(location='/opt/stack/foo'))
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid',
return_value=objects.BlockDeviceMappingList())
@mock.patch('nova.objects.InstanceList.get_by_filters',
return_value=objects.InstanceList(objects=[
objects.Instance(uuid=uuids.instance, task_state=None)]))
def test_disk_over_committed_size_total_disk_not_found_reraise(
self, mock_get, mock_bdms, mock_get_disk_info, mock_list_domains):
"""Tests that we handle DiskNotFound gracefully for an instance that
is NOT undergoing a task_state transition and the error is re-raised.
"""
mock_dom = mock.Mock()
mock_dom.XMLDesc.return_value = "<domain/>"
mock_dom.UUIDString.return_value = uuids.instance
mock_list_domains.return_value = [mock_dom]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.DiskNotFound,
drvr._get_disk_over_committed_size_total)
@mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size')
@mock.patch('nova.virt.disk.api.get_disk_size',
new_callable=mock.NonCallableMock)
def test_get_instance_disk_info_from_config_block_devices(self,
mock_disk_api, mock_get_volume_size):
"""Test that for block devices the actual and virtual sizes are
reported as the same and that the disk_api is not used.
"""
c = context.get_admin_context()
instance = objects.Instance(root_device_name='/dev/vda',
**self.test_instance)
bdms = objects.BlockDeviceMappingList(objects=[
fake_block_device.fake_bdm_object(c, {
'device_name': '/dev/mapper/vg-lv',
'source_type': 'image',
'destination_type': 'local'
}),
])
block_device_info = driver.get_block_device_info(instance, bdms)
config = vconfig.LibvirtConfigGuest()
disk_config = vconfig.LibvirtConfigGuestDisk()
disk_config.source_type = "block"
disk_config.source_path = mock.sentinel.volume_path
config.devices.append(disk_config)
mock_get_volume_size.return_value = mock.sentinel.volume_size
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
disk_info = drvr._get_instance_disk_info_from_config(config,
block_device_info)
mock_get_volume_size.assert_called_once_with(mock.sentinel.volume_path)
self.assertEqual(disk_info[0]['disk_size'],
disk_info[0]['virt_disk_size'])
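    # _get_cpu_info() should report the vendor, model, arch, feature set
    # and topology taken from the host capabilities.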
def test_cpu_info(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.arch = fields.Architecture.X86_64
cpu.cells = 1
cpu.cores = 2
cpu.threads = 1
cpu.sockets = 4
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
guest = vconfig.LibvirtConfigGuest()
guest.ostype = fields.VMMode.HVM
guest.arch = fields.Architecture.X86_64
guest.domtype = ["kvm"]
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = fields.VMMode.HVM
guest.arch = fields.Architecture.I686
guest.domtype = ["kvm"]
caps.guests.append(guest)
return caps
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
get_host_capabilities_stub)
want = {"vendor": "AMD",
"features": set(["extapic", "3dnow"]),
"model": "Opteron_G4",
"arch": fields.Architecture.X86_64,
"topology": {"cells": 1, "cores": 2, "threads": 1,
"sockets": 4}}
got = drvr._get_cpu_info()
self.assertEqual(want, got)
def test_get_pcinet_info(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
dev_name = "net_enp2s2_02_9a_a1_37_be_54"
parent_address = "pci_0000_04_11_7"
node_dev = FakeNodeDevice(_fake_NodeDevXml[dev_name])
with mock.patch.object(pci_utils, 'get_net_name_by_vf_pci_address',
return_value=dev_name) as mock_get_net_name, \
mock.patch.object(drvr._host, 'device_lookup_by_name',
return_value=node_dev) as mock_dev_lookup:
actualvf = drvr._get_pcinet_info(parent_address)
expect_vf = {
"name": dev_name,
"capabilities": ["rx", "tx", "sg", "tso", "gso", "gro",
"rxvlan", "txvlan"]
}
self.assertEqual(expect_vf, actualvf)
            mock_get_net_name.assert_called_once_with(parent_address)
            mock_dev_lookup.assert_called_once_with(dev_name)
def test_get_pcidev_info(self):
self.stub_out('nova.virt.libvirt.host.Host.device_lookup_by_name',
lambda self, name: FakeNodeDevice(
_fake_NodeDevXml[name]))
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(
fakelibvirt.Connection, 'getLibVersion') as mock_lib_version:
mock_lib_version.return_value = (
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION) - 1)
actualvf = drvr._get_pcidev_info("pci_0000_04_00_3")
expect_vf = {
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:00.3",
"product_id": '1521',
"numa_node": None,
"vendor_id": '8086',
"label": 'label_8086_1521',
"dev_type": fields.PciDeviceType.SRIOV_PF,
}
self.assertEqual(expect_vf, actualvf)
actualvf = drvr._get_pcidev_info("pci_0000_04_10_7")
expect_vf = {
"dev_id": "pci_0000_04_10_7",
"address": "0000:04:10.7",
"product_id": '1520',
"numa_node": None,
"vendor_id": '8086',
"label": 'label_8086_1520',
"dev_type": fields.PciDeviceType.SRIOV_VF,
"parent_addr": '0000:04:00.3',
}
self.assertEqual(expect_vf, actualvf)
with mock.patch.object(pci_utils, 'get_net_name_by_vf_pci_address',
return_value="net_enp2s2_02_9a_a1_37_be_54"):
actualvf = drvr._get_pcidev_info("pci_0000_04_11_7")
expect_vf = {
"dev_id": "pci_0000_04_11_7",
"address": "0000:04:11.7",
"product_id": '1520',
"vendor_id": '8086',
"numa_node": 0,
"label": 'label_8086_1520',
"dev_type": fields.PciDeviceType.SRIOV_VF,
"parent_addr": '0000:04:00.3',
"capabilities": {
"network": ["rx", "tx", "sg", "tso", "gso", "gro",
"rxvlan", "txvlan"]},
}
self.assertEqual(expect_vf, actualvf)
with mock.patch.object(
pci_utils, 'is_physical_function', return_value=True):
actualvf = drvr._get_pcidev_info("pci_0000_04_00_1")
expect_vf = {
"dev_id": "pci_0000_04_00_1",
"address": "0000:04:00.1",
"product_id": '1013',
"numa_node": 0,
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": fields.PciDeviceType.SRIOV_PF,
}
self.assertEqual(expect_vf, actualvf)
with mock.patch.object(
pci_utils, 'is_physical_function', return_value=False):
actualvf = drvr._get_pcidev_info("pci_0000_04_00_1")
expect_vf = {
"dev_id": "pci_0000_04_00_1",
"address": "0000:04:00.1",
"product_id": '1013',
"numa_node": 0,
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": fields.PciDeviceType.STANDARD,
}
self.assertEqual(expect_vf, actualvf)
with mock.patch.object(
fakelibvirt.Connection, 'getLibVersion') as mock_lib_version:
mock_lib_version.return_value = (
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION))
actualvf = drvr._get_pcidev_info("pci_0000_03_00_0")
expect_vf = {
"dev_id": "pci_0000_03_00_0",
"address": "0000:03:00.0",
"product_id": '1013',
"numa_node": 0,
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": fields.PciDeviceType.SRIOV_PF,
}
self.assertEqual(expect_vf, actualvf)
actualvf = drvr._get_pcidev_info("pci_0000_03_00_1")
expect_vf = {
"dev_id": "pci_0000_03_00_1",
"address": "0000:03:00.1",
"product_id": '1013',
"numa_node": 0,
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": fields.PciDeviceType.SRIOV_PF,
}
self.assertEqual(expect_vf, actualvf)
def test_list_devices_not_supported(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Handle just the NO_SUPPORT error
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virNodeNumOfDevices',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
with mock.patch.object(drvr._conn, 'listDevices',
side_effect=not_supported_exc):
self.assertEqual('[]', drvr._get_pci_passthrough_devices())
# We cache not supported status to avoid emitting too many logging
# messages. Clear this value to test the other exception case.
del drvr._list_devices_supported
# Other errors should not be caught
other_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'other exc',
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
with mock.patch.object(drvr._conn, 'listDevices',
side_effect=other_exc):
self.assertRaises(fakelibvirt.libvirtError,
drvr._get_pci_passthrough_devices)
@mock.patch.object(host.Host, 'list_pci_devices',
return_value=['pci_0000_04_00_3', 'pci_0000_04_10_7',
'pci_0000_04_11_7'])
def test_get_pci_passthrough_devices(self, mock_list):
self.stub_out('nova.virt.libvirt.host.Host.device_lookup_by_name',
lambda self, name: FakeNodeDevice(
_fake_NodeDevXml[name]))
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actjson = drvr._get_pci_passthrough_devices()
expectvfs = [
{
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:00.3",
"product_id": '1521',
"vendor_id": '8086',
"dev_type": fields.PciDeviceType.SRIOV_PF,
"phys_function": None,
"numa_node": None},
{
"dev_id": "pci_0000_04_10_7",
"domain": 0,
"address": "0000:04:10.7",
"product_id": '1520',
"vendor_id": '8086',
"numa_node": None,
"dev_type": fields.PciDeviceType.SRIOV_VF,
"phys_function": [('0x0000', '0x04', '0x00', '0x3')],
"parent_addr": "0000:04:00.3"},
{
"dev_id": "pci_0000_04_11_7",
"domain": 0,
"address": "0000:04:11.7",
"product_id": '1520',
"vendor_id": '8086',
"numa_node": 0,
"dev_type": fields.PciDeviceType.SRIOV_VF,
"phys_function": [('0x0000', '0x04', '0x00', '0x3')],
"parent_addr": "0000:04:00.3"
}
]
actualvfs = jsonutils.loads(actjson)
for dev in range(len(actualvfs)):
for key in actualvfs[dev].keys():
if key not in ['phys_function', 'virt_functions', 'label']:
self.assertEqual(expectvfs[dev][key], actualvfs[dev][key])
mock_list.assert_called_once_with()
# TODO(stephenfin): This only has one caller. Flatten it and remove the
# 'mempages=False' branches or add the missing test
def _test_get_host_numa_topology(self, mempages):
self.flags(physnets=['foo', 'bar', 'baz'], group='neutron')
# we need to call the below again to ensure the updated 'physnets'
# value is read and the new groups created
nova.conf.neutron.register_dynamic_opts(CONF)
self.flags(numa_nodes=[0, 2], group='neutron_tunnel')
self.flags(numa_nodes=[1], group='neutron_physnet_foo')
self.flags(numa_nodes=[3], group='neutron_physnet_bar')
self.flags(numa_nodes=[1, 2, 3], group='neutron_physnet_baz')
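        # With this config the expected per-cell affinity is: cell 0 ->
        # tunneled only, cell 1 -> foo + baz, cell 2 -> baz and tunneled,
        # cell 3 -> bar + baz; the assertions at the end check exactly that.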
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
if mempages:
for i, cell in enumerate(caps.host.topology.cells):
cell.mempages = fakelibvirt.create_mempages(
[(4, 1024 * i), (2048, i)])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set',
return_value=set([0, 1, 3, 4, 5])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set([0, 1, 2, 3, 6])),
):
got_topo = drvr._get_host_numa_topology()
if mempages:
            # cell 0
self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb)
self.assertEqual(0, got_topo.cells[0].mempages[0].total)
self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb)
self.assertEqual(0, got_topo.cells[0].mempages[1].total)
            # cell 1
self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb)
self.assertEqual(1024, got_topo.cells[1].mempages[0].total)
self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb)
self.assertEqual(1, got_topo.cells[1].mempages[1].total)
else:
self.assertEqual([], got_topo.cells[0].mempages)
self.assertEqual([], got_topo.cells[1].mempages)
self.assertEqual(set([]), got_topo.cells[0].pinned_cpus)
self.assertEqual(set([]), got_topo.cells[1].pinned_cpus)
self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
self.assertEqual([set([3])], got_topo.cells[1].siblings)
self.assertEqual(set(),
got_topo.cells[0].network_metadata.physnets)
self.assertEqual(set(['foo', 'baz']),
got_topo.cells[1].network_metadata.physnets)
self.assertEqual(set(['baz']),
got_topo.cells[2].network_metadata.physnets)
self.assertEqual(set(['bar', 'baz']),
got_topo.cells[3].network_metadata.physnets)
self.assertTrue(got_topo.cells[0].network_metadata.tunneled)
self.assertFalse(got_topo.cells[1].network_metadata.tunneled)
self.assertTrue(got_topo.cells[2].network_metadata.tunneled)
self.assertFalse(got_topo.cells[3].network_metadata.tunneled)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_get_host_numa_topology(self, mock_version):
self._test_get_host_numa_topology(mempages=True)
def test_get_host_numa_topology_empty(self):
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = None
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(host.Host, 'has_min_version', return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps)
) as (has_min_version, get_caps):
self.assertIsNone(drvr._get_host_numa_topology())
self.assertEqual(2, get_caps.call_count)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_xen(self, mock_lib_version,
mock_version, mock_type):
self.flags(virt_type='xen', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_lib_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_VERSION)
mock_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_VERSION)
mock_type.return_value = host.HV_DRIVER_XEN
self.assertIsNone(drvr._get_host_numa_topology())
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_get_host_numa_topology_missing_network_metadata(self,
mock_version):
self.flags(physnets=['bar'], group='neutron')
# we need to call the below again to ensure the updated 'physnets'
# value is read and the new groups created
nova.conf.neutron.register_dynamic_opts(CONF)
# we explicitly avoid registering a '[neutron_physnets_bar] numa_nodes'
# option here
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(hardware, 'get_vcpu_pin_set',
return_value=set([0, 1, 3, 4, 5])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set([0, 1, 2, 3, 6])),
):
self.assertRaisesRegex(
exception.InvalidNetworkNUMAAffinity,
"Invalid NUMA network affinity configured: the physnet 'bar' "
"was listed in '\[neutron\] physnets' but no corresponding "
"'\[neutron_physnet_bar\] numa_nodes' option was defined.",
drvr._get_host_numa_topology)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def _test_get_host_numa_topology_invalid_network_affinity(self,
group_name, mock_version):
self.flags(physnets=['foo', 'bar'], group='neutron')
# we need to call the below again to ensure the updated 'physnets'
# value is read and the new groups created
nova.conf.neutron.register_dynamic_opts(CONF)
# set defaults...
for group_ in ['neutron_physnet_foo', 'neutron_physnet_bar',
'neutron_tunnel']:
self.flags(numa_nodes=[0], group=group_)
# but override them for the error case
self.flags(numa_nodes=[4], group=group_name)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = fields.Architecture.X86_64
caps.host.topology = fakelibvirt.NUMATopology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(hardware, 'get_vcpu_pin_set',
return_value=set([0, 1, 3, 4, 5])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set([0, 1, 2, 3, 6])),
):
self.assertRaisesRegex(
exception.InvalidNetworkNUMAAffinity,
r'node 4 for \w+ \w+ is not present',
drvr._get_host_numa_topology)
def test_get_host_numa_topology_invalid_physical_network_affinity(self):
"""Ensure errors are raised for non-existent NUMA nodes.
If a physical network is affined to a non-existent NUMA node, an
exception should be raised. Prove this to be the case.
"""
self._test_get_host_numa_topology_invalid_network_affinity(
'neutron_physnet_bar')
def test_get_host_numa_topology_invalid_tunnel_network_affinity(self):
"""Ensure errors are raised for non-existent NUMA nodes.
If a tunneled network is affined to a non-existent NUMA node, an
exception should be raised. Prove this to be the case.
"""
self._test_get_host_numa_topology_invalid_network_affinity(
'neutron_tunnel')
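    # Each of the test_diagnostic_* cases below makes a single libvirt
    # stats call fail (vcpus, blockStats, interfaceStats or memoryStats)
    # and checks that get_diagnostics() still returns the remaining data.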
def test_diagnostic_vcpus_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
raise fakelibvirt.libvirtError('vcpus missing')
def blockStats(self, path):
return (169, 688640, 0, 0, 1)
def interfaceStats(self, path):
return (4408, 82, 0, 0, 0, 0, 0, 0)
def memoryStats(self):
return {'actual': 220160, 'rss': 200164}
def maxMemory(self):
return 280160
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
lambda self, instance: DiagFakeDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
actual = drvr.get_diagnostics(instance)
expect = {'vda_read': 688640,
'vda_read_req': 169,
'vda_write': 0,
'vda_write_req': 0,
'vda_errors': 1,
'vdb_read': 688640,
'vdb_read_req': 169,
'vdb_write': 0,
'vdb_write_req': 0,
'vdb_errors': 1,
'memory': 280160,
'memory-actual': 220160,
'memory-rss': 200164,
'vnet0_rx': 4408,
'vnet0_rx_drop': 0,
'vnet0_rx_errors': 0,
'vnet0_rx_packets': 82,
'vnet0_tx': 0,
'vnet0_tx_drop': 0,
'vnet0_tx_errors': 0,
'vnet0_tx_packets': 0,
}
self.assertEqual(actual, expect)
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
self.useFixture(utils_fixture.TimeFixture(diags_time))
instance.launched_at = lt
actual = drvr.get_instance_diagnostics(instance)
expected = fake_diagnostics_object(with_disks=True, with_nic=True)
self.assertDiagnosticsEqual(expected, actual)
def test_diagnostic_blockstats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000, 0),
(1, 1, 1640000000, 0),
(2, 1, 3040000000, 0),
(3, 1, 1420000000, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
raise fakelibvirt.libvirtError('blockStats missing')
def interfaceStats(self, path):
return (4408, 82, 0, 0, 0, 0, 0, 0)
def memoryStats(self):
return {'actual': 220160, 'rss': 200164}
def maxMemory(self):
return 280160
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
lambda self, instance: DiagFakeDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
actual = drvr.get_diagnostics(instance)
expect = {'cpu0_time': 15340000000,
'cpu1_time': 1640000000,
'cpu2_time': 3040000000,
'cpu3_time': 1420000000,
'memory': 280160,
'memory-actual': 220160,
'memory-rss': 200164,
'vnet0_rx': 4408,
'vnet0_rx_drop': 0,
'vnet0_rx_errors': 0,
'vnet0_rx_packets': 82,
'vnet0_tx': 0,
'vnet0_tx_drop': 0,
'vnet0_tx_errors': 0,
'vnet0_tx_packets': 0,
}
self.assertEqual(actual, expect)
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
self.useFixture(utils_fixture.TimeFixture(diags_time))
instance.launched_at = lt
actual = drvr.get_instance_diagnostics(instance)
expected = fake_diagnostics_object(with_cpus=True, with_nic=True)
self.assertDiagnosticsEqual(expected, actual)
def test_diagnostic_interfacestats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000, 0),
(1, 1, 1640000000, 0),
(2, 1, 3040000000, 0),
(3, 1, 1420000000, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169, 688640, 0, 0, 1)
def interfaceStats(self, path):
                raise fakelibvirt.libvirtError('interfaceStats missing')
def memoryStats(self):
return {'actual': 220160, 'rss': 200164}
def maxMemory(self):
return 280160
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
lambda self, instance: DiagFakeDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
actual = drvr.get_diagnostics(instance)
expect = {'cpu0_time': 15340000000,
'cpu1_time': 1640000000,
'cpu2_time': 3040000000,
'cpu3_time': 1420000000,
'vda_read': 688640,
'vda_read_req': 169,
'vda_write': 0,
'vda_write_req': 0,
'vda_errors': 1,
'vdb_read': 688640,
'vdb_read_req': 169,
'vdb_write': 0,
'vdb_write_req': 0,
'vdb_errors': 1,
'memory': 280160,
'memory-actual': 220160,
'memory-rss': 200164,
}
self.assertEqual(actual, expect)
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
self.useFixture(utils_fixture.TimeFixture(diags_time))
instance.launched_at = lt
actual = drvr.get_instance_diagnostics(instance)
expected = fake_diagnostics_object(with_cpus=True, with_disks=True)
self.assertDiagnosticsEqual(expected, actual)
def test_diagnostic_memorystats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000, 0),
(1, 1, 1640000000, 0),
(2, 1, 3040000000, 0),
(3, 1, 1420000000, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169, 688640, 0, 0, 1)
def interfaceStats(self, path):
return (4408, 82, 0, 0, 0, 0, 0, 0)
def memoryStats(self):
raise fakelibvirt.libvirtError('memoryStats missing')
def maxMemory(self):
return 280160
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
lambda self, instance: DiagFakeDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
actual = drvr.get_diagnostics(instance)
expect = {'cpu0_time': 15340000000,
'cpu1_time': 1640000000,
'cpu2_time': 3040000000,
'cpu3_time': 1420000000,
'vda_read': 688640,
'vda_read_req': 169,
'vda_write': 0,
'vda_write_req': 0,
'vda_errors': 1,
'vdb_read': 688640,
'vdb_read_req': 169,
'vdb_write': 0,
'vdb_write_req': 0,
'vdb_errors': 1,
'memory': 280160,
'vnet0_rx': 4408,
'vnet0_rx_drop': 0,
'vnet0_rx_errors': 0,
'vnet0_rx_packets': 82,
'vnet0_tx': 0,
'vnet0_tx_drop': 0,
'vnet0_tx_errors': 0,
'vnet0_tx_packets': 0,
}
self.assertEqual(actual, expect)
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
self.useFixture(utils_fixture.TimeFixture(diags_time))
instance.launched_at = lt
actual = drvr.get_instance_diagnostics(instance)
expected = fake_diagnostics_object(with_cpus=True, with_disks=True,
with_nic=True)
self.assertDiagnosticsEqual(expected, actual)
def test_diagnostic_full(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000, 0),
(1, 1, 1640000000, 0),
(2, 1, 3040000000, 0),
(3, 1, 1420000000, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169, 688640, 0, 0, 1)
def interfaceStats(self, path):
return (4408, 82, 0, 0, 0, 0, 0, 0)
def memoryStats(self):
return {'actual': 220160, 'rss': 200164}
def maxMemory(self):
return 280160
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
lambda self, instance: DiagFakeDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
actual = drvr.get_diagnostics(instance)
expect = {'cpu0_time': 15340000000,
'cpu1_time': 1640000000,
'cpu2_time': 3040000000,
'cpu3_time': 1420000000,
'vda_read': 688640,
'vda_read_req': 169,
'vda_write': 0,
'vda_write_req': 0,
'vda_errors': 1,
'vdb_read': 688640,
'vdb_read_req': 169,
'vdb_write': 0,
'vdb_write_req': 0,
'vdb_errors': 1,
'memory': 280160,
'memory-actual': 220160,
'memory-rss': 200164,
'vnet0_rx': 4408,
'vnet0_rx_drop': 0,
'vnet0_rx_errors': 0,
'vnet0_rx_packets': 82,
'vnet0_tx': 0,
'vnet0_tx_drop': 0,
'vnet0_tx_errors': 0,
'vnet0_tx_packets': 0,
}
self.assertEqual(actual, expect)
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
self.useFixture(utils_fixture.TimeFixture(diags_time))
instance.launched_at = lt
actual = drvr.get_instance_diagnostics(instance)
expected = fake_diagnostics_object(with_cpus=True, with_disks=True,
with_nic=True)
self.assertDiagnosticsEqual(expected, actual)
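    # With two interfaces defined in the domain XML, per-NIC counters are
    # expected for both vnet0 and br0.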
@mock.patch.object(host.Host, '_get_domain')
def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
<interface type="bridge">
<mac address="53:55:00:a5:39:39"/>
<model type="virtio"/>
<target dev="br0"/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000, 0),
(1, 1, 1640000000, 0),
(2, 1, 3040000000, 0),
(3, 1, 1420000000, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169, 688640, 0, 0, 1)
def interfaceStats(self, path):
return (4408, 82, 0, 0, 0, 0, 0, 0)
def memoryStats(self):
return {'actual': 220160, 'rss': 200164}
def maxMemory(self):
return 280160
def fake_get_domain(self):
return DiagFakeDomain()
mock_get_domain.side_effect = fake_get_domain
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
actual = drvr.get_diagnostics(instance)
expect = {'cpu0_time': 15340000000,
'cpu1_time': 1640000000,
'cpu2_time': 3040000000,
'cpu3_time': 1420000000,
'vda_read': 688640,
'vda_read_req': 169,
'vda_write': 0,
'vda_write_req': 0,
'vda_errors': 1,
'vdb_read': 688640,
'vdb_read_req': 169,
'vdb_write': 0,
'vdb_write_req': 0,
'vdb_errors': 1,
'memory': 280160,
'memory-actual': 220160,
'memory-rss': 200164,
'vnet0_rx': 4408,
'vnet0_rx_drop': 0,
'vnet0_rx_errors': 0,
'vnet0_rx_packets': 82,
'vnet0_tx': 0,
'vnet0_tx_drop': 0,
'vnet0_tx_errors': 0,
'vnet0_tx_packets': 0,
'br0_rx': 4408,
'br0_rx_drop': 0,
'br0_rx_errors': 0,
'br0_rx_packets': 82,
'br0_tx': 0,
'br0_tx_drop': 0,
'br0_tx_errors': 0,
'br0_tx_packets': 0,
}
self.assertEqual(actual, expect)
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
self.useFixture(utils_fixture.TimeFixture(diags_time))
instance.launched_at = lt
actual = drvr.get_instance_diagnostics(instance)
expected = fake_diagnostics_object(with_cpus=True, with_disks=True,
with_nic=True)
expected.add_nic(mac_address='53:55:00:a5:39:39',
rx_drop=0,
rx_errors=0,
rx_octets=4408,
rx_packets=82,
tx_drop=0,
tx_errors=0,
tx_octets=0,
tx_packets=0)
self.assertDiagnosticsEqual(expected, actual)
@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count(self, mock_list):
"""Domain can fail to return the vcpu description in case it's
        just starting up or shutting down. Make sure the resulting
        libvirtError is handled gracefully.
"""
class DiagFakeDomain(object):
def __init__(self, vcpus):
self._vcpus = vcpus
def vcpus(self):
if self._vcpus is None:
raise fakelibvirt.libvirtError("fake-error")
else:
return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus)
def ID(self):
return 1
def name(self):
return "instance000001"
def UUIDString(self):
return "19479fee-07a5-49bb-9138-d3738280d63c"
mock_list.return_value = [
DiagFakeDomain(None), DiagFakeDomain(5)]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(6, drvr._get_vcpu_used())
mock_list.assert_called_with(only_guests=True, only_running=True)
def _test_get_instance_capabilities(self, want):
        '''Base test for the '_get_instance_capabilities' function.'''
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
caps = vconfig.LibvirtConfigCaps()
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = fields.Architecture.X86_64
guest.domtype = ['kvm', 'qemu']
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = fields.Architecture.I686
guest.domtype = ['kvm']
caps.guests.append(guest)
return caps
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
get_host_capabilities_stub)
got = drvr._get_instance_capabilities()
self.assertEqual(want, got)
def test_get_instance_capabilities_kvm(self):
self.flags(virt_type='kvm', group='libvirt')
# Because virt_type is set to kvm, we get only
# capabilities where the hypervisor_type is kvm
want = [(fields.Architecture.X86_64, 'kvm', 'hvm'),
(fields.Architecture.I686, 'kvm', 'hvm')]
self._test_get_instance_capabilities(want)
def test_get_instance_capabilities_qemu(self):
self.flags(virt_type='qemu', group='libvirt')
# Because virt_type is set to qemu, we get only
# capabilities where the hypervisor_type is qemu
want = [(fields.Architecture.X86_64, 'qemu', 'hvm')]
self._test_get_instance_capabilities(want)
def test_set_cache_mode(self):
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
drvr._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'directsync')
def test_set_cache_mode_shareable(self):
"""Tests that when conf.shareable is True, the configuration is
ignored and the driver_cache is forced to 'none'.
"""
self.flags(disk_cachemodes=['block=writethrough'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.shareable = True
fake_conf.source_type = 'block'
drvr._set_cache_mode(fake_conf)
self.assertEqual('none', fake_conf.driver_cache)
def test_set_cache_mode_invalid_mode(self):
self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
drvr._set_cache_mode(fake_conf)
self.assertIsNone(fake_conf.driver_cache)
def test_set_cache_mode_invalid_object(self):
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuest()
fake_conf.driver_cache = 'fake'
drvr._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
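    # Shared storage detection works by creating a temporary file on the
    # remote host via remotefs and checking whether it shows up locally;
    # the file is then removed either locally or remotely depending on the
    # outcome.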
@mock.patch('os.unlink')
@mock.patch.object(os.path, 'exists')
def _test_shared_storage_detection(self, is_same,
mock_exists, mock_unlink):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
mock_exists.return_value = is_same
with test.nested(
mock.patch.object(drvr._remotefs, 'create_file'),
mock.patch.object(drvr._remotefs, 'remove_file')
) as (mock_rem_fs_create, mock_rem_fs_remove):
result = drvr._is_storage_shared_with('host', '/path')
mock_rem_fs_create.assert_any_call('host', mock.ANY)
create_args, create_kwargs = mock_rem_fs_create.call_args
self.assertTrue(create_args[1].startswith('/path'))
if is_same:
mock_unlink.assert_called_once_with(mock.ANY)
else:
mock_rem_fs_remove.assert_called_with('host', mock.ANY)
remove_args, remove_kwargs = mock_rem_fs_remove.call_args
self.assertTrue(remove_args[1].startswith('/path'))
return result
def test_shared_storage_detection_same_host(self):
self.assertTrue(self._test_shared_storage_detection(True))
def test_shared_storage_detection_different_host(self):
self.assertFalse(self._test_shared_storage_detection(False))
@mock.patch.object(os, 'unlink')
@mock.patch.object(os.path, 'exists')
@mock.patch.object(utils, 'execute')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_host_ip_addr',
return_value='foo')
def test_shared_storage_detection_easy(self, mock_get, mock_exec,
mock_exists, mock_unlink):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))
mock_get.assert_called_once_with()
mock_exec.assert_not_called()
mock_exists.assert_not_called()
mock_unlink.assert_not_called()
def test_store_pid_remove_pid(self):
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
popen = mock.Mock(pid=3)
drvr.job_tracker.add_job(instance, popen.pid)
self.assertIn(3, drvr.job_tracker.jobs[instance.uuid])
drvr.job_tracker.remove_job(instance, popen.pid)
self.assertNotIn(instance.uuid, drvr.job_tracker.jobs)
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
def test_get_domain_info_with_more_return(self, mock_get_domain):
instance = objects.Instance(**self.test_instance)
dom_mock = mock.MagicMock()
dom_mock.info.return_value = [
1, 2048, 737, 8, 12345, 888888
]
dom_mock.ID.return_value = mock.sentinel.instance_id
mock_get_domain.return_value = dom_mock
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = drvr.get_info(instance)
self.assertEqual(1, info.state)
self.assertEqual(mock.sentinel.instance_id, info.internal_id)
dom_mock.info.assert_called_once_with()
dom_mock.ID.assert_called_once_with()
mock_get_domain.assert_called_once_with(instance)
def test_create_domain(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_domain = mock.MagicMock()
guest = drvr._create_domain(domain=mock_domain)
self.assertEqual(mock_domain, guest._domain)
mock_domain.createWithFlags.assert_has_calls([mock.call(0)])
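    # For LXC guests the rootfs is set up via disk.api.setup_container and
    # the resulting device is recorded in the instance system_metadata as
    # 'rootfs_device_name'; the namespace is cleaned up again once the
    # guest reports RUNNING.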
@mock.patch('nova.virt.disk.api.clean_lxc_namespace')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
mock_setup_container, mock_get_info, mock_clean):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_instance = mock.MagicMock()
inst_sys_meta = dict()
mock_instance.system_metadata = inst_sys_meta
mock_get_inst_path.return_value = '/tmp/'
mock_image_backend = mock.MagicMock()
drvr.image_backend = mock_image_backend
mock_image = mock.MagicMock()
mock_image.path = '/tmp/test.img'
drvr.image_backend.by_name.return_value = mock_image
mock_setup_container.return_value = '/dev/nbd0'
mock_get_info.return_value = hardware.InstanceInfo(
state=power_state.RUNNING)
with test.nested(
mock.patch.object(drvr, '_is_booted_from_volume',
return_value=False),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
drvr._create_domain_and_network(self.context, 'xml',
mock_instance, [])
self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
self.assertFalse(mock_instance.called)
mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
drvr.image_backend.by_name.assert_has_calls([mock.call(mock_instance,
'disk')])
setup_container_call = mock.call(
mock_image.get_model(),
container_dir='/tmp/rootfs')
mock_setup_container.assert_has_calls([setup_container_call])
mock_get_info.assert_has_calls([mock.call(mock_instance)])
mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
@mock.patch('nova.virt.disk.api.clean_lxc_namespace')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
mock_ensure_tree, mock_setup_container,
mock_chown, mock_get_info, mock_clean):
self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
gid_maps=["0:1000:100"], group='libvirt')
def chown_side_effect(path, id_maps):
self.assertEqual('/tmp/rootfs', path)
self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
self.assertEqual(0, id_maps[0].start)
self.assertEqual(1000, id_maps[0].target)
self.assertEqual(100, id_maps[0].count)
self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
self.assertEqual(0, id_maps[1].start)
self.assertEqual(1000, id_maps[1].target)
self.assertEqual(100, id_maps[1].count)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_instance = mock.MagicMock()
inst_sys_meta = dict()
mock_instance.system_metadata = inst_sys_meta
mock_get_inst_path.return_value = '/tmp/'
mock_image_backend = mock.MagicMock()
drvr.image_backend = mock_image_backend
mock_image = mock.MagicMock()
mock_image.path = '/tmp/test.img'
drvr.image_backend.by_name.return_value = mock_image
mock_setup_container.return_value = '/dev/nbd0'
mock_chown.side_effect = chown_side_effect
mock_get_info.return_value = hardware.InstanceInfo(
state=power_state.RUNNING)
with test.nested(
mock.patch.object(drvr, '_is_booted_from_volume',
return_value=False),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')
) as (
mock_is_booted_from_volume, mock_create_domain, mock_plug_vifs,
mock_setup_basic_filtering, mock_prepare_instance_filter,
mock_apply_instance_filter
):
drvr._create_domain_and_network(self.context, 'xml',
mock_instance, [])
self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
self.assertFalse(mock_instance.called)
mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
drvr.image_backend.by_name.assert_has_calls([mock.call(mock_instance,
'disk')])
setup_container_call = mock.call(
mock_image.get_model(),
container_dir='/tmp/rootfs')
mock_setup_container.assert_has_calls([setup_container_call])
mock_get_info.assert_has_calls([mock.call(mock_instance)])
mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
@mock.patch('nova.virt.disk.api.teardown_container')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc_not_running(self, mock_get_inst_path,
mock_ensure_tree,
mock_setup_container,
mock_get_info, mock_teardown):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_instance = mock.MagicMock()
inst_sys_meta = dict()
mock_instance.system_metadata = inst_sys_meta
mock_get_inst_path.return_value = '/tmp/'
mock_image_backend = mock.MagicMock()
drvr.image_backend = mock_image_backend
mock_image = mock.MagicMock()
mock_image.path = '/tmp/test.img'
drvr.image_backend.by_name.return_value = mock_image
mock_setup_container.return_value = '/dev/nbd0'
mock_get_info.return_value = hardware.InstanceInfo(
state=power_state.SHUTDOWN)
with test.nested(
mock.patch.object(drvr, '_is_booted_from_volume',
return_value=False),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
drvr._create_domain_and_network(self.context, 'xml',
mock_instance, [])
self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
self.assertFalse(mock_instance.called)
mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
drvr.image_backend.by_name.assert_has_calls([mock.call(mock_instance,
'disk')])
setup_container_call = mock.call(
mock_image.get_model(),
container_dir='/tmp/rootfs')
mock_setup_container.assert_has_calls([setup_container_call])
mock_get_info.assert_has_calls([mock.call(mock_instance)])
teardown_call = mock.call(container_dir='/tmp/rootfs')
mock_teardown.assert_has_calls([teardown_call])
def test_create_domain_define_xml_fails(self):
"""Tests that the xml is logged when defining the domain fails."""
fake_xml = "<test>this is a test</test>"
def fake_defineXML(xml):
            # In a py2 env, write_instance_config encodes the xml with
            # encodeutils.safe_encode, which decodes the text before
            # re-encoding it.
if six.PY2:
self.assertEqual(fake_safe_decode(fake_xml), xml)
else:
self.assertEqual(fake_xml, xml)
raise fakelibvirt.libvirtError('virDomainDefineXML() failed')
def fake_safe_decode(text, *args, **kwargs):
return text + 'safe decoded'
self.log_error_called = False
def fake_error(msg, *args, **kwargs):
self.log_error_called = True
self.assertIn(fake_xml, msg % args)
self.assertIn('safe decoded', msg % args)
self.stub_out('oslo_utils.encodeutils.safe_decode', fake_safe_decode)
self.stub_out('nova.virt.libvirt.guest.LOG.error', fake_error)
self.create_fake_libvirt_mock(defineXML=fake_defineXML)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
fake_xml)
self.assertTrue(self.log_error_called)
def test_create_domain_with_flags_fails(self):
"""Tests that the xml is logged when creating the domain with flags
fails
"""
fake_xml = "<test>this is a test</test>"
fake_domain = FakeVirtDomain(fake_xml)
def fake_createWithFlags(self, launch_flags):
raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed')
self.log_error_called = False
def fake_error(msg, *args, **kwargs):
self.log_error_called = True
self.assertIn(fake_xml, msg % args)
self.stub_out('nova.tests.unit.virt.libvirt.test_driver.'
'FakeVirtDomain.createWithFlags', fake_createWithFlags)
self.stub_out('nova.virt.libvirt.guest.LOG.error', fake_error)
self.create_fake_libvirt_mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
domain=fake_domain)
self.assertTrue(self.log_error_called)
@mock.patch('nova.privsep.libvirt.enable_hairpin')
def test_create_domain_enable_hairpin_fails(self, mock_writefile):
"""Tests that the xml is logged when enabling hairpin mode for the
domain fails.
"""
# Guest.enable_hairpin is only called for nova-network.
# TODO(mikal): remove this test when nova-net goes away
self.flags(use_neutron=False)
fake_xml = "<test>this is a test</test>"
fake_domain = FakeVirtDomain(fake_xml)
mock_writefile.side_effect = IOError
def fake_get_interfaces(*args):
return ["dev"]
self.log_error_called = False
def fake_error(msg, *args, **kwargs):
self.log_error_called = True
self.assertIn(fake_xml, msg % args)
self.stub_out('nova.virt.libvirt.guest.LOG.error', fake_error)
self.create_fake_libvirt_mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.stub_out(
'nova.virt.libvirt.guest.Guest.get_interfaces',
fake_get_interfaces)
self.assertRaises(IOError, drvr._create_domain, domain=fake_domain,
power_on=False)
self.assertTrue(self.log_error_called)
def test_get_vnc_console(self):
instance = objects.Instance(**self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<graphics type='vnc' port='5900'/>"
"</devices></domain>")
vdmock = mock.create_autospec(fakelibvirt.virDomain)
vdmock.XMLDesc.return_value = dummyxml
def fake_lookup(_uuid):
if _uuid == instance['uuid']:
return vdmock
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
vnc_dict = drvr.get_vnc_console(self.context, instance)
self.assertEqual(vnc_dict.port, '5900')
vdmock.XMLDesc.assert_called_once_with(flags=0)
def test_get_vnc_console_unavailable(self):
instance = objects.Instance(**self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices></devices></domain>")
vdmock = mock.create_autospec(fakelibvirt.virDomain)
vdmock.XMLDesc.return_value = dummyxml
def fake_lookup(_uuid):
if _uuid == instance['uuid']:
return vdmock
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ConsoleTypeUnavailable,
drvr.get_vnc_console, self.context, instance)
vdmock.XMLDesc.assert_called_once_with(flags=0)
def test_get_spice_console(self):
instance = objects.Instance(**self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<graphics type='spice' port='5950'/>"
"</devices></domain>")
vdmock = mock.create_autospec(fakelibvirt.virDomain)
vdmock.XMLDesc.return_value = dummyxml
def fake_lookup(_uuid):
if _uuid == instance['uuid']:
return vdmock
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
spice_dict = drvr.get_spice_console(self.context, instance)
self.assertEqual(spice_dict.port, '5950')
vdmock.XMLDesc.assert_called_once_with(flags=0)
def test_get_spice_console_unavailable(self):
instance = objects.Instance(**self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices></devices></domain>")
vdmock = mock.create_autospec(fakelibvirt.virDomain)
vdmock.XMLDesc.return_value = dummyxml
def fake_lookup(_uuid):
if _uuid == instance['uuid']:
return vdmock
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ConsoleTypeUnavailable,
drvr.get_spice_console, self.context, instance)
vdmock.XMLDesc.assert_called_once_with(flags=0)
def test_detach_volume_with_instance_not_found(self):
# Test that detach_volume() method does not raise exception,
# if the instance does not exist.
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(host.Host, '_get_domain',
side_effect=exception.InstanceNotFound(
instance_id=instance.uuid)),
mock.patch.object(drvr, '_disconnect_volume')
) as (_get_domain, _disconnect_volume):
connection_info = {'driver_volume_type': 'fake'}
drvr.detach_volume(
self.context, connection_info, instance, '/dev/sda')
_get_domain.assert_called_once_with(instance)
_disconnect_volume.assert_called_once_with(
self.context, connection_info, instance, encryption=None)
def _test_attach_detach_interface_get_config(self, method_name):
"""Tests that the get_config() method is properly called in
attach_interface() and detach_interface().
        method_name: either "attach_interface" or "detach_interface"
depending on the method to test.
"""
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
lambda self, instance: FakeVirtDomain())
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_image_meta = objects.ImageMeta.from_dict(
{'id': instance['image_ref']})
if method_name == "attach_interface":
mock_setup = self.useFixture(fixtures.MockPatchObject(
drvr.firewall_driver, 'setup_basic_filtering')).mock
mock_build = self.useFixture(fixtures.MockPatchObject(
drvr, '_build_device_metadata')).mock
mock_build.return_value = objects.InstanceDeviceMetadata()
mock_save = self.useFixture(fixtures.MockPatchObject(
objects.Instance, 'save')).mock
expected = drvr.vif_driver.get_config(instance, network_info[0],
fake_image_meta,
instance.get_flavor(),
CONF.libvirt.virt_type,
drvr._host)
mock_get_config = self.useFixture(fixtures.MockPatchObject(
drvr.vif_driver, 'get_config')).mock
mock_get_config.return_value = expected
if method_name == "attach_interface":
drvr.attach_interface(self.context, instance, fake_image_meta,
network_info[0])
mock_setup.assert_called_once_with(instance, network_info)
mock_build.assert_called_once_with(self.context, instance)
mock_save.assert_called_once_with()
elif method_name == "detach_interface":
drvr.detach_interface(self.context, instance, network_info[0])
else:
raise ValueError("Unhandled method %s" % method_name)
mock_get_config.assert_called_once_with(
instance, network_info[0], test.MatchType(objects.ImageMeta),
test.MatchType(objects.Flavor), CONF.libvirt.virt_type, drvr._host)
@mock.patch.object(lockutils, "external_lock")
def test_attach_interface_get_config(self, mock_lock):
"""Tests that the get_config() method is properly called in
attach_interface().
"""
mock_lock.return_value = threading.Semaphore()
self._test_attach_detach_interface_get_config("attach_interface")
def test_detach_interface_get_config(self):
"""Tests that the get_config() method is properly called in
detach_interface().
"""
self._test_attach_detach_interface_get_config("detach_interface")
@mock.patch.object(blockinfo, 'get_root_info')
@mock.patch.object(blockinfo, 'get_disk_bus_for_device_type')
def test_default_root_device_name(self, mock_get_disk, mock_get_root):
instance = {'uuid': 'fake_instance'}
image_meta = objects.ImageMeta.from_dict({'id': uuids.image_id})
root_bdm = {'source_type': 'image',
'destination_type': 'volume',
'image_id': 'fake_id'}
self.flags(virt_type='qemu', group='libvirt')
mock_get_disk.side_effect = ['virtio', 'ide']
mock_get_root.return_value = {'dev': 'vda'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(drvr.default_root_device_name(instance, image_meta,
root_bdm), '/dev/vda')
self.assertEqual(2, mock_get_disk.call_count)
mock_get_disk.assert_has_calls([
mock.call(instance, 'qemu', image_meta, 'disk'),
mock.call(instance, 'qemu', image_meta, 'cdrom')])
mock_get_root.assert_called_once_with(instance, 'qemu', image_meta,
root_bdm, 'virtio', 'ide')
@mock.patch.object(objects.BlockDeviceMapping, "save")
def test_default_device_names_for_instance(self, save_mock):
instance = objects.Instance(**self.test_instance)
instance.root_device_name = '/dev/vda'
ephemerals = [objects.BlockDeviceMapping(
**fake_block_device.AnonFakeDbBlockDeviceDict(
{'device_name': 'vdb',
'source_type': 'blank',
'volume_size': 2,
'destination_type': 'local'}))]
swap = [objects.BlockDeviceMapping(
**fake_block_device.AnonFakeDbBlockDeviceDict(
{'device_name': 'vdg',
'source_type': 'blank',
'volume_size': 512,
'guest_format': 'swap',
'destination_type': 'local'}))]
block_device_mapping = [
objects.BlockDeviceMapping(
**fake_block_device.AnonFakeDbBlockDeviceDict(
{'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-image-id',
'device_name': '/dev/vdxx',
'disk_bus': 'scsi'}))]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.default_device_names_for_instance(instance,
instance.root_device_name,
ephemerals, swap,
block_device_mapping)
# Ephemeral device name was correct so no changes
self.assertEqual('/dev/vdb', ephemerals[0].device_name)
# Swap device name was incorrect so it was changed
self.assertEqual('/dev/vdc', swap[0].device_name)
# Volume device name was changed too, taking the bus into account
self.assertEqual('/dev/sda', block_device_mapping[0].device_name)
self.assertEqual(3, save_mock.call_count)
def _test_get_device_name_for_instance(self, new_bdm, expected_dev):
instance = objects.Instance(**self.test_instance)
instance.root_device_name = '/dev/vda'
instance.ephemeral_gb = 0
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
got_dev = drvr.get_device_name_for_instance(
instance, [], new_bdm)
self.assertEqual(expected_dev, got_dev)
def test_get_device_name_for_instance_simple(self):
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
boot_index=-1, volume_id='fake-id',
device_name=None, guest_format=None,
disk_bus=None, device_type=None)
self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')
def test_get_device_name_for_instance_suggested(self):
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
boot_index=-1, volume_id='fake-id',
device_name='/dev/vdg', guest_format=None,
disk_bus=None, device_type=None)
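        # The suggested /dev/vdg is ignored; the driver picks the next
        # free device name after the root disk instead.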
self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')
def test_get_device_name_for_instance_bus(self):
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
boot_index=-1, volume_id='fake-id',
device_name=None, guest_format=None,
disk_bus='scsi', device_type=None)
self._test_get_device_name_for_instance(new_bdm, '/dev/sda')
def test_get_device_name_for_instance_device_type(self):
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
boot_index=-1, volume_id='fake-id',
device_name=None, guest_format=None,
disk_bus=None, device_type='floppy')
self._test_get_device_name_for_instance(new_bdm, '/dev/fda')
def test_is_supported_fs_format(self):
supported_fs = [nova.privsep.fs.FS_FORMAT_EXT2,
nova.privsep.fs.FS_FORMAT_EXT3,
nova.privsep.fs.FS_FORMAT_EXT4,
nova.privsep.fs.FS_FORMAT_XFS]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
for fs in supported_fs:
self.assertTrue(drvr.is_supported_fs_format(fs))
        unsupported_fs = ['', 'dummy']
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        for fs in unsupported_fs:
            self.assertFalse(drvr.is_supported_fs_format(fs))
@mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_post_live_migration_at_destination(
self, mock_get_guest, mock_write_instance_config):
instance = objects.Instance(id=1, uuid=uuids.instance)
dom = mock.MagicMock()
guest = libvirt_guest.Guest(dom)
mock_get_guest.return_value = guest
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.post_live_migration_at_destination(mock.ANY, instance, mock.ANY)
# Assert that we don't try to write anything to the destination node
# since the source live migrated with the VIR_MIGRATE_PERSIST_DEST flag
mock_write_instance_config.assert_not_called()
def test_create_propagates_exceptions(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(id=1, uuid=uuids.instance,
image_ref='my_fake_image')
with test.nested(
mock.patch.object(drvr, '_create_domain_setup_lxc'),
mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
mock.patch.object(drvr, '_is_booted_from_volume',
return_value=False),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr, 'firewall_driver'),
mock.patch.object(drvr, '_create_domain',
side_effect=exception.NovaException),
mock.patch.object(drvr, 'cleanup')):
self.assertRaises(exception.NovaException,
drvr._create_domain_and_network,
self.context,
'xml',
instance, None)
def test_create_without_pause(self):
self.flags(virt_type='lxc', group='libvirt')
@contextlib.contextmanager
def fake_lxc_disk_handler(*args, **kwargs):
yield
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
with test.nested(
mock.patch.object(drvr, '_lxc_disk_handler',
side_effect=fake_lxc_disk_handler),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr, 'firewall_driver'),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(drvr, 'cleanup')) as (
            _handler, plug_vifs, firewall_driver, create, cleanup):
domain = drvr._create_domain_and_network(self.context, 'xml',
instance, None)
self.assertEqual(0, create.call_args_list[0][1]['pause'])
self.assertEqual(0, domain.resume.call_count)
def _test_create_with_network_events(self, neutron_failure=None,
power_on=True):
generated_events = []
def wait_timeout():
event = mock.MagicMock()
if neutron_failure == 'timeout':
raise eventlet.timeout.Timeout()
elif neutron_failure == 'error':
event.status = 'failed'
else:
event.status = 'completed'
return event
def fake_prepare(instance, name, tag):
m = mock.MagicMock()
m.instance = instance
m.event_name = '%s-%s' % (name, tag)
m.wait.side_effect = wait_timeout
generated_events.append(m)
return m
virtapi = manager.ComputeVirtAPI(mock.MagicMock())
prepare = virtapi._compute.instance_events.prepare_for_instance_event
prepare.side_effect = fake_prepare
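        # prepare_for_instance_event is stubbed so every expected
        # 'network-vif-plugged' event records a waiter whose wait()
        # simulates neutron completing, failing or timing out.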
drvr = libvirt_driver.LibvirtDriver(virtapi, False)
instance = objects.Instance(vm_state=vm_states.BUILDING,
**self.test_instance)
vifs = [{'id': uuids.vif_1, 'active': False},
{'id': uuids.vif_2, 'active': False}]
@mock.patch.object(drvr, 'plug_vifs')
@mock.patch.object(drvr, 'firewall_driver')
@mock.patch.object(drvr, '_create_domain')
@mock.patch.object(drvr, 'cleanup')
def test_create(cleanup, create, fw_driver, plug_vifs):
domain = drvr._create_domain_and_network(self.context, 'xml',
instance, vifs,
power_on=power_on)
plug_vifs.assert_called_with(instance, vifs)
pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
self.assertEqual(pause,
create.call_args_list[0][1]['pause'])
if pause:
domain.resume.assert_called_once_with()
if neutron_failure and CONF.vif_plugging_is_fatal:
cleanup.assert_called_once_with(self.context,
instance, network_info=vifs,
block_device_info=None)
test_create()
if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
prepare.assert_has_calls([
mock.call(instance, 'network-vif-plugged', uuids.vif_1),
mock.call(instance, 'network-vif-plugged', uuids.vif_2)])
for event in generated_events:
if neutron_failure and generated_events.index(event) != 0:
self.assertEqual(0, event.call_count)
elif (neutron_failure == 'error' and
not CONF.vif_plugging_is_fatal):
event.wait.assert_called_once_with()
else:
self.assertEqual(0, prepare.call_count)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron(self, is_neutron):
self._test_create_with_network_events()
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_power_off(self,
is_neutron):
# Tests that we don't wait for events if we don't start the instance.
self._test_create_with_network_events(power_on=False)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_nowait(self, is_neutron):
self.flags(vif_plugging_timeout=0)
self._test_create_with_network_events()
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_timeout(
self, is_neutron):
self.flags(vif_plugging_is_fatal=False)
self._test_create_with_network_events(neutron_failure='timeout')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_timeout(
self, is_neutron):
self.assertRaises(exception.VirtualInterfaceCreateException,
self._test_create_with_network_events,
neutron_failure='timeout')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_error(
self, is_neutron):
self.flags(vif_plugging_is_fatal=False)
self._test_create_with_network_events(neutron_failure='error')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_error(
self, is_neutron):
self.assertRaises(exception.VirtualInterfaceCreateException,
self._test_create_with_network_events,
neutron_failure='error')
@mock.patch('nova.utils.is_neutron', return_value=False)
def test_create_with_network_events_non_neutron(self, is_neutron):
self._test_create_with_network_events()
def test_create_with_other_error(self):
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
@mock.patch.object(drvr, 'plug_vifs')
@mock.patch.object(drvr, 'firewall_driver')
@mock.patch.object(drvr, '_create_domain')
@mock.patch.object(drvr, '_cleanup_failed_start')
def the_test(mock_cleanup, mock_create, mock_fw, mock_plug):
instance = objects.Instance(**self.test_instance)
mock_create.side_effect = test.TestingException
self.assertRaises(test.TestingException,
drvr._create_domain_and_network,
self.context, 'xml', instance, [], None)
mock_cleanup.assert_called_once_with(self.context, instance,
[], None, None, False)
# destroy_disks_on_failure=True, used only by spawn()
mock_cleanup.reset_mock()
self.assertRaises(test.TestingException,
drvr._create_domain_and_network,
self.context, 'xml', instance, [], None,
destroy_disks_on_failure=True)
mock_cleanup.assert_called_once_with(self.context, instance,
[], None, None, True)
the_test()
def test_cleanup_failed_start_no_guest(self):
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
drvr._cleanup_failed_start(None, None, None, None, None, False)
self.assertTrue(mock_cleanup.called)
def test_cleanup_failed_start_inactive_guest(self):
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
guest = mock.MagicMock()
guest.is_active.return_value = False
with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
drvr._cleanup_failed_start(None, None, None, None, guest, False)
self.assertTrue(mock_cleanup.called)
self.assertFalse(guest.poweroff.called)
def test_cleanup_failed_start_active_guest(self):
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
guest = mock.MagicMock()
guest.is_active.return_value = True
with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
drvr._cleanup_failed_start(None, None, None, None, guest, False)
self.assertTrue(mock_cleanup.called)
self.assertTrue(guest.poweroff.called)
def test_cleanup_failed_start_failed_poweroff(self):
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
guest = mock.MagicMock()
guest.is_active.return_value = True
guest.poweroff.side_effect = test.TestingException
with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
self.assertRaises(test.TestingException,
drvr._cleanup_failed_start,
None, None, None, None, guest, False)
self.assertTrue(mock_cleanup.called)
self.assertTrue(guest.poweroff.called)
def test_cleanup_failed_start_failed_poweroff_destroy_disks(self):
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
guest = mock.MagicMock()
guest.is_active.return_value = True
guest.poweroff.side_effect = test.TestingException
with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
self.assertRaises(test.TestingException,
drvr._cleanup_failed_start,
None, None, None, None, guest, True)
            mock_cleanup.assert_called_once_with(
                None, None, network_info=None, block_device_info=None,
                destroy_disks=True)
self.assertTrue(guest.poweroff.called)
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
mock_dom = mock.MagicMock()
mock_encryption_meta = mock.MagicMock()
get_encryption_metadata.return_value = mock_encryption_meta
fake_xml = """
<domain>
<name>instance-00000001</name>
<memory>1048576</memory>
<vcpu>1</vcpu>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' cache='none'/>
<source file='/path/fake-volume1'/>
<target dev='vda' bus='virtio'/>
</disk>
</devices>
</domain>
"""
fake_volume_id = "fake-volume-id"
connection_info = {"driver_volume_type": "fake",
"data": {"access_mode": "rw",
"volume_id": fake_volume_id}}
def fake_getitem(*args, **kwargs):
fake_bdm = {'connection_info': connection_info,
'mount_device': '/dev/vda'}
return fake_bdm.get(args[0])
mock_volume = mock.MagicMock()
mock_volume.__getitem__.side_effect = fake_getitem
block_device_info = {'block_device_mapping': [mock_volume]}
network_info = [network_model.VIF(id='1'),
network_model.VIF(id='2', active=True)]
with test.nested(
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
mock.patch.object(drvr.firewall_driver,
'prepare_instance_filter'),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
) as (plug_vifs, setup_basic_filtering, prepare_instance_filter,
create_domain, apply_instance_filter):
create_domain.return_value = libvirt_guest.Guest(mock_dom)
guest = drvr._create_domain_and_network(
self.context, fake_xml, instance, network_info,
block_device_info=block_device_info)
plug_vifs.assert_called_once_with(instance, network_info)
setup_basic_filtering.assert_called_once_with(instance,
network_info)
prepare_instance_filter.assert_called_once_with(instance,
network_info)
pause = self._get_pause_flag(drvr, network_info)
create_domain.assert_called_once_with(
fake_xml, pause=pause, power_on=True, post_xml_callback=None)
self.assertEqual(mock_dom, guest._domain)
def test_get_guest_storage_config(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
test_instance = copy.deepcopy(self.test_instance)
test_instance["default_swap_device"] = None
instance = objects.Instance(**test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = instance.get_flavor()
conn_info = {'driver_volume_type': 'fake', 'data': {}}
bdm = objects.BlockDeviceMapping(
self.context,
**fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': '/dev/vdc'}))
bdi = {'block_device_mapping':
driver_block_device.convert_volumes([bdm])}
bdm = bdi['block_device_mapping'][0]
bdm['connection_info'] = conn_info
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
bdi)
mock_conf = mock.MagicMock(source_path='fake')
with test.nested(
mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
'save'),
mock.patch.object(drvr, '_connect_volume'),
mock.patch.object(drvr, '_get_volume_config',
return_value=mock_conf)
) as (volume_save, connect_volume, get_volume_config):
devices = drvr._get_guest_storage_config(self.context, instance,
image_meta, disk_info, False, bdi, flavor, "hvm")
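            # Three disk devices are expected: the root disk, the default
            # ephemeral (vdb) and the attached volume (vdc).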
self.assertEqual(3, len(devices))
self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
self.assertIsNone(instance.default_swap_device)
connect_volume.assert_called_with(self.context,
bdm['connection_info'], instance)
get_volume_config.assert_called_with(bdm['connection_info'],
{'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
volume_save.assert_called_once_with()
def test_get_neutron_events(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
network_info = [network_model.VIF(id='1'),
network_model.VIF(id='2', active=True)]
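        # Only the inactive VIF ('1') should yield a network-vif-plugged
        # event; VIF '2' is already active.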
events = drvr._get_neutron_events(network_info)
self.assertEqual([('network-vif-plugged', '1')], events)
def test_unplug_vifs_ignores_errors(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
vif_driver.unplug.side_effect = exception.AgentError(
method='unplug')
drvr._unplug_vifs('inst', [1], ignore_errors=True)
vif_driver.unplug.assert_called_once_with('inst', 1)
def test_unplug_vifs_reports_errors(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
vif_driver.unplug.side_effect = exception.AgentError(
method='unplug')
self.assertRaises(exception.AgentError,
drvr.unplug_vifs, 'inst', [1])
vif_driver.unplug.assert_called_once_with('inst', 1)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
drvr.firewall_driver = mock.Mock()
drvr._disconnect_volume = mock.Mock()
fake_inst = {'name': 'foo'}
fake_bdms = [{'connection_info': 'foo',
'mount_device': None}]
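        # Even without a mount_device on the BDM, cleanup should still
        # disconnect the volume.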
with mock.patch('nova.virt.driver'
'.block_device_info_get_mapping',
return_value=fake_bdms):
drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
self.assertTrue(drvr._disconnect_volume.called)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
fake_inst = {'name': 'foo'}
with mock.patch.object(drvr._conn, 'lookupByUUIDString') as lookup:
lookup.return_value = fake_inst
# NOTE(danms): Make unplug cause us to bail early, since
# we only care about how it was called
unplug.side_effect = test.TestingException
self.assertRaises(test.TestingException,
drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
unplug.assert_called_once_with(fake_inst, 'netinfo', True)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'unfilter_instance')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files',
return_value=True)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_cleanup_migrate_data_shared_block_storage(self,
_undefine_domain,
save,
delete_instance_files,
unfilter_instance):
# Tests the cleanup method when migrate_data has
# is_shared_block_storage=True and destroy_disks=False.
instance = objects.Instance(self.context, **self.test_instance)
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_block_storage=True)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
drvr.cleanup(
self.context, instance, network_info={}, destroy_disks=False,
migrate_data=migrate_data, destroy_vifs=False)
delete_instance_files.assert_called_once_with(instance)
self.assertEqual(1, int(instance.system_metadata['clean_attempts']))
self.assertTrue(instance.cleaned)
save.assert_called_once_with()
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_use_native_luks')
def test_swap_volume_native_luks_blocked(self, mock_use_native_luks,
mock_get_encryption):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
mock_use_native_luks.return_value = True
# dest volume is encrypted
mock_get_encryption.side_effect = [{}, {'provider': 'luks'}]
self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
{}, {}, None, None, None)
# src volume is encrypted
mock_get_encryption.side_effect = [{'provider': 'luks'}, {}]
self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
{}, {}, None, None, None)
# both volumes are encrypted
mock_get_encryption.side_effect = [{'provider': 'luks'},
{'provider': 'luks'}]
self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
{}, {}, None, None, None)
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete',
return_value=True)
def _test_swap_volume(self, mock_is_job_complete, source_type,
resize=False, fail=False):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
mock_dom = mock.MagicMock()
guest = libvirt_guest.Guest(mock_dom)
with mock.patch.object(drvr._conn, 'defineXML',
create=True) as mock_define:
srcfile = "/first/path"
dstfile = "/second/path"
orig_xml = six.text_type(mock.sentinel.orig_xml)
new_xml = six.text_type(mock.sentinel.new_xml)
mock_dom.XMLDesc.return_value = orig_xml
mock_dom.isPersistent.return_value = True
def fake_rebase_success(*args, **kwargs):
# Make sure the XML is set after the rebase so we know
# get_xml_desc was called after the update.
mock_dom.XMLDesc.return_value = new_xml
if not fail:
mock_dom.blockRebase.side_effect = fake_rebase_success
# If the swap succeeds, make sure we use the new XML to
# redefine the domain.
expected_xml = new_xml
else:
if resize:
mock_dom.blockResize.side_effect = test.TestingException()
expected_exception = test.TestingException
else:
mock_dom.blockRebase.side_effect = test.TestingException()
expected_exception = exception.VolumeRebaseFailed
# If the swap fails, make sure we use the original domain XML
# to redefine the domain.
expected_xml = orig_xml
# Run the swap volume code.
mock_conf = mock.MagicMock(source_type=source_type,
source_path=dstfile)
if not fail:
drvr._swap_volume(guest, srcfile, mock_conf, 1)
else:
self.assertRaises(expected_exception, drvr._swap_volume, guest,
srcfile, mock_conf, 1)
# Verify we read the original persistent config.
expected_call_count = 1
expected_calls = [mock.call(
flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
fakelibvirt.VIR_DOMAIN_XML_SECURE))]
if not fail:
# Verify we read the updated live config.
expected_call_count = 2
expected_calls.append(
mock.call(flags=fakelibvirt.VIR_DOMAIN_XML_SECURE))
self.assertEqual(expected_call_count, mock_dom.XMLDesc.call_count)
mock_dom.XMLDesc.assert_has_calls(expected_calls)
# Verify we called with the correct flags.
expected_flags = (fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
if source_type == 'block':
expected_flags = (expected_flags |
fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_DEV)
mock_dom.blockRebase.assert_called_once_with(srcfile, dstfile, 0,
flags=expected_flags)
# Verify we defined the expected XML.
mock_define.assert_called_once_with(expected_xml)
# Verify we called resize with the correct args.
if resize:
mock_dom.blockResize.assert_called_once_with(
srcfile, 1 * units.Gi / units.Ki)
def test_swap_volume_file(self):
self._test_swap_volume('file')
def test_swap_volume_block(self):
"""If the swapped volume is type="block", make sure that we give
libvirt the correct VIR_DOMAIN_BLOCK_REBASE_COPY_DEV flag to ensure the
correct type="block" XML is generated (bug 1691195)
"""
self._test_swap_volume('block')
def test_swap_volume_rebase_fail(self):
self._test_swap_volume('block', fail=True)
def test_swap_volume_resize_fail(self):
self._test_swap_volume('file', resize=True, fail=True)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_swap_volume(self, get_guest, connect_volume, get_volume_config,
swap_volume, disconnect_volume):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
old_connection_info = {'driver_volume_type': 'fake',
'serial': 'old-volume-id',
'data': {'device_path': '/fake-old-volume',
'access_mode': 'rw'}}
new_connection_info = {'driver_volume_type': 'fake',
'serial': 'new-volume-id',
'data': {'device_path': '/fake-new-volume',
'access_mode': 'rw'}}
mock_dom = mock.MagicMock()
guest = libvirt_guest.Guest(mock_dom)
mock_dom.XMLDesc.return_value = """<domain>
<devices>
<disk type='file'>
<source file='/fake-old-volume'/>
<target dev='vdb' bus='virtio'/>
</disk>
</devices>
</domain>
"""
mock_dom.name.return_value = 'inst'
mock_dom.UUIDString.return_value = 'uuid'
get_guest.return_value = guest
conf = mock.MagicMock(source_path='/fake-new-volume')
get_volume_config.return_value = conf
conn.swap_volume(self.context, old_connection_info,
new_connection_info, instance, '/dev/vdb', 1)
get_guest.assert_called_once_with(instance)
connect_volume.assert_called_once_with(self.context,
new_connection_info, instance)
swap_volume.assert_called_once_with(guest, 'vdb', conf, 1)
disconnect_volume.assert_called_once_with(self.context,
old_connection_info,
instance)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.rebase')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
@mock.patch('nova.virt.libvirt.guest.Guest.get_disk')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
@mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
def test_swap_volume_disconnect_new_volume_on_rebase_error(self,
write_config, get_guest, get_disk, get_volume_config,
connect_volume, disconnect_volume, rebase, get_volume_encryption):
"""Assert that disconnect_volume is called for the new volume if an
error is encountered while rebasing
"""
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
guest = libvirt_guest.Guest(mock.MagicMock())
get_guest.return_value = guest
get_volume_encryption.return_value = {}
exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
'internal error', error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
rebase.side_effect = exc
self.assertRaises(exception.VolumeRebaseFailed, conn.swap_volume,
self.context, mock.sentinel.old_connection_info,
mock.sentinel.new_connection_info,
instance, '/dev/vdb', 0)
connect_volume.assert_called_once_with(self.context,
mock.sentinel.new_connection_info, instance)
disconnect_volume.assert_called_once_with(self.context,
mock.sentinel.new_connection_info, instance)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.abort_job')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
@mock.patch('nova.virt.libvirt.guest.Guest.get_disk')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
@mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
def test_swap_volume_disconnect_new_volume_on_pivot_error(self,
write_config, get_guest, get_disk, get_volume_config,
connect_volume, disconnect_volume, abort_job, is_job_complete,
get_volume_encryption):
"""Assert that disconnect_volume is called for the new volume if an
error is encountered while pivoting to the new volume
"""
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
guest = libvirt_guest.Guest(mock.MagicMock())
get_guest.return_value = guest
get_volume_encryption.return_value = {}
exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
'internal error', error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
is_job_complete.return_value = True
abort_job.side_effect = [None, exc]
self.assertRaises(exception.VolumeRebaseFailed, conn.swap_volume,
self.context, mock.sentinel.old_connection_info,
mock.sentinel.new_connection_info,
instance, '/dev/vdb', 0)
connect_volume.assert_called_once_with(self.context,
mock.sentinel.new_connection_info, instance)
disconnect_volume.assert_called_once_with(self.context,
mock.sentinel.new_connection_info, instance)
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
@mock.patch('nova.privsep.path.chown')
def _test_live_snapshot(self, mock_chown, mock_is_job_complete,
can_quiesce=False, require_quiesce=False):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
mock_dom = mock.MagicMock()
test_image_meta = self.test_image_meta.copy()
if require_quiesce:
test_image_meta = {'properties': {'os_require_quiesce': 'yes'}}
with test.nested(
mock.patch.object(drvr._conn, 'defineXML', create=True),
mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
mock.patch.object(drvr, '_set_quiesced')
) as (mock_define, mock_size, mock_backing, mock_create_cow,
mock_snapshot, mock_quiesce):
xmldoc = "<domain/>"
srcfile = "/first/path"
dstfile = "/second/path"
bckfile = "/other/path"
dltfile = dstfile + ".delta"
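            # The live snapshot path creates a delta qcow2 backed by the
            # original backing file, rebases the active disk onto it with
            # blockRebase and then extracts the uploadable snapshot from
            # that delta.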
mock_dom.XMLDesc.return_value = xmldoc
mock_dom.isPersistent.return_value = True
mock_size.return_value = 1004009
mock_backing.return_value = bckfile
guest = libvirt_guest.Guest(mock_dom)
if not can_quiesce:
mock_quiesce.side_effect = (
exception.InstanceQuiesceNotSupported(
instance_id=self.test_instance['id'], reason='test'))
image_meta = objects.ImageMeta.from_dict(test_image_meta)
mock_is_job_complete.return_value = True
drvr._live_snapshot(self.context, self.test_instance, guest,
srcfile, dstfile, "qcow2", "qcow2", image_meta)
mock_dom.XMLDesc.assert_called_once_with(flags=(
fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
fakelibvirt.VIR_DOMAIN_XML_SECURE))
mock_dom.blockRebase.assert_called_once_with(
srcfile, dltfile, 0, flags=(
fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW))
mock_size.assert_called_once_with(srcfile, format="qcow2")
mock_backing.assert_called_once_with(srcfile, basename=False,
format="qcow2")
mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
mock_chown.assert_called_once_with(dltfile, uid=os.getuid())
mock_snapshot.assert_called_once_with(dltfile, "qcow2",
dstfile, "qcow2")
mock_define.assert_called_once_with(xmldoc)
mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
mock.ANY, True)
if can_quiesce:
mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
mock.ANY, False)
def test_live_snapshot(self):
self._test_live_snapshot()
def test_live_snapshot_with_quiesce(self):
self._test_live_snapshot(can_quiesce=True)
def test_live_snapshot_with_require_quiesce(self):
self._test_live_snapshot(can_quiesce=True, require_quiesce=True)
def test_live_snapshot_with_require_quiesce_fails(self):
self.assertRaises(exception.InstanceQuiesceNotSupported,
self._test_live_snapshot,
can_quiesce=False, require_quiesce=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
def test_live_migration_hostname_valid(self, mock_lm):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.live_migration(self.context, self.test_instance,
"host1.example.com",
lambda x: x,
lambda x: x)
self.assertEqual(1, mock_lm.call_count)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
@mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_hostname.return_value = False
self.assertRaises(exception.InvalidHostname,
drvr.live_migration,
self.context, self.test_instance,
"foo/?com=/bin/sh",
lambda x: x,
lambda x: x)
def test_live_migration_force_complete(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = fake_instance.fake_instance_obj(
None, name='instancename', id=1,
uuid='c83a75d4-4d53-4be5-9a40-04d9c0389ff8')
drvr.active_migrations[instance.uuid] = deque()
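        # force_complete should append a 'force-complete' token to the
        # instance's active-migration queue.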
drvr.live_migration_force_complete(instance)
self.assertEqual(
1, drvr.active_migrations[instance.uuid].count("force-complete"))
@mock.patch.object(host.Host, "get_connection")
@mock.patch.object(fakelibvirt.virDomain, "abortJob")
def test_live_migration_abort(self, mock_abort, mock_conn):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False)
guest = libvirt_guest.Guest(dom)
with mock.patch.object(nova.virt.libvirt.host.Host, 'get_guest',
return_value=guest):
drvr.live_migration_abort(self.test_instance)
self.assertTrue(mock_abort.called)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('tempfile.mkstemp')
@mock.patch('os.close', return_value=None)
def test_check_instance_shared_storage_local_raw(self,
mock_close,
mock_mkstemp,
mock_exists):
instance_uuid = uuids.fake
self.flags(images_type='raw', group='libvirt')
self.flags(instances_path='/tmp')
mock_mkstemp.return_value = (-1,
'/tmp/{0}/file'.format(instance_uuid))
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
temp_file = driver.check_instance_shared_storage_local(self.context,
instance)
self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
temp_file['filename'])
def test_check_instance_shared_storage_local_rbd(self):
self.flags(images_type='rbd', group='libvirt')
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
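        # rbd-backed instances have no local instance directory, so no
        # temporary check file is created and None is returned.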
        self.assertIsNone(
            driver.check_instance_shared_storage_local(self.context,
                                                       instance))
def test_version_to_string(self):
string_ver = libvirt_utils.version_to_string((4, 33, 173))
self.assertEqual("4.33.173", string_ver)
def test_virtuozzo_min_version_fail(self):
self.flags(virt_type='parallels', group='libvirt')
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(driver._conn, 'getVersion') as mock_getver:
mock_getver.return_value = \
versionutils.convert_version_to_int(
libvirt_driver.MIN_VIRTUOZZO_VERSION) - 1
self.assertRaises(exception.NovaException,
driver.init_host, 'wibble')
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_VIRTUOZZO_VERSION))
def test_virtuozzo_min_version_ok(self, mock_get_virtuozzo_version):
self.flags(virt_type='parallels', group='libvirt')
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
driver.init_host('wibble')
def test_get_guest_config_parallels_vm(self):
self.flags(virt_type='parallels', group='libvirt')
self.flags(images_type='ploop', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertEqual("parallels", cfg.virt_type)
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
self.assertEqual(fields.VMMode.HVM, cfg.os_type)
self.assertIsNone(cfg.os_root)
self.assertEqual(6, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertEqual(cfg.devices[0].driver_format, "ploop")
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
def test_get_guest_config_parallels_ct_rescue(self):
self._test_get_guest_config_parallels_ct(rescue=True)
def test_get_guest_config_parallels_ct(self):
self._test_get_guest_config_parallels_ct(rescue=False)
def _test_get_guest_config_parallels_ct(self, rescue=False):
self.flags(virt_type='parallels', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
ct_instance = self.test_instance.copy()
ct_instance["vm_mode"] = fields.VMMode.EXE
instance_ref = objects.Instance(**ct_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
if rescue:
rescue_data = ct_instance
else:
rescue_data = None
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, {'mapping': {'disk': {}}},
rescue_data)
self.assertEqual("parallels", cfg.virt_type)
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
self.assertEqual(fields.VMMode.EXE, cfg.os_type)
self.assertEqual("/sbin/init", cfg.os_init_path)
self.assertIsNone(cfg.os_root)
if rescue:
self.assertEqual(5, len(cfg.devices))
else:
self.assertEqual(4, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestFilesys)
device_index = 0
fs = cfg.devices[device_index]
self.assertEqual(fs.source_type, "file")
self.assertEqual(fs.driver_type, "ploop")
self.assertEqual(fs.target_dir, "/")
if rescue:
device_index = 1
fs = cfg.devices[device_index]
self.assertEqual(fs.source_type, "file")
self.assertEqual(fs.driver_type, "ploop")
self.assertEqual(fs.target_dir, "/mnt/rescue")
self.assertIsInstance(cfg.devices[device_index + 1],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[device_index + 2],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[device_index + 3],
vconfig.LibvirtConfigGuestVideo)
def _test_get_guest_config_parallels_volume(self, vmmode, devices):
self.flags(virt_type='parallels', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
ct_instance = self.test_instance.copy()
ct_instance["vm_mode"] = vmmode
instance_ref = objects.Instance(**ct_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
conn_info = {'driver_volume_type': 'fake', 'data': {}}
bdm = objects.BlockDeviceMapping(
self.context,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 0,
'source_type': 'volume', 'destination_type': 'volume',
'device_name': '/dev/sda'}))
info = {'block_device_mapping': driver_block_device.convert_volumes(
[bdm])}
info['block_device_mapping'][0]['connection_info'] = conn_info
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
info)
with mock.patch.object(
driver_block_device.DriverVolumeBlockDevice, 'save'
) as mock_save:
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info, None, info)
mock_save.assert_called_once_with()
self.assertEqual("parallels", cfg.virt_type)
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
self.assertEqual(vmmode, cfg.os_type)
self.assertIsNone(cfg.os_root)
self.assertEqual(devices, len(cfg.devices))
disk_found = False
for dev in cfg.devices:
result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys)
self.assertFalse(result)
if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and
(dev.source_path is None or
'disk.local' not in dev.source_path)):
self.assertEqual("disk", dev.source_device)
self.assertEqual("sda", dev.target_dev)
disk_found = True
self.assertTrue(disk_found)
def test_get_guest_config_parallels_volume(self):
self._test_get_guest_config_parallels_volume(fields.VMMode.EXE, 4)
self._test_get_guest_config_parallels_volume(fields.VMMode.HVM, 6)
def test_get_guest_disk_config_rbd_older_config_drive_fall_back(self):
# New config drives are stored in rbd but existing instances have
# config drives in the old location under the instances path.
# Test that the driver falls back to 'flat' for config drive if it
# doesn't exist in rbd.
self.flags(images_type='rbd', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
mock_rbd_image = mock.Mock()
mock_flat_image = mock.Mock()
mock_flat_image.libvirt_info.return_value = mock.sentinel.diskconfig
drvr.image_backend.by_name.side_effect = [mock_rbd_image,
mock_flat_image]
mock_rbd_image.exists.return_value = False
instance = objects.Instance()
disk_mapping = {'disk.config': {'bus': 'ide',
'dev': 'hda',
'type': 'file'}}
flavor = objects.Flavor(extra_specs={})
diskconfig = drvr._get_guest_disk_config(
instance, 'disk.config', disk_mapping, flavor,
drvr._get_disk_config_image_type())
self.assertEqual(2, drvr.image_backend.by_name.call_count)
call1 = mock.call(instance, 'disk.config', 'rbd')
call2 = mock.call(instance, 'disk.config', 'flat')
drvr.image_backend.by_name.assert_has_calls([call1, call2])
self.assertEqual(mock.sentinel.diskconfig, diskconfig)
def _test_prepare_domain_for_snapshot(self, live_snapshot, state):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance_ref = objects.Instance(**self.test_instance)
with mock.patch.object(drvr, "suspend") as mock_suspend:
drvr._prepare_domain_for_snapshot(
self.context, live_snapshot, state, instance_ref)
return mock_suspend.called
def test_prepare_domain_for_snapshot(self):
# Ensure that suspend() is only called on RUNNING or PAUSED instances
for test_power_state in power_state.STATE_MAP.keys():
if test_power_state in (power_state.RUNNING, power_state.PAUSED):
self.assertTrue(self._test_prepare_domain_for_snapshot(
False, test_power_state))
else:
self.assertFalse(self._test_prepare_domain_for_snapshot(
False, test_power_state))
def test_prepare_domain_for_snapshot_lxc(self):
self.flags(virt_type='lxc', group='libvirt')
# Ensure that suspend() is never called with LXC
for test_power_state in power_state.STATE_MAP.keys():
self.assertFalse(self._test_prepare_domain_for_snapshot(
False, test_power_state))
def test_prepare_domain_for_snapshot_live_snapshots(self):
# Ensure that suspend() is never called for live snapshots
for test_power_state in power_state.STATE_MAP.keys():
self.assertFalse(self._test_prepare_domain_for_snapshot(
True, test_power_state))
@mock.patch('os.walk')
@mock.patch('os.path.exists')
@mock.patch('os.path.getsize')
@mock.patch('os.path.isdir')
@mock.patch('nova.utils.execute')
@mock.patch.object(host.Host, '_get_domain')
def test_get_instance_disk_info_parallels_ct(self, mock_get_domain,
mock_execute,
mock_isdir,
mock_getsize,
mock_exists,
mock_walk):
dummyxml = ("<domain type='parallels'><name>instance-0000000a</name>"
"<os><type>exe</type></os>"
"<devices>"
"<filesystem type='file'>"
"<driver format='ploop' type='ploop'/>"
"<source file='/test/disk'/>"
"<target dir='/'/></filesystem>"
"</devices></domain>")
ret = ("image: /test/disk/root.hds\n"
"file format: parallels\n"
"virtual size: 20G (21474836480 bytes)\n"
"disk size: 789M\n")
self.flags(virt_type='parallels', group='libvirt')
instance = objects.Instance(**self.test_instance)
instance.vm_mode = fields.VMMode.EXE
fake_dom = FakeVirtDomain(fake_xml=dummyxml)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_domain.return_value = fake_dom
mock_walk.return_value = [('/test/disk', [],
['DiskDescriptor.xml', 'root.hds'])]
def getsize_sideeffect(*args, **kwargs):
if args[0] == '/test/disk/DiskDescriptor.xml':
return 790
if args[0] == '/test/disk/root.hds':
return 827326464
mock_getsize.side_effect = getsize_sideeffect
mock_exists.return_value = True
mock_isdir.return_value = True
mock_execute.return_value = (ret, '')
info = drvr.get_instance_disk_info(instance)
info = jsonutils.loads(info)
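# The reported disk_size is the sum of the faked file sizes
# (827326464 + 790), and over_committed_disk_size is
# virt_disk_size - disk_size.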
self.assertEqual(info[0]['type'], 'ploop')
self.assertEqual(info[0]['path'], '/test/disk')
self.assertEqual(info[0]['disk_size'], 827327254)
self.assertEqual(info[0]['over_committed_disk_size'], 20647509226)
self.assertEqual(info[0]['virt_disk_size'], 21474836480)
def test_get_guest_config_with_mdevs(self):
mdevs = [uuids.mdev1]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, {'mapping': {}},
mdevs=mdevs)
# Loop over all devices to make sure we have at least one mediated one.
for device in cfg.devices:
if isinstance(device, vconfig.LibvirtConfigGuestHostdevMDEV):
# Make sure we use the provided UUID
self.assertEqual(uuids.mdev1, device.uuid)
break
else:
assert False, "Unable to find any mediated device for the guest."
class HostStateTestCase(test.NoDBTestCase):
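"""Tests host state reporting via LibvirtDriver.get_available_resource()."""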
cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686",
"features": ["ssse3", "monitor", "pni", "sse2", "sse",
"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge",
"mtrr", "sep", "apic"],
"topology": {"cores": "1", "threads": "1", "sockets": "1"}}
instance_caps = [(fields.Architecture.X86_64, "kvm", "hvm"),
(fields.Architecture.I686, "kvm", "hvm")]
pci_devices = [{
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:10.3",
"product_id": '1521',
"vendor_id": '8086',
"dev_type": fields.PciDeviceType.SRIOV_PF,
"phys_function": None}]
numa_topology = objects.NUMATopology(cells=[
objects.NUMACell(
id=1, cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0,
mempages=[], siblings=[set([1]), set([2])], pinned_cpus=set([])),
objects.NUMACell(
id=2, cpuset=set([3, 4]), memory=1024, cpu_usage=0, memory_usage=0,
mempages=[], siblings=[set([3]), set([4])], pinned_cpus=set([]))])
class FakeConnection(libvirt_driver.LibvirtDriver):
"""Fake connection object."""
def __init__(self):
super(HostStateTestCase.FakeConnection,
self).__init__(fake.FakeVirtAPI(), True)
self._host = host.Host("qemu:///system")
def _get_memory_mb_total():
return 497
def _get_memory_mb_used():
return 88
self._host.get_memory_mb_total = _get_memory_mb_total
self._host.get_memory_mb_used = _get_memory_mb_used
def _get_vcpu_total(self):
return 1
def _get_vcpu_used(self):
return 0
def _get_vgpu_total(self):
return 0
def _get_cpu_info(self):
return HostStateTestCase.cpu_info
def _get_disk_over_committed_size_total(self):
return 0
def _get_local_gb_info(self):
return {'total': 100, 'used': 20, 'free': 80}
def get_host_uptime(self):
return ('10:01:16 up 1:36, 6 users, '
'load average: 0.21, 0.16, 0.19')
def _get_disk_available_least(self):
return 13091
def _get_instance_capabilities(self):
return HostStateTestCase.instance_caps
def _get_pci_passthrough_devices(self):
return jsonutils.dumps(HostStateTestCase.pci_devices)
def _get_mdev_capable_devices(self, types=None):
return []
def _get_mediated_devices(self, types=None):
return []
def _get_host_numa_topology(self):
return HostStateTestCase.numa_topology
def setUp(self):
super(HostStateTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
@mock.patch.object(fakelibvirt, "openAuth")
def test_update_status(self, mock_open):
mock_open.return_value = fakelibvirt.Connection("qemu:///system")
drvr = HostStateTestCase.FakeConnection()
stats = drvr.get_available_resource("compute1")
self.assertEqual(stats["vcpus"], 1)
self.assertEqual(stats["memory_mb"], 497)
self.assertEqual(stats["local_gb"], 100)
self.assertEqual(stats["vcpus_used"], 0)
self.assertEqual(stats["memory_mb_used"], 88)
self.assertEqual(stats["local_gb_used"], 20)
self.assertEqual(stats["hypervisor_type"], 'QEMU')
self.assertEqual(stats["hypervisor_version"],
fakelibvirt.FAKE_QEMU_VERSION)
self.assertEqual(stats["hypervisor_hostname"], 'compute1')
cpu_info = jsonutils.loads(stats["cpu_info"])
self.assertEqual(cpu_info,
{"vendor": "Intel", "model": "pentium",
"arch": fields.Architecture.I686,
"features": ["ssse3", "monitor", "pni", "sse2", "sse",
"fxsr", "clflush", "pse36", "pat", "cmov",
"mca", "pge", "mtrr", "sep", "apic"],
"topology": {"cores": "1", "threads": "1", "sockets": "1"}
})
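# disk_available_least should reflect the free local storage (80G)
# since nothing is over-committed.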
self.assertEqual(stats["disk_available_least"], 80)
self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
HostStateTestCase.pci_devices)
self.assertEqual(objects.NUMATopology.obj_from_db_obj(
stats['numa_topology']),
HostStateTestCase.numa_topology)
class TestUpdateProviderTree(test.NoDBTestCase):
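"""Tests for LibvirtDriver.update_provider_tree()."""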
vcpus = 24
memory_mb = 1024
disk_gb = 200
cpu_traits = {t: False for t in libvirt_utils.CPU_TRAITS_MAPPING.values()}
def setUp(self):
super(TestUpdateProviderTree, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
# create compute node resource provider
self.cn_rp = rp_object.ResourceProvider(
uuid=uuids.cn,
name='compute-node',
)
# create shared storage resource provider
self.shared_rp = rp_object.ResourceProvider(
uuid=uuids.shared_storage,
name='shared_storage_rp',
)
# create resource provider list
rp_list = rp_object.ResourceProviderList(
objects=[self.cn_rp, self.shared_rp]
)
def _pt_with_cn_rp_and_shared_rp():
"""Create a provider tree instance having both compute node
and shared storage resource provider.
"""
pt = provider_tree.ProviderTree()
for rp in rp_list:
pt.new_root(rp.name, rp.uuid, generation=0)
return pt
self.pt = _pt_with_cn_rp_and_shared_rp()
self.cpu_traits['HW_CPU_X86_AVX512F'] = True
self.cpu_traits['HW_CPU_X86_BMI'] = True
def _get_inventory(self):
return {
rc_fields.ResourceClass.VCPU: {
'total': self.vcpus,
'min_unit': 1,
'max_unit': self.vcpus,
'step_size': 1,
},
rc_fields.ResourceClass.MEMORY_MB: {
'total': self.memory_mb,
'min_unit': 1,
'max_unit': self.memory_mb,
'step_size': 1,
},
rc_fields.ResourceClass.DISK_GB: {
'total': self.disk_gb,
'min_unit': 1,
'max_unit': self.disk_gb,
'step_size': 1,
},
}
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_cpu_traits',
new=mock.Mock(return_value=cpu_traits))
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vgpu_total')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_local_gb_info',
return_value={'total': disk_gb})
@mock.patch('nova.virt.libvirt.host.Host.get_memory_mb_total',
return_value=memory_mb)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vcpu_total',
return_value=vcpus)
def _test_update_provider_tree(self, mock_vcpu, mock_mem, mock_disk,
mock_vgpus, total_vgpus=0):
mock_vgpus.return_value = total_vgpus
self.driver.update_provider_tree(self.pt,
self.cn_rp.name)
def test_update_provider_tree(self):
self._test_update_provider_tree()
self.assertEqual(self._get_inventory(),
(self.pt.data(self.cn_rp.uuid)).inventory)
self.assertEqual(set(['HW_CPU_X86_AVX512F', 'HW_CPU_X86_BMI']),
self.pt.data(self.cn_rp.uuid).traits)
def test_update_provider_tree_with_vgpus(self):
self._test_update_provider_tree(total_vgpus=8)
inventory = self._get_inventory()
# Add VGPU to the expected inventory
inventory[rc_fields.ResourceClass.VGPU] = {'step_size': 1,
'min_unit': 1,
'max_unit': 8,
'total': 8}
self.assertEqual(inventory,
(self.pt.data(self.cn_rp.uuid)).inventory)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vgpu_total',
return_value=0)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_local_gb_info',
return_value={'total': disk_gb})
@mock.patch('nova.virt.libvirt.host.Host.get_memory_mb_total',
return_value=memory_mb)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vcpu_total',
return_value=vcpus)
# TODO(efried): Bug #1784020
@unittest.expectedFailure
def test_update_provider_tree_for_shared_disk_gb_resource(
self, mock_vcpu, mock_mem, mock_disk, mock_vgpus):
"""Test to check DISK_GB is reported from shared resource
provider.
"""
shared_rp_inv = {
rc_fields.ResourceClass.DISK_GB: {
'total': self.disk_gb,
'min_unit': 1,
'max_unit': self.disk_gb,
'step_size': 1,
}
}
# report inventory for shared storage resource provider
self.pt.update_inventory(self.shared_rp.uuid, shared_rp_inv)
# add trait to shared storage resource provider
self.pt.update_traits(self.shared_rp.uuid,
['MISC_SHARES_VIA_AGGREGATE'])
self.driver.update_provider_tree(self.pt,
self.cn_rp.name)
inventory = self._get_inventory()
# Remove the DISK_GB resource from the inventory as we don't expect it
# to be reported by the compute node resource provider.
del inventory[rc_fields.ResourceClass.DISK_GB]
self.assertEqual(inventory,
(self.pt.data(self.cn_rp.uuid)).inventory)
self.assertEqual(shared_rp_inv,
(self.pt.data(self.shared_rp.uuid)).inventory)
def test_update_provider_tree_with_file_backed_memory(self):
self.flags(file_backed_memory=1024,
group="libvirt")
self._test_update_provider_tree()
self.assertEqual(self._get_inventory(),
(self.pt.data(self.cn_rp.uuid)).inventory)
def test_update_provider_tree_with_cpu_traits(self):
# These two traits should be unset when update_provider_tree is called
self.pt.add_traits(self.cn_rp.uuid, 'HW_CPU_X86_VMX', 'HW_CPU_X86_XOP')
self._test_update_provider_tree()
self.assertEqual(set(['HW_CPU_X86_AVX512F', 'HW_CPU_X86_BMI']),
self.pt.data(self.cn_rp.uuid).traits)
class TraitsComparisonMixin(object):
def assertTraitsEqual(self, expected, actual):
exp = {t: t in expected
for t in libvirt_utils.CPU_TRAITS_MAPPING.values()}
self.assertEqual(exp, actual)
class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):
super(LibvirtDriverTestCase, self).setUp()
self.flags(sysinfo_serial="none", group="libvirt")
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.useFixture(fakelibvirt.FakeLibvirtFixture())
os_vif.initialize()
self.drvr = libvirt_driver.LibvirtDriver(
fake.FakeVirtAPI(), read_only=True)
self.context = context.get_admin_context()
self.test_image_meta = {
"disk_format": "raw",
}
def _create_instance(self, params=None):
"""Create a test instance."""
if not params:
params = {}
flavor = objects.Flavor(memory_mb=512,
swap=0,
vcpu_weight=None,
root_gb=10,
id=2,
name=u'm1.tiny',
ephemeral_gb=20,
rxtx_factor=1.0,
flavorid=u'1',
vcpus=1,
extra_specs={})
flavor.update(params.pop('flavor', {}))
inst = {}
inst['id'] = 1
inst['uuid'] = uuids.fake_instance_id
inst['os_type'] = 'linux'
inst['image_ref'] = uuids.fake_image_ref
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type_id'] = 2
inst['ami_launch_index'] = 0
inst['host'] = 'host1'
inst['root_gb'] = flavor.root_gb
inst['ephemeral_gb'] = flavor.ephemeral_gb
inst['config_drive'] = True
inst['kernel_id'] = 2
inst['ramdisk_id'] = 3
inst['key_data'] = 'ABCDEFG'
inst['system_metadata'] = {}
inst['metadata'] = {}
inst['task_state'] = None
inst.update(params)
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['metadata', 'system_metadata',
'pci_devices'],
flavor=flavor, **inst)
# Attributes which need to be set so they don't touch the db,
# but which are not worth the effort of faking properly.
for field in ['numa_topology', 'vcpu_model', 'trusted_certs']:
setattr(instance, field, None)
return instance
@mock.patch(('nova.virt.libvirt.driver.LibvirtDriver.'
'_get_instance_disk_info'), return_value=[])
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr',
return_value='10.0.0.1')
@mock.patch(('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_storage_shared_with'), return_value=False)
@mock.patch('os.rename')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute', side_effect=test.TestingException)
def test_migrate_disk_and_power_off_exception(
self, mock_execute, mock_exists, mock_rename, mock_is_shared,
mock_get_host_ip, mock_destroy, mock_get_disk_info):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off.
"""
ins_ref = self._create_instance()
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
self.assertRaises(test.TestingException,
self.drvr.migrate_disk_and_power_off,
context.get_admin_context(), ins_ref, '10.0.0.2',
flavor_obj, None)
@mock.patch(('nova.virt.libvirt.driver.LibvirtDriver.'
'_get_instance_disk_info'))
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr',
return_value='10.0.0.1')
@mock.patch(('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_storage_shared_with'), return_value=False)
@mock.patch('os.rename')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def _test_migrate_disk_and_power_off(
self, ctxt, flavor_obj, mock_execute, mock_exists, mock_rename,
mock_is_shared, mock_get_host_ip, mock_destroy,
mock_get_disk_info, block_device_info=None,
params_for_instance=None):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off.
"""
instance = self._create_instance(params=params_for_instance)
disk_info = list(fake_disk_info_byname(instance).values())
disk_info_text = jsonutils.dumps(disk_info)
mock_get_disk_info.return_value = disk_info
# dest is different host case
out = self.drvr.migrate_disk_and_power_off(
ctxt, instance, '10.0.0.2', flavor_obj, None,
block_device_info=block_device_info)
self.assertEqual(out, disk_info_text)
# dest is same host case
out = self.drvr.migrate_disk_and_power_off(
ctxt, instance, '10.0.0.1', flavor_obj, None,
block_device_info=block_device_info)
self.assertEqual(out, disk_info_text)
def test_migrate_disk_and_power_off(self):
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
self._test_migrate_disk_and_power_off(self.context, flavor_obj)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
def test_migrate_disk_and_power_off_boot_from_volume(self,
disconnect_volume):
info = {
'block_device_mapping': [
{'boot_index': None,
'mount_device': '/dev/vdd',
'connection_info': mock.sentinel.conn_info_vdd},
{'boot_index': 0,
'mount_device': '/dev/vda',
'connection_info': mock.sentinel.conn_info_vda}]}
flavor = {'root_gb': 1, 'ephemeral_gb': 0}
flavor_obj = objects.Flavor(**flavor)
# Note(Mike_D): The size of the instance's ephemeral_gb is 0 GB.
self._test_migrate_disk_and_power_off(self.context,
flavor_obj, block_device_info=info,
params_for_instance={'image_ref': None,
'root_gb': 10,
'ephemeral_gb': 0,
'flavor': {'root_gb': 10,
'ephemeral_gb': 0}})
disconnect_volume.assert_called_with(self.context,
mock.sentinel.conn_info_vda, mock.ANY)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
def test_migrate_disk_and_power_off_boot_from_volume_backed_snapshot(
self, disconnect_volume):
# Such an instance has a non-empty image_ref, but must still be
# considered as booted from volume.
info = {
'block_device_mapping': [
{'boot_index': None,
'mount_device': '/dev/vdd',
'connection_info': mock.sentinel.conn_info_vdd},
{'boot_index': 0,
'mount_device': '/dev/vda',
'connection_info': mock.sentinel.conn_info_vda}]}
flavor = {'root_gb': 1, 'ephemeral_gb': 0}
flavor_obj = objects.Flavor(**flavor)
self._test_migrate_disk_and_power_off(self.context,
flavor_obj, block_device_info=info,
params_for_instance={
'image_ref': uuids.fake_volume_backed_image_ref,
'root_gb': 10,
'ephemeral_gb': 0,
'flavor': {'root_gb': 10,
'ephemeral_gb': 0}})
disconnect_volume.assert_called_with(self.context,
mock.sentinel.conn_info_vda, mock.ANY)
@mock.patch('os.rename')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._get_instance_disk_info')
def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
get_host_ip_addr,
mock_destroy,
mock_copy_image,
mock_rename):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off.
"""
# Original instance config
instance = self._create_instance({'flavor': {'root_gb': 10,
'ephemeral_gb': 0}})
disk_info = list(fake_disk_info_byname(instance).values())
mock_get_disk_info.return_value = disk_info
get_host_ip_addr.return_value = '10.0.0.1'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Resize the fake instance to a 20G root and 1024M swap disk
flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
flavor_obj = objects.Flavor(**flavor)
# Destination is same host
out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
instance, '10.0.0.1',
flavor_obj, None)
mock_get_disk_info.assert_called_once_with(instance, None)
self.assertTrue(get_host_ip_addr.called)
mock_destroy.assert_called_once_with(instance)
disk_info_text = jsonutils.dumps(disk_info)
self.assertEqual(disk_info_text, out)
# disk.swap isn't moved
for call in mock_rename.mock_calls:
self.assertFalse(call[0].endswith('.swap'))
# disk.swap isn't copied
for call in mock_copy_image.mock_calls:
self.assertFalse(call[0].endswith('.swap'))
def _test_migrate_disk_and_power_off_resize_check(self, expected_exc):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
.migrate_disk_and_power_off.
"""
instance = self._create_instance()
disk_info = list(fake_disk_info_byname(instance).values())
def fake_get_instance_disk_info(instance, xml=None,
block_device_info=None):
return disk_info
def fake_destroy(instance):
pass
def fake_get_host_ip_addr():
return '10.0.0.1'
self.stubs.Set(self.drvr, '_get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.drvr, '_destroy', fake_destroy)
self.stubs.Set(self.drvr, 'get_host_ip_addr',
fake_get_host_ip_addr)
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
# Migration is not implemented for LVM backed instances
self.assertRaises(expected_exc,
self.drvr.migrate_disk_and_power_off,
None, instance, '10.0.0.1', flavor_obj, None)
@mock.patch('nova.utils.execute')
@mock.patch('os.rename')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._is_storage_shared_with')
def _test_migrate_disk_and_power_off_backing_file(self,
shared_storage,
mock_is_shared_storage,
mock_get_disk_info,
mock_destroy,
mock_rename,
mock_execute):
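# qemu-img convert should not be invoked when migrating a qcow2 disk
# that has a backing file.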
self.convert_file_called = False
flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0}
flavor_obj = objects.Flavor(**flavor)
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
'virt_disk_size': '10737418240',
'backing_file': '/base/disk',
'disk_size': '83886080'}]
mock_get_disk_info.return_value = disk_info
mock_is_shared_storage.return_value = shared_storage
def fake_execute(*args, **kwargs):
self.assertNotEqual(args[0:2], ['qemu-img', 'convert'])
mock_execute.side_effect = fake_execute
instance = self._create_instance()
out = self.drvr.migrate_disk_and_power_off(
context.get_admin_context(), instance, '10.0.0.2',
flavor_obj, None)
self.assertTrue(mock_is_shared_storage.called)
mock_destroy.assert_called_once_with(instance)
disk_info_text = jsonutils.dumps(disk_info)
self.assertEqual(out, disk_info_text)
def test_migrate_disk_and_power_off_shared_storage(self):
self._test_migrate_disk_and_power_off_backing_file(True)
def test_migrate_disk_and_power_off_non_shared_storage(self):
self._test_migrate_disk_and_power_off_backing_file(False)
def test_migrate_disk_and_power_off_lvm(self):
self.flags(images_type='lvm', group='libvirt')
def fake_execute(*args, **kwargs):
pass
self.stubs.Set(utils, 'execute', fake_execute)
expected_exc = exception.InstanceFaultRollback
self._test_migrate_disk_and_power_off_resize_check(expected_exc)
def test_migrate_disk_and_power_off_resize_cannot_ssh(self):
def fake_execute(*args, **kwargs):
raise processutils.ProcessExecutionError()
def fake_is_storage_shared(dest, inst_base):
self.checked_shared_storage = True
return False
self.stubs.Set(self.drvr, '_is_storage_shared_with',
fake_is_storage_shared)
self.stubs.Set(utils, 'execute', fake_execute)
expected_exc = exception.InstanceFaultRollback
self._test_migrate_disk_and_power_off_resize_check(expected_exc)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._get_instance_disk_info')
def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info):
instance = self._create_instance()
flavor = {'root_gb': 5, 'ephemeral_gb': 10}
flavor_obj = objects.Flavor(**flavor)
mock_get_disk_info.return_value = fake_disk_info_json(instance)
self.assertRaises(
exception.InstanceFaultRollback,
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._get_instance_disk_info')
def test_migrate_disk_and_power_off_resize_error_rbd(self,
mock_get_disk_info):
# Check error on resize root disk down for rbd.
# The difference is that get_instance_disk_info always returns
# an empty list for rbd.
# Ephemeral size is not changed in this case (otherwise another check
# would raise the same error).
self.flags(images_type='rbd', group='libvirt')
instance = self._create_instance()
flavor = {'root_gb': 5, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
mock_get_disk_info.return_value = []
self.assertRaises(
exception.InstanceFaultRollback,
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._get_instance_disk_info')
def test_migrate_disk_and_power_off_resize_error_default_ephemeral(
self, mock_get_disk_info):
# Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb.
instance = self._create_instance()
flavor = {'root_gb': 10, 'ephemeral_gb': 0}
flavor_obj = objects.Flavor(**flavor)
mock_get_disk_info.return_value = fake_disk_info_json(instance)
self.assertRaises(exception.InstanceFaultRollback,
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._get_instance_disk_info')
@mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
mock_get_disk_info):
mappings = [
{
'device_name': '/dev/sdb4',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': 1
},
{
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'device_type': 'disk',
'volume_id': 1,
'guest_format': None,
'boot_index': 1,
'volume_size': 6
},
{
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'snapshot_id': 1,
'device_type': 'disk',
'guest_format': None,
'boot_index': 0,
'volume_size': 4
},
{
'device_name': '/dev/sda3',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1,
'volume_size': 3
}
]
mock_get.return_value = mappings
instance = self._create_instance()
# Old flavor, eph is 20, real disk is 3, target is 2, fail
flavor = {'root_gb': 10, 'ephemeral_gb': 2}
flavor_obj = objects.Flavor(**flavor)
mock_get_disk_info.return_value = fake_disk_info_json(instance)
self.assertRaises(
exception.InstanceFaultRollback,
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
# Old flavor, eph is 20, real disk is 3, target is 4
flavor = {'root_gb': 10, 'ephemeral_gb': 4}
flavor_obj = objects.Flavor(**flavor)
self._test_migrate_disk_and_power_off(self.context, flavor_obj)
@mock.patch('os.rename')
@mock.patch('nova.utils.execute')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._is_storage_shared_with')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._get_instance_disk_info')
def test_migrate_disk_and_power_off_resize_copy_disk_info(
self, mock_disk_info, mock_shared, mock_path, mock_destroy,
mock_copy, mock_execute, mock_rename):
instance = self._create_instance()
disk_info = list(fake_disk_info_byname(instance).values())
instance_base = os.path.dirname(disk_info[0]['path'])
flavor = {'root_gb': 10, 'ephemeral_gb': 25}
flavor_obj = objects.Flavor(**flavor)
mock_disk_info.return_value = disk_info
mock_path.return_value = instance_base
mock_shared.return_value = False
src_disk_info_path = os.path.join(instance_base + '_resize',
'disk.info')
with mock.patch.object(os.path, 'exists', autospec=True) \
as mock_exists:
# disk.info exists on the source
mock_exists.side_effect = \
lambda path: path == src_disk_info_path
self.drvr.migrate_disk_and_power_off(context.get_admin_context(),
instance, mock.sentinel,
flavor_obj, None)
self.assertTrue(mock_exists.called)
dst_disk_info_path = os.path.join(instance_base, 'disk.info')
mock_copy.assert_any_call(src_disk_info_path, dst_disk_info_path,
host=mock.sentinel, on_execute=mock.ANY,
on_completion=mock.ANY)
def test_wait_for_running(self):
def fake_get_info(instance):
if instance['name'] == "not_found":
raise exception.InstanceNotFound(instance_id=instance['uuid'])
elif instance['name'] == "running":
return hardware.InstanceInfo(state=power_state.RUNNING)
else:
return hardware.InstanceInfo(state=power_state.SHUTDOWN)
self.stubs.Set(self.drvr, 'get_info',
fake_get_info)
# instance not found case
self.assertRaises(exception.InstanceNotFound,
self.drvr._wait_for_running,
{'name': 'not_found',
'uuid': 'not_found_uuid'})
# instance is running case
self.assertRaises(loopingcall.LoopingCallDone,
self.drvr._wait_for_running,
{'name': 'running',
'uuid': 'running_uuid'})
# else case
self.drvr._wait_for_running({'name': 'else',
'uuid': 'other_uuid'})
@mock.patch('nova.utils.execute')
@mock.patch('os.rename')
def test_disk_raw_to_qcow2(self, mock_rename, mock_execute):
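# _disk_raw_to_qcow2 should convert to a temporary '<path>_qcow' file
# and then rename it over the original path.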
path = '/test/disk'
_path_qcow = path + '_qcow'
self.drvr._disk_raw_to_qcow2(path)
mock_execute.assert_has_calls([
mock.call('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, _path_qcow)])
mock_rename.assert_has_calls([
mock.call(_path_qcow, path)])
@mock.patch.object(libvirt_driver.LibvirtDriver, '_inject_data')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_domain_and_network')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_disk_raw_to_qcow2')
# Don't write libvirt xml to disk
@mock.patch.object(libvirt_utils, 'write_to_file')
# NOTE(mdbooth): The following 4 mocks are required to execute
# get_guest_xml().
@mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
@mock.patch('nova.privsep.utils.supports_direct_io')
@mock.patch('nova.api.metadata.base.InstanceMetadata')
def _test_finish_migration(self, mock_instance_metadata,
mock_supports_direct_io,
mock_build_device_metadata,
mock_set_host_enabled, mock_write_to_file,
mock_raw_to_qcow2,
mock_create_domain_and_network,
mock_get_info, mock_inject_data,
power_on=True, resize_instance=False):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_migration.
"""
self.flags(use_cow_images=True)
if power_on:
state = power_state.RUNNING
else:
state = power_state.SHUTDOWN
mock_get_info.return_value = hardware.InstanceInfo(state=state)
instance = self._create_instance(
{'config_drive': str(True),
'task_state': task_states.RESIZE_FINISH,
'flavor': {'swap': 500}})
bdi = {'block_device_mapping': []}
migration = objects.Migration()
migration.source_compute = 'fake-source-compute'
migration.dest_compute = 'fake-dest-compute'
migration.source_node = 'fake-source-node'
migration.dest_node = 'fake-dest-node'
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
# Source disks are raw to test conversion
disk_info = list(fake_disk_info_byname(instance, type='raw').values())
disk_info_text = jsonutils.dumps(disk_info)
backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
mock_create_domain_and_network.return_value = \
libvirt_guest.Guest('fake_dom')
self.drvr.finish_migration(
context.get_admin_context(), migration, instance,
disk_info_text, [], image_meta,
resize_instance, bdi, power_on)
# Assert that we converted the root, ephemeral, and swap disks
instance_path = libvirt_utils.get_instance_path(instance)
convert_calls = [mock.call(os.path.join(instance_path, name))
for name in ('disk', 'disk.local', 'disk.swap')]
mock_raw_to_qcow2.assert_has_calls(convert_calls, any_order=True)
# Implicitly assert that we did not convert the config disk
self.assertEqual(len(convert_calls), mock_raw_to_qcow2.call_count)
disks = backend.disks
# Assert that we called cache() on kernel, ramdisk, disk,
# disk.local, and disk.swap.
# This results in the creation of kernel, ramdisk, and disk.swap,
# and in a backing file check and resize of disk and disk.local.
for name in ('kernel', 'ramdisk', 'disk', 'disk.local', 'disk.swap'):
self.assertTrue(disks[name].cache.called,
'cache() not called for %s' % name)
# Assert that we created a snapshot for the root disk
root_disk = disks['disk']
self.assertTrue(root_disk.create_snap.called)
# Assert that we didn't import a config disk
# Note that some code paths currently create a config disk object,
# but only use it for an exists() check. Therefore the object may
# exist, but shouldn't have been imported.
if 'disk.config' in disks:
self.assertFalse(disks['disk.config'].import_file.called)
# We shouldn't be injecting data during migration
self.assertFalse(mock_inject_data.called)
# NOTE(mdbooth): If we wanted to check the generated xml, we could
# insert a hook here
mock_create_domain_and_network.assert_called_once_with(
mock.ANY, mock.ANY, instance, [],
block_device_info=bdi, power_on=power_on,
vifs_already_plugged=True, post_xml_callback=mock.ANY)
def test_finish_migration_resize(self):
with mock.patch('nova.virt.libvirt.guest.Guest.sync_guest_time'
) as mock_guest_time:
self._test_finish_migration(resize_instance=True)
self.assertTrue(mock_guest_time.called)
def test_finish_migration_power_on(self):
with mock.patch('nova.virt.libvirt.guest.Guest.sync_guest_time'
) as mock_guest_time:
self._test_finish_migration()
self.assertTrue(mock_guest_time.called)
def test_finish_migration_power_off(self):
self._test_finish_migration(power_on=False)
def _test_finish_revert_migration(self, power_on):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_revert_migration.
"""
powered_on = power_on
self.fake_create_domain_called = False
def fake_execute(*args, **kwargs):
pass
def fake_plug_vifs(instance, network_info):
pass
def fake_create_domain(context, xml, instance, network_info,
block_device_info=None, power_on=None,
vifs_already_plugged=None):
self.fake_create_domain_called = True
self.assertEqual(powered_on, power_on)
self.assertTrue(vifs_already_plugged)
return mock.MagicMock()
def fake_enable_hairpin():
pass
def fake_get_info(instance):
if powered_on:
return hardware.InstanceInfo(state=power_state.RUNNING)
else:
return hardware.InstanceInfo(state=power_state.SHUTDOWN)
def fake_to_xml(context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None):
return ""
self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
self.stubs.Set(self.drvr, 'firewall_driver', fw)
self.stubs.Set(self.drvr, '_create_domain_and_network',
fake_create_domain)
self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
fake_enable_hairpin)
self.stubs.Set(self.drvr, 'get_info',
fake_get_info)
self.stubs.Set(utils, 'get_image_from_system_metadata',
lambda *a: self.test_image_meta)
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
ins_ref = self._create_instance()
os.mkdir(os.path.join(tmpdir, ins_ref['name']))
libvirt_xml_path = os.path.join(tmpdir,
ins_ref['name'],
'libvirt.xml')
f = open(libvirt_xml_path, 'w')
f.close()
self.drvr.finish_revert_migration(
context.get_admin_context(), ins_ref,
[], None, power_on)
self.assertTrue(self.fake_create_domain_called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(False)
def _test_finish_revert_migration_after_crash(self, backup_made=True,
del_inst_failed=False):
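# If a '<instance path>_resize' backup exists it should be renamed back
# into place; otherwise no rename should happen.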
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend
context = 'fake_context'
ins_ref = self._create_instance()
with test.nested(
mock.patch.object(os.path, 'exists', return_value=backup_made),
mock.patch.object(libvirt_utils, 'get_instance_path'),
mock.patch.object(os, 'rename'),
mock.patch.object(drvr, '_create_domain_and_network'),
mock.patch.object(drvr, '_get_guest_xml'),
mock.patch.object(shutil, 'rmtree'),
mock.patch.object(loopingcall, 'FixedIntervalLoopingCall'),
) as (mock_stat, mock_path, mock_rename, mock_cdn, mock_ggx,
mock_rmtree, mock_looping_call):
mock_path.return_value = '/fake/foo'
if del_inst_failed:
mock_rmtree.side_effect = OSError(errno.ENOENT,
'test exception')
drvr.finish_revert_migration(context, ins_ref, [])
if backup_made:
mock_rename.assert_called_once_with('/fake/foo_resize',
'/fake/foo')
else:
self.assertFalse(mock_rename.called)
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(backup_made=True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(backup_made=True)
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(backup_made=False)
def test_finish_revert_migration_after_crash_delete_failed(self):
self._test_finish_revert_migration_after_crash(backup_made=True,
del_inst_failed=True)
def test_finish_revert_migration_preserves_disk_bus(self):
def fake_get_guest_xml(context, instance, network_info, disk_info,
image_meta, block_device_info=None):
self.assertEqual('ide', disk_info['disk_bus'])
image_meta = {"disk_format": "raw",
"properties": {"hw_disk_bus": "ide"}}
instance = self._create_instance()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(drvr, 'image_backend'),
mock.patch.object(drvr, '_create_domain_and_network'),
mock.patch.object(utils, 'get_image_from_system_metadata',
return_value=image_meta),
mock.patch.object(drvr, '_get_guest_xml',
side_effect=fake_get_guest_xml)):
drvr.finish_revert_migration('', instance, None, power_on=False)
def test_finish_revert_migration_snap_backend(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend
ins_ref = self._create_instance()
with test.nested(
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(drvr, '_create_domain_and_network'),
mock.patch.object(drvr, '_get_guest_xml')) as (
mock_image, mock_cdn, mock_ggx):
mock_image.return_value = {'disk_format': 'raw'}
drvr.finish_revert_migration('', ins_ref, None, power_on=False)
drvr.image_backend.rollback_to_snap.assert_called_once_with(
libvirt_utils.RESIZE_SNAPSHOT_NAME)
drvr.image_backend.remove_snap.assert_called_once_with(
libvirt_utils.RESIZE_SNAPSHOT_NAME)
def test_finish_revert_migration_snap_backend_snapshot_not_found(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend
ins_ref = self._create_instance()
with test.nested(
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(drvr, '_create_domain_and_network'),
mock.patch.object(drvr, '_get_guest_xml')) as (
mock_image, mock_cdn, mock_ggx):
mock_image.return_value = {'disk_format': 'raw'}
drvr.image_backend.rollback_to_snap.side_effect = (
exception.SnapshotNotFound(snapshot_id='testing'))
self.assertRaises(exception.SnapshotNotFound,
drvr.finish_revert_migration,
'', ins_ref, None, power_on=False)
drvr.image_backend.remove_snap.assert_not_called()
def test_finish_revert_migration_snap_backend_image_does_not_exist(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend
drvr.image_backend.exists.return_value = False
ins_ref = self._create_instance()
with test.nested(
mock.patch.object(rbd_utils, 'RBDDriver'),
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(drvr, '_create_domain_and_network'),
mock.patch.object(drvr, '_get_guest_xml')) as (
mock_rbd, mock_image, mock_cdn, mock_ggx):
mock_image.return_value = {'disk_format': 'raw'}
drvr.finish_revert_migration('', ins_ref, None, power_on=False)
self.assertFalse(drvr.image_backend.rollback_to_snap.called)
self.assertFalse(drvr.image_backend.remove_snap.called)
def test_cleanup_failed_migration(self):
self.mox.StubOutWithMock(shutil, 'rmtree')
shutil.rmtree('/fake/inst')
self.mox.ReplayAll()
self.drvr._cleanup_failed_migration('/fake/inst')
def test_confirm_migration(self):
ins_ref = self._create_instance()
self.mox.StubOutWithMock(self.drvr, "_cleanup_resize")
self.drvr._cleanup_resize(self.context, ins_ref,
_fake_network_info(self, 1))
self.mox.ReplayAll()
self.drvr.confirm_migration(self.context, "migration_ref", ins_ref,
_fake_network_info(self, 1))
def test_cleanup_resize_same_host(self):
CONF.set_override('policy_dirs', [], group='oslo_policy')
ins_ref = self._create_instance({'host': CONF.host})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend
with test.nested(
mock.patch.object(os.path, 'exists'),
mock.patch.object(libvirt_utils, 'get_instance_path'),
mock.patch.object(shutil, 'rmtree')) as (
mock_exists, mock_get_path, mock_rmtree):
mock_exists.return_value = True
mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(
self.context, ins_ref, _fake_network_info(self, 1))
mock_get_path.assert_called_once_with(ins_ref)
self.assertEqual(5, mock_rmtree.call_count)
def test_cleanup_resize_not_same_host(self):
CONF.set_override('policy_dirs', [], group='oslo_policy')
host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host})
fake_net = _fake_network_info(self, 1)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False),
mock.patch.object(os.path, 'exists'),
mock.patch.object(libvirt_utils, 'get_instance_path'),
mock.patch.object(shutil, 'rmtree'),
mock.patch.object(drvr.image_backend, 'by_name',
new_callable=mock.NonCallableMock),
mock.patch.object(drvr, '_undefine_domain'),
mock.patch.object(drvr, 'unplug_vifs'),
mock.patch.object(drvr, 'unfilter_instance')
) as (mock_volume_backed, mock_exists, mock_get_path,
mock_rmtree, mock_image_by_name, mock_undef, mock_unplug,
mock_unfilter):
mock_exists.return_value = True
mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(self.context, ins_ref, fake_net)
mock_get_path.assert_called_once_with(ins_ref)
self.assertEqual(5, mock_rmtree.call_count)
mock_undef.assert_called_once_with(ins_ref)
mock_unplug.assert_called_once_with(ins_ref, fake_net)
mock_unfilter.assert_called_once_with(ins_ref, fake_net)
def test_cleanup_resize_not_same_host_volume_backed(self):
"""Tests cleaning up after a resize is confirmed with a volume-backed
instance. The key point is that the instance base directory should not
be removed for volume-backed instances.
"""
CONF.set_override('policy_dirs', [], group='oslo_policy')
host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host})
fake_net = _fake_network_info(self, 1)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend
drvr.image_backend.exists.return_value = False
with test.nested(
mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True),
mock.patch.object(os.path, 'exists'),
mock.patch.object(libvirt_utils, 'get_instance_path'),
mock.patch.object(shutil, 'rmtree'),
mock.patch.object(drvr, '_undefine_domain'),
mock.patch.object(drvr, 'unplug_vifs'),
mock.patch.object(drvr, 'unfilter_instance')
) as (mock_volume_backed, mock_exists, mock_get_path,
mock_rmtree, mock_undef, mock_unplug, mock_unfilter):
mock_exists.return_value = True
mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(self.context, ins_ref, fake_net)
mock_get_path.assert_called_once_with(ins_ref)
self.assertEqual(5, mock_rmtree.call_count)
mock_undef.assert_called_once_with(ins_ref)
mock_unplug.assert_called_once_with(ins_ref, fake_net)
mock_unfilter.assert_called_once_with(ins_ref, fake_net)
def test_cleanup_resize_snap_backend(self):
CONF.set_override('policy_dirs', [], group='oslo_policy')
self.flags(images_type='rbd', group='libvirt')
ins_ref = self._create_instance({'host': CONF.host})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend
with test.nested(
mock.patch.object(os.path, 'exists'),
mock.patch.object(libvirt_utils, 'get_instance_path'),
mock.patch.object(shutil, 'rmtree'),
mock.patch.object(drvr.image_backend, 'remove_snap')) as (
mock_exists, mock_get_path, mock_rmtree, mock_remove):
mock_exists.return_value = True
mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(
self.context, ins_ref, _fake_network_info(self, 1))
mock_get_path.assert_called_once_with(ins_ref)
mock_remove.assert_called_once_with(
libvirt_utils.RESIZE_SNAPSHOT_NAME)
self.assertEqual(5, mock_rmtree.call_count)
def test_cleanup_resize_snap_backend_image_does_not_exist(self):
CONF.set_override('policy_dirs', [], group='oslo_policy')
ins_ref = self._create_instance({'host': CONF.host})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend
drvr.image_backend.exists.return_value = False
with test.nested(
mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False),
mock.patch.object(os.path, 'exists'),
mock.patch.object(libvirt_utils, 'get_instance_path'),
mock.patch.object(shutil, 'rmtree'),
mock.patch.object(drvr.image_backend, 'remove_snap')) as (
mock_volume_backed, mock_exists, mock_get_path,
mock_rmtree, mock_remove):
mock_exists.return_value = True
mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(
self.context, ins_ref, _fake_network_info(self, 1))
mock_get_path.assert_called_once_with(ins_ref)
self.assertFalse(mock_remove.called)
mock_rmtree.called_once_with('/fake/inst')
def test_get_instance_disk_info_exception(self):
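# A libvirtError raised while fetching the domain XML should surface
# as InstanceNotFound.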
instance = self._create_instance()
class FakeExceptionDomain(FakeVirtDomain):
def __init__(self):
super(FakeExceptionDomain, self).__init__()
def XMLDesc(self, flags):
raise fakelibvirt.libvirtError("Libvirt error")
def fake_get_domain(self, instance):
return FakeExceptionDomain()
self.stubs.Set(host.Host, '_get_domain',
fake_get_domain)
self.assertRaises(exception.InstanceNotFound,
self.drvr.get_instance_disk_info,
instance)
@mock.patch('os.path.exists')
@mock.patch.object(lvm, 'list_volumes')
def test_lvm_disks(self, listlvs, exists):
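# Only logical volumes prefixed with the instance uuid should be returned.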
instance = objects.Instance(uuid=uuids.instance, id=1)
self.flags(images_volume_group='vols', group='libvirt')
exists.return_value = True
listlvs.return_value = ['%s_foo' % uuids.instance,
'other-uuid_foo']
disks = self.drvr._lvm_disks(instance)
self.assertEqual(['/dev/vols/%s_foo' % uuids.instance], disks)
def test_is_booted_from_volume(self):
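# Only a block device mapping with boot_index 0 marks the instance as
# booted from volume.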
func = libvirt_driver.LibvirtDriver._is_booted_from_volume
bdm = []
bdi = {'block_device_mapping': bdm}
self.assertFalse(func(bdi))
bdm.append({'boot_index': -1})
self.assertFalse(func(bdi))
bdm.append({'boot_index': None})
self.assertFalse(func(bdi))
bdm.append({'boot_index': 1})
self.assertFalse(func(bdi))
bdm.append({'boot_index': 0})
self.assertTrue(func(bdi))
@mock.patch('nova.virt.libvirt.driver.imagebackend')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._inject_data')
@mock.patch('nova.virt.libvirt.driver.imagecache')
def test_data_not_injects_with_configdrive(self, mock_image, mock_inject,
mock_backend):
self.flags(inject_partition=-1, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# config_drive is True by default, configdrive.required_by()
# returns True
instance_ref = self._create_instance()
disk_images = {'image_id': None}
drvr._create_and_inject_local_root(self.context, instance_ref, False,
'', disk_images, get_injection_info(),
None)
self.assertFalse(mock_inject.called)
@mock.patch('nova.virt.netutils.get_injected_network_template')
@mock.patch('nova.virt.disk.api.inject_data')
@mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
def _test_inject_data(self, instance, injection_info, path, disk_params,
mock_conn, disk_inject_data, inj_network,
called=True):
class ImageBackend(object):
path = '/path'
def get_model(self, connection):
return imgmodel.LocalFileImage(self.path,
imgmodel.FORMAT_RAW)
def fake_inj_network(*args, **kwds):
return args[0] or None
inj_network.side_effect = fake_inj_network
image_backend = ImageBackend()
image_backend.path = path
with mock.patch.object(self.drvr.image_backend, 'by_name',
return_value=image_backend):
self.flags(inject_partition=0, group='libvirt')
self.drvr._inject_data(image_backend, instance, injection_info)
if called:
disk_inject_data.assert_called_once_with(
mock.ANY,
*disk_params,
partition=None, mandatory=('files',))
self.assertEqual(disk_inject_data.called, called)
def test_inject_data_adminpass(self):
self.flags(inject_password=True, group='libvirt')
instance = self._create_instance()
injection_info = get_injection_info(admin_pass='foobar')
disk_params = [
None, # key
None, # net
{}, # metadata
'foobar', # admin_pass
None, # files
]
self._test_inject_data(instance, injection_info, "/path", disk_params)
# Test with the configuration set to False.
self.flags(inject_password=False, group='libvirt')
self._test_inject_data(instance, injection_info, "/path", disk_params,
called=False)
def test_inject_data_key(self):
instance = self._create_instance(params={'key_data': 'key-content'})
injection_info = get_injection_info()
self.flags(inject_key=True, group='libvirt')
disk_params = [
'key-content', # key
None, # net
{}, # metadata
None, # admin_pass
None, # files
]
self._test_inject_data(instance, injection_info, "/path",
disk_params)
# Test with the configuration set to False.
self.flags(inject_key=False, group='libvirt')
self._test_inject_data(instance, injection_info, "/path", disk_params,
called=False)
def test_inject_data_metadata(self):
instance = self._create_instance(params={'metadata': {'data': 'foo'}})
injection_info = get_injection_info()
disk_params = [
None, # key
None, # net
{'data': 'foo'}, # metadata
None, # admin_pass
None, # files
]
self._test_inject_data(instance, injection_info, "/path", disk_params)
def test_inject_data_files(self):
instance = self._create_instance()
injection_info = get_injection_info(files=['file1', 'file2'])
disk_params = [
None, # key
None, # net
{}, # metadata
None, # admin_pass
['file1', 'file2'], # files
]
self._test_inject_data(instance, injection_info, "/path", disk_params)
def test_inject_data_net(self):
instance = self._create_instance()
injection_info = get_injection_info(network_info={'net': 'eno1'})
disk_params = [
None, # key
{'net': 'eno1'}, # net
{}, # metadata
None, # admin_pass
None, # files
]
self._test_inject_data(instance, injection_info, "/path", disk_params)
def test_inject_not_exist_image(self):
instance = self._create_instance()
injection_info = get_injection_info()
disk_params = [
'key-content', # key
None, # net
None, # metadata
None, # admin_pass
None, # files
]
self._test_inject_data(instance, injection_info, "/fail/path",
disk_params, called=False)
def test_attach_interface_build_metadata_fails(self):
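# If building device metadata fails, the driver should detach the
# interface again and raise InterfaceAttachFailed.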
instance = self._create_instance()
network_info = _fake_network_info(self, 1)
domain = FakeVirtDomain(fake_xml="""
<domain type='kvm'>
<devices>
<interface type='bridge'>
<mac address='52:54:00:f6:35:8f'/>
<model type='virtio'/>
<source bridge='br0'/>
<target dev='tap12345678'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x03' function='0x0'/>
</interface>
</devices>
</domain>""")
fake_image_meta = objects.ImageMeta.from_dict(
{'id': instance.image_ref})
expected = self.drvr.vif_driver.get_config(
instance, network_info[0], fake_image_meta, instance.flavor,
CONF.libvirt.virt_type, self.drvr._host)
with test.nested(
mock.patch.object(host.Host, '_get_domain', return_value=domain),
mock.patch.object(self.drvr.firewall_driver,
'setup_basic_filtering'),
mock.patch.object(domain, 'attachDeviceFlags'),
mock.patch.object(domain, 'info',
return_value=[power_state.RUNNING, 1, 2, 3, 4]),
mock.patch.object(self.drvr.vif_driver, 'get_config',
return_value=expected),
mock.patch.object(self.drvr, '_build_device_metadata',
side_effect=exception.NovaException),
mock.patch.object(self.drvr, 'detach_interface'),
) as (
mock_get_domain, mock_setup_basic_filtering,
mock_attach_device_flags, mock_info, mock_get_config,
mock_build_device_metadata, mock_detach_interface
):
self.assertRaises(exception.InterfaceAttachFailed,
self.drvr.attach_interface, self.context,
instance, fake_image_meta, network_info[0])
mock_get_domain.assert_called_with(instance)
mock_info.assert_called_with()
mock_setup_basic_filtering.assert_called_with(
instance, [network_info[0]])
mock_get_config.assert_called_with(
instance, network_info[0], fake_image_meta, instance.flavor,
CONF.libvirt.virt_type, self.drvr._host)
mock_build_device_metadata.assert_called_with(self.context,
instance)
mock_attach_device_flags.assert_called_with(
expected.to_xml(),
flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
mock_detach_interface.assert_called_with(self.context, instance,
network_info[0])
def _test_attach_interface(self, power_state, expected_flags):
instance = self._create_instance()
network_info = _fake_network_info(self, 1)
domain = FakeVirtDomain(fake_xml="""
<domain type='kvm'>
<devices>
<interface type='bridge'>
<mac address='52:54:00:f6:35:8f'/>
<model type='virtio'/>
<source bridge='br0'/>
<target dev='tap12345678'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x03' function='0x0'/>
</interface>
</devices>
</domain>""")
self.mox.StubOutWithMock(host.Host, '_get_domain')
self.mox.StubOutWithMock(self.drvr.firewall_driver,
'setup_basic_filtering')
self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
self.mox.StubOutWithMock(domain, 'info')
host.Host._get_domain(instance).AndReturn(domain)
domain.info().AndReturn([power_state, 1, 2, 3, 4])
self.drvr.firewall_driver.setup_basic_filtering(
instance, [network_info[0]])
fake_image_meta = objects.ImageMeta.from_dict(
{'id': instance.image_ref})
expected = self.drvr.vif_driver.get_config(
instance, network_info[0], fake_image_meta, instance.flavor,
CONF.libvirt.virt_type, self.drvr._host)
self.mox.StubOutWithMock(self.drvr.vif_driver,
'get_config')
self.drvr.vif_driver.get_config(
instance, network_info[0],
mox.IsA(objects.ImageMeta),
mox.IsA(objects.Flavor),
CONF.libvirt.virt_type,
self.drvr._host).AndReturn(expected)
self.mox.StubOutWithMock(self.drvr, '_build_device_metadata')
self.drvr._build_device_metadata(self.context, instance).AndReturn(
objects.InstanceDeviceMetadata())
self.mox.StubOutWithMock(objects.Instance, 'save')
objects.Instance.save()
domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags)
self.mox.ReplayAll()
self.drvr.attach_interface(
self.context, instance, fake_image_meta, network_info[0])
self.mox.VerifyAll()
def test_attach_interface_with_running_instance(self):
self._test_attach_interface(
power_state.RUNNING,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_attach_interface_with_pause_instance(self):
self._test_attach_interface(
power_state.PAUSED,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_attach_interface_with_shutdown_instance(self):
self._test_attach_interface(
power_state.SHUTDOWN,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
def _test_detach_interface(self, power_state, expected_flags,
device_not_found=False):
# setup some mocks
instance = self._create_instance()
network_info = _fake_network_info(self, 1)
domain = FakeVirtDomain(fake_xml="""
<domain type='kvm'>
<devices>
<interface type='bridge'>
<mac address='52:54:00:f6:35:8f'/>
<model type='virtio'/>
<source bridge='br0'/>
<target dev='tap12345678'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x03' function='0x0'/>
</interface>
</devices>
</domain>""",
info=[power_state, 1, 2, 3, 4])
guest = libvirt_guest.Guest(domain)
expected_cfg = vconfig.LibvirtConfigGuestInterface()
expected_cfg.parse_str("""
<interface type='bridge'>
<mac address='52:54:00:f6:35:8f'/>
<model type='virtio'/>
<source bridge='br0'/>
<target dev='tap12345678'/>
</interface>""")
if device_not_found:
# This will trigger detach_device_with_retry to raise
# DeviceNotFound
get_interface_calls = [expected_cfg, None]
else:
get_interface_calls = [expected_cfg, expected_cfg, None, None]
with test.nested(
mock.patch.object(host.Host, 'get_guest', return_value=guest),
mock.patch.object(self.drvr.vif_driver, 'get_config',
return_value=expected_cfg),
# This is called multiple times in a retry loop so we use a
# side_effect to simulate the calls to stop the loop.
mock.patch.object(guest, 'get_interface_by_cfg',
side_effect=get_interface_calls),
mock.patch.object(domain, 'detachDeviceFlags'),
mock.patch('nova.virt.libvirt.driver.LOG.warning')
) as (
mock_get_guest, mock_get_config,
mock_get_interface, mock_detach_device_flags,
mock_warning
):
# run the detach method
self.drvr.detach_interface(self.context, instance, network_info[0])
# make our assertions
mock_get_guest.assert_called_once_with(instance)
mock_get_config.assert_called_once_with(
instance, network_info[0], test.MatchType(objects.ImageMeta),
test.MatchType(objects.Flavor), CONF.libvirt.virt_type,
self.drvr._host)
mock_get_interface.assert_has_calls(
[mock.call(expected_cfg) for x in range(len(get_interface_calls))])
if device_not_found:
mock_detach_device_flags.assert_not_called()
self.assertTrue(mock_warning.called)
else:
mock_detach_device_flags.assert_called_once_with(
expected_cfg.to_xml(), flags=expected_flags)
mock_warning.assert_not_called()
def test_detach_interface_with_running_instance(self):
self._test_detach_interface(
power_state.RUNNING,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_detach_interface_with_running_instance_device_not_found(self):
"""Tests that the interface is detached before we try to detach it.
"""
self._test_detach_interface(
power_state.RUNNING,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE),
device_not_found=True)
def test_detach_interface_with_pause_instance(self):
self._test_detach_interface(
power_state.PAUSED,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_detach_interface_with_shutdown_instance(self):
self._test_detach_interface(
power_state.SHUTDOWN,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
@mock.patch('nova.virt.libvirt.driver.LOG')
def test_detach_interface_device_not_found(self, mock_log):
# Asserts that we don't log an error when the interface device is not
# found on the guest after a libvirt error during detach.
instance = self._create_instance()
vif = _fake_network_info(self, 1)[0]
guest = mock.Mock(spec='nova.virt.libvirt.guest.Guest')
guest.get_power_state = mock.Mock()
self.drvr._host.get_guest = mock.Mock(return_value=guest)
error = fakelibvirt.libvirtError(
'no matching network device was found')
error.err = (fakelibvirt.VIR_ERR_OPERATION_FAILED,)
guest.detach_device = mock.Mock(side_effect=error)
# mock out that get_interface_by_cfg doesn't find the interface
guest.get_interface_by_cfg = mock.Mock(return_value=None)
self.drvr.detach_interface(self.context, instance, vif)
# an error shouldn't be logged, but a warning should be logged
self.assertFalse(mock_log.error.called)
self.assertEqual(1, mock_log.warning.call_count)
self.assertIn('the device is no longer found on the guest',
six.text_type(mock_log.warning.call_args[0]))
def test_detach_interface_device_with_same_mac_address(self):
instance = self._create_instance()
network_info = _fake_network_info(self, 1)
domain = FakeVirtDomain(fake_xml="""
<domain type='kvm'>
<devices>
<interface type='bridge'>
<mac address='52:54:00:f6:35:8f'/>
<model type='virtio'/>
<source bridge='br0'/>
<target dev='tap12345678'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x03' function='0x0'/>
</interface>
<interface type='bridge'>
<mac address='52:54:00:f6:35:8f'/>
<model type='virtio'/>
<source bridge='br1'/>
<target dev='tap87654321'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x03' function='0x1'/>
</interface>
</devices>
</domain>""")
self.mox.StubOutWithMock(host.Host, '_get_domain')
self.mox.StubOutWithMock(self.drvr.firewall_driver,
'setup_basic_filtering')
self.mox.StubOutWithMock(domain, 'detachDeviceFlags')
self.mox.StubOutWithMock(domain, 'info')
host.Host._get_domain(instance).AndReturn(domain)
domain.info().AndReturn([power_state.RUNNING, 1, 2, 3, 4])
expected = vconfig.LibvirtConfigGuestInterface()
expected.parse_str("""
<interface type='bridge'>
<mac address='52:54:00:f6:35:8f'/>
<model type='virtio'/>
<source bridge='br0'/>
<target dev='tap12345678'/>
</interface>""")
self.mox.StubOutWithMock(self.drvr.vif_driver, 'get_config')
self.drvr.vif_driver.get_config(
instance, network_info[0],
mox.IsA(objects.ImageMeta),
mox.IsA(objects.Flavor),
CONF.libvirt.virt_type,
self.drvr._host).AndReturn(expected)
expected_flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
domain.detachDeviceFlags(expected.to_xml(), flags=expected_flags)
self.mox.ReplayAll()
with mock.patch.object(libvirt_guest.Guest, 'get_interface_by_cfg',
side_effect=[expected, expected, None, None]):
self.drvr.detach_interface(self.context, instance, network_info[0])
self.mox.VerifyAll()
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
'_get_all_assigned_mediated_devices')
@mock.patch('nova.virt.libvirt.utils.write_to_file')
# NOTE(mdbooth): The following 4 mocks are required to execute
# get_guest_xml().
@mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
@mock.patch('nova.privsep.utils.supports_direct_io')
@mock.patch('nova.api.metadata.base.InstanceMetadata')
def _test_rescue(self, instance,
mock_instance_metadata, mock_supports_direct_io,
mock_build_device_metadata, mock_set_host_enabled,
mock_write_to_file,
mock_get_mdev,
exists=None):
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
mock_build_device_metadata.return_value = None
mock_supports_direct_io.return_value = True
mock_get_mdev.return_value = {uuids.mdev1: uuids.inst1}
backend = self.useFixture(
fake_imagebackend.ImageBackendFixture(exists=exists))
image_meta = objects.ImageMeta.from_dict(
{'id': uuids.image_id, 'name': 'fake'})
network_info = _fake_network_info(self, 1)
rescue_password = 'fake_password'
domain_xml = [None]
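        # Capture the XML passed to _create_domain so the test can assert
        # on the generated rescue domain, and run any post-XML callback
        # just like the real method would.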
def fake_create_domain(xml=None, domain=None, power_on=True,
pause=False, post_xml_callback=None):
domain_xml[0] = xml
if post_xml_callback is not None:
post_xml_callback()
with mock.patch.object(
self.drvr, '_create_domain',
side_effect=fake_create_domain) as mock_create_domain:
self.drvr.rescue(self.context, instance,
network_info, image_meta, rescue_password)
self.assertTrue(mock_create_domain.called)
return backend, etree.fromstring(domain_xml[0])
def test_rescue(self):
instance = self._create_instance({'config_drive': None})
backend, doc = self._test_rescue(instance)
# Assert that we created the expected set of disks, and no others
self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'],
sorted(backend.created_disks.keys()))
disks = backend.disks
kernel_ramdisk = [disks[name + '.rescue']
for name in ('kernel', 'ramdisk')]
# Assert that kernel and ramdisk were both created as raw
for disk in kernel_ramdisk:
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
self.assertIsNone(disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue and
# disk, in that order
expected_domain_disk_paths = [disks[name].path for name in
('disk.rescue', 'disk')]
domain_disk_paths = doc.xpath('devices/disk/source/@file')
self.assertEqual(expected_domain_disk_paths, domain_disk_paths)
# The generated domain xml should contain the rescue kernel
# and ramdisk
expected_kernel_ramdisk_paths = [os.path.join(CONF.instances_path,
disk.path) for disk
in kernel_ramdisk]
kernel_ramdisk_paths = \
doc.xpath('os/*[self::initrd|self::kernel]/text()')
self.assertEqual(expected_kernel_ramdisk_paths,
kernel_ramdisk_paths)
# The generated domain XML should also contain any existing mdev
self.assertEqual(
[uuids.mdev1],
doc.xpath("devices/*[@type='mdev']/source/address/@uuid"))
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder._make_iso9660')
def test_rescue_config_drive(self, mock_mkisofs):
instance = self._create_instance({'config_drive': str(True)})
backend, doc = self._test_rescue(
instance, exists=lambda name: name != 'disk.config.rescue')
# Assert that we created the expected set of disks, and no others
self.assertEqual(['disk.config.rescue', 'disk.rescue', 'kernel.rescue',
'ramdisk.rescue'],
sorted(backend.created_disks.keys()))
disks = backend.disks
config_disk = disks['disk.config.rescue']
kernel_ramdisk = [disks[name + '.rescue']
for name in ('kernel', 'ramdisk')]
# Assert that we imported the config disk
self.assertTrue(config_disk.import_file.called)
# Assert that the config disk, kernel and ramdisk were created as raw
for disk in [config_disk] + kernel_ramdisk:
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
self.assertIsNone(disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue, disk, and
# disk.config.rescue in that order
expected_domain_disk_paths = [disks[name].path for name
in ('disk.rescue', 'disk',
'disk.config.rescue')]
domain_disk_paths = doc.xpath('devices/disk/source/@file')
self.assertEqual(expected_domain_disk_paths, domain_disk_paths)
# The generated domain xml should contain the rescue kernel
# and ramdisk
expected_kernel_ramdisk_paths = [os.path.join(CONF.instances_path,
disk.path)
for disk in kernel_ramdisk]
kernel_ramdisk_paths = \
doc.xpath('os/*[self::initrd|self::kernel]/text()')
self.assertEqual(expected_kernel_ramdisk_paths,
kernel_ramdisk_paths)
@mock.patch.object(libvirt_utils, 'get_instance_path')
@mock.patch.object(libvirt_utils, 'load_file')
@mock.patch.object(host.Host, '_get_domain')
def _test_unrescue(self, instance, mock_get_domain, mock_load_file,
mock_get_instance_path):
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='block' device='disk'>"
"<source dev='/dev/some-vg/some-lv'/>"
"<target dev='vda' bus='virtio'/></disk>"
"</devices></domain>")
mock_get_instance_path.return_value = '/path'
fake_dom = FakeVirtDomain(fake_xml=dummyxml)
mock_get_domain.return_value = fake_dom
mock_load_file.return_value = "fake_unrescue_xml"
unrescue_xml_path = os.path.join('/path', 'unrescue.xml')
rescue_file = os.path.join('/path', 'rescue.file')
rescue_dir = os.path.join('/path', 'rescue.dir')
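        # Pretend rescue.file is a plain file and rescue.dir is a
        # directory so both the unlink and rmtree cleanup paths are
        # exercised.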
def isdir_sideeffect(*args, **kwargs):
if args[0] == '/path/rescue.file':
return False
if args[0] == '/path/rescue.dir':
return True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(libvirt_utils, 'write_to_file'),
mock.patch.object(drvr, '_destroy'),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(os, 'unlink'),
mock.patch.object(shutil, 'rmtree'),
mock.patch.object(os.path, "isdir",
side_effect=isdir_sideeffect),
mock.patch.object(drvr, '_lvm_disks',
return_value=['lvm.rescue']),
mock.patch.object(lvm, 'remove_volumes'),
mock.patch.object(glob, 'iglob',
return_value=[rescue_file, rescue_dir])
) as (mock_write, mock_destroy, mock_create, mock_del,
mock_rmtree, mock_isdir, mock_lvm_disks,
mock_remove_volumes, mock_glob):
drvr.unrescue(instance, None)
mock_destroy.assert_called_once_with(instance)
mock_create.assert_called_once_with("fake_unrescue_xml",
fake_dom)
self.assertEqual(2, mock_del.call_count)
self.assertEqual(unrescue_xml_path,
mock_del.call_args_list[0][0][0])
self.assertEqual(1, mock_rmtree.call_count)
self.assertEqual(rescue_dir, mock_rmtree.call_args_list[0][0][0])
self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0])
mock_remove_volumes.assert_called_once_with(['lvm.rescue'])
def test_unrescue(self):
instance = objects.Instance(uuid=uuids.instance, id=1)
self._test_unrescue(instance)
@mock.patch.object(rbd_utils.RBDDriver, '_destroy_volume')
@mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados')
@mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados')
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
def test_unrescue_rbd(self, mock_rados, mock_rbd, mock_connect,
mock_disconnect, mock_destroy_volume):
self.flags(images_type='rbd', group='libvirt')
mock_connect.return_value = mock.MagicMock(), mock.MagicMock()
instance = objects.Instance(uuid=uuids.instance, id=1)
all_volumes = [uuids.other_instance + '_disk',
uuids.other_instance + '_disk.rescue',
instance.uuid + '_disk',
instance.uuid + '_disk.rescue']
mock_rbd.RBD.return_value.list.return_value = all_volumes
self._test_unrescue(instance)
mock_destroy_volume.assert_called_once_with(
mock.ANY, instance.uuid + '_disk.rescue')
@mock.patch('shutil.rmtree')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files(self, mock_get_instance_path,
mock_exists, mock_rename,
mock_shutil):
mock_get_instance_path.return_value = '/path'
instance = objects.Instance(uuid=uuids.instance, id=1)
mock_exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
mock_get_instance_path.assert_called_with(instance)
mock_rename.assert_called_with('/path', '/path_del')
mock_shutil.assert_called_with('/path_del')
self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('os.kill')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_kill_running(
self, mock_get_instance_path, mock_kill, mock_exists,
mock_rename, mock_shutil):
mock_get_instance_path.return_value = '/path'
instance = objects.Instance(uuid=uuids.instance, id=1)
self.drvr.job_tracker.jobs[instance.uuid] = [3, 4]
mock_exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
mock_get_instance_path.assert_called_with(instance)
mock_rename.assert_called_with('/path', '/path_del')
mock_kill.assert_has_calls(
[mock.call(3, signal.SIGKILL), mock.call(3, 0),
mock.call(4, signal.SIGKILL), mock.call(4, 0)])
mock_shutil.assert_called_with('/path_del')
self.assertTrue(result)
self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs)
@mock.patch('shutil.rmtree')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resize(self, mock_get_instance_path,
mock_exists, mock_rename,
mock_shutil):
mock_get_instance_path.return_value = '/path'
instance = objects.Instance(uuid=uuids.instance, id=1)
mock_rename.side_effect = [Exception(), None]
mock_exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
mock_get_instance_path.assert_called_with(instance)
expected = [mock.call('/path', '/path_del'),
mock.call('/path_resize', '/path_del')]
self.assertEqual(expected, mock_rename.mock_calls)
mock_shutil.assert_called_with('/path_del')
self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_failed(self, mock_get_instance_path,
mock_exists, mock_rename,
mock_shutil):
mock_get_instance_path.return_value = '/path'
instance = objects.Instance(uuid=uuids.instance, id=1)
mock_exists.side_effect = [False, False, True, True]
result = self.drvr.delete_instance_files(instance)
mock_get_instance_path.assert_called_with(instance)
mock_rename.assert_called_with('/path', '/path_del')
mock_shutil.assert_called_with('/path_del')
self.assertFalse(result)
@mock.patch('shutil.rmtree')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_mv_failed(self, mock_get_instance_path,
mock_exists, mock_rename,
mock_shutil):
mock_get_instance_path.return_value = '/path'
instance = objects.Instance(uuid=uuids.instance, id=1)
mock_rename.side_effect = Exception()
mock_exists.side_effect = [True, True]
result = self.drvr.delete_instance_files(instance)
mock_get_instance_path.assert_called_with(instance)
expected = [mock.call('/path', '/path_del'),
mock.call('/path_resize', '/path_del')] * 2
self.assertEqual(expected, mock_rename.mock_calls)
self.assertFalse(result)
@mock.patch('shutil.rmtree')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resume(self, mock_get_instance_path,
mock_exists, mock_rename,
mock_shutil):
mock_get_instance_path.return_value = '/path'
instance = objects.Instance(uuid=uuids.instance, id=1)
mock_rename.side_effect = Exception()
mock_exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
mock_get_instance_path.assert_called_with(instance)
expected = [mock.call('/path', '/path_del'),
mock.call('/path_resize', '/path_del')] * 2
self.assertEqual(expected, mock_rename.mock_calls)
self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_none(self, mock_get_instance_path,
mock_exists, mock_rename,
mock_shutil):
mock_get_instance_path.return_value = '/path'
instance = objects.Instance(uuid=uuids.instance, id=1)
mock_rename.side_effect = Exception()
mock_exists.side_effect = [False, False, False, False]
result = self.drvr.delete_instance_files(instance)
mock_get_instance_path.assert_called_with(instance)
expected = [mock.call('/path', '/path_del'),
mock.call('/path_resize', '/path_del')] * 2
self.assertEqual(expected, mock_rename.mock_calls)
self.assertEqual(0, len(mock_shutil.mock_calls))
self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_concurrent(self, mock_get_instance_path,
mock_exists, mock_rename,
mock_shutil):
mock_get_instance_path.return_value = '/path'
instance = objects.Instance(uuid=uuids.instance, id=1)
mock_rename.side_effect = [Exception(), Exception(), None]
mock_exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
mock_get_instance_path.assert_called_with(instance)
expected = [mock.call('/path', '/path_del'),
mock.call('/path_resize', '/path_del')]
expected.append(expected[0])
self.assertEqual(expected, mock_rename.mock_calls)
mock_shutil.assert_called_with('/path_del')
self.assertTrue(result)
def _assert_on_id_map(self, idmap, klass, start, target, count):
self.assertIsInstance(idmap, klass)
self.assertEqual(start, idmap.start)
self.assertEqual(target, idmap.target)
self.assertEqual(count, idmap.count)
def test_get_id_maps(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.virt_type = "lxc"
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(len(idmaps), 4)
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestUIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestUIDMap,
1, 20000, 10)
self._assert_on_id_map(idmaps[2],
vconfig.LibvirtConfigGuestGIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[3],
vconfig.LibvirtConfigGuestGIDMap,
1, 20000, 10)
def test_get_id_maps_not_lxc(self):
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(0, len(idmaps))
def test_get_id_maps_only_uid(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = []
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(2, len(idmaps))
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestUIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestUIDMap,
1, 20000, 10)
def test_get_id_maps_only_gid(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.uid_maps = []
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(2, len(idmaps))
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestGIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestGIDMap,
1, 20000, 10)
def test_instance_on_disk(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(uuid=uuids.instance, id=1)
self.assertFalse(drvr.instance_on_disk(instance))
def test_instance_on_disk_rbd(self):
self.flags(images_type='rbd', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(uuid=uuids.instance, id=1)
self.assertTrue(drvr.instance_on_disk(instance))
def test_get_disk_xml(self):
dom_xml = """
<domain type="kvm">
<devices>
<disk type="file">
<source file="disk1_file"/>
<target dev="vda" bus="virtio"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type="block">
<source dev="/path/to/dev/1"/>
<target dev="vdb" bus="virtio" serial="1234"/>
</disk>
</devices>
</domain>
"""
diska_xml = """<disk type="file" device="disk">
<source file="disk1_file"/>
<target bus="virtio" dev="vda"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""
diskb_xml = """<disk type="block" device="disk">
<source dev="/path/to/dev/1"/>
<target bus="virtio" dev="vdb"/>
</disk>"""
dom = mock.MagicMock()
dom.XMLDesc.return_value = dom_xml
guest = libvirt_guest.Guest(dom)
        # NOTE(gcb): etree.tostring(node) returns an extra line containing
        # some whitespace, so strip it before comparing.
actual_diska_xml = guest.get_disk('vda').to_xml()
self.assertEqual(diska_xml.strip(), actual_diska_xml.strip())
actual_diskb_xml = guest.get_disk('vdb').to_xml()
self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip())
self.assertIsNone(guest.get_disk('vdc'))
def test_vcpu_model_from_config(self):
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
vcpu_model = drv._cpu_config_to_vcpu_model(None, None)
self.assertIsNone(vcpu_model)
cpu = vconfig.LibvirtConfigGuestCPU()
feature1 = vconfig.LibvirtConfigGuestCPUFeature()
feature2 = vconfig.LibvirtConfigGuestCPUFeature()
feature1.name = 'sse'
feature1.policy = fields.CPUFeaturePolicy.REQUIRE
feature2.name = 'aes'
feature2.policy = fields.CPUFeaturePolicy.REQUIRE
cpu.features = set([feature1, feature2])
cpu.mode = fields.CPUMode.CUSTOM
cpu.sockets = 1
cpu.cores = 2
cpu.threads = 4
vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None)
self.assertEqual(fields.CPUMatch.EXACT, vcpu_model.match)
self.assertEqual(fields.CPUMode.CUSTOM, vcpu_model.mode)
self.assertEqual(4, vcpu_model.topology.threads)
self.assertEqual(set(['sse', 'aes']),
set([f.name for f in vcpu_model.features]))
cpu.mode = fields.CPUMode.HOST_MODEL
vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model)
self.assertEqual(fields.CPUMode.HOST_MODEL, vcpu_model.mode)
self.assertEqual(vcpu_model, vcpu_model_1)
@mock.patch('nova.virt.disk.api.get_disk_size', return_value=10)
@mock.patch.object(lvm, 'get_volume_size', return_value=10)
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(dmcrypt, 'delete_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
@mock.patch.object(objects.Instance, 'save')
def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain,
mock_unfilter, mock_delete_volume,
mock_get_guest, mock_get_lvm_size,
mock_get_size):
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance = objects.Instance(
uuid=uuids.instance, id=1,
ephemeral_key_uuid=uuids.ephemeral_key_uuid)
instance.system_metadata = {}
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [],
'block_device_mapping': []}
self.flags(images_type="lvm",
group='libvirt')
dom_xml = """
<domain type="kvm">
<devices>
<disk type="block">
<driver name='qemu' type='raw' cache='none'/>
<source dev="/dev/mapper/fake-dmcrypt"/>
<target dev="vda" bus="virtio" serial="1234"/>
</disk>
</devices>
</domain>
"""
dom = mock.MagicMock()
dom.XMLDesc.return_value = dom_xml
guest = libvirt_guest.Guest(dom)
mock_get_guest.return_value = guest
drv.cleanup(self.context, instance, 'fake_network', destroy_vifs=False,
block_device_info=block_device_info)
mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt')
@mock.patch('nova.virt.disk.api.get_disk_size', return_value=10)
@mock.patch.object(lvm, 'get_volume_size', return_value=10)
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(dmcrypt, 'delete_volume')
def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest,
mock_lvm_size, mock_get_size, encrypted=False):
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance = objects.Instance(
uuid=uuids.instance, id=1,
ephemeral_key_uuid=uuids.ephemeral_key_uuid)
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [],
'block_device_mapping': []}
dev_name = 'fake-dmcrypt' if encrypted else 'fake'
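        # dmcrypt.delete_volume should only be called for dmcrypt-backed
        # (encrypted) disks.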
dom_xml = """
<domain type="kvm">
<devices>
<disk type="block">
<driver name='qemu' type='raw' cache='none'/>
<source dev="/dev/mapper/%s"/>
<target dev="vda" bus="virtio" serial="1234"/>
</disk>
</devices>
</domain>
""" % dev_name
dom = mock.MagicMock()
dom.XMLDesc.return_value = dom_xml
guest = libvirt_guest.Guest(dom)
mock_get_guest.return_value = guest
drv._cleanup_lvm(instance, block_device_info)
if encrypted:
mock_delete_volume.assert_called_once_with(
'/dev/mapper/fake-dmcrypt')
else:
self.assertFalse(mock_delete_volume.called)
def test_cleanup_lvm(self):
self._test_cleanup_lvm()
def test_cleanup_encrypted_lvm(self):
self._test_cleanup_lvm(encrypted=True)
def test_vcpu_model_to_config(self):
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
feature = objects.VirtCPUFeature(
policy=fields.CPUFeaturePolicy.REQUIRE, name='sse')
feature_1 = objects.VirtCPUFeature(
policy=fields.CPUFeaturePolicy.FORBID, name='aes')
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
vcpu_model = objects.VirtCPUModel(mode=fields.CPUMode.HOST_MODEL,
features=[feature, feature_1],
topology=topo)
cpu = drv._vcpu_model_to_cpu_config(vcpu_model)
self.assertEqual(fields.CPUMode.HOST_MODEL, cpu.mode)
self.assertEqual(1, cpu.sockets)
self.assertEqual(4, cpu.threads)
self.assertEqual(2, len(cpu.features))
self.assertEqual(set(['sse', 'aes']),
set([f.name for f in cpu.features]))
self.assertEqual(set([fields.CPUFeaturePolicy.REQUIRE,
fields.CPUFeaturePolicy.FORBID]),
set([f.policy for f in cpu.features]))
def test_trigger_crash_dump(self):
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
instance = objects.Instance(uuid=uuids.instance, id=1)
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
self.drvr.trigger_crash_dump(instance)
def test_trigger_crash_dump_not_running(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'Requested operation is not valid: domain is not running',
error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.inject_nmi = mock.Mock(side_effect=ex)
instance = objects.Instance(uuid=uuids.instance, id=1)
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
self.assertRaises(exception.InstanceNotRunning,
self.drvr.trigger_crash_dump, instance)
def test_trigger_crash_dump_not_supported(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.inject_nmi = mock.Mock(side_effect=ex)
instance = objects.Instance(uuid=uuids.instance, id=1)
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
self.assertRaises(exception.TriggerCrashDumpNotSupported,
self.drvr.trigger_crash_dump, instance)
def test_trigger_crash_dump_unexpected_error(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'UnexpectedError',
error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.inject_nmi = mock.Mock(side_effect=ex)
instance = objects.Instance(uuid=uuids.instance, id=1)
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
self.assertRaises(fakelibvirt.libvirtError,
self.drvr.trigger_crash_dump, instance)
@mock.patch.object(libvirt_driver.LOG, 'debug')
def test_get_volume_driver_invalid_connector_exception(self, mock_debug):
"""Tests that the driver doesn't fail to initialize if one of the
imported volume drivers raises InvalidConnectorProtocol from os-brick.
"""
# make a copy of the normal list and add a volume driver that raises
# the handled os-brick exception when imported.
libvirt_volume_drivers_copy = copy.copy(
libvirt_driver.libvirt_volume_drivers)
libvirt_volume_drivers_copy.append(
'invalid=nova.tests.unit.virt.libvirt.test_driver.'
'FakeInvalidVolumeDriver'
)
with mock.patch.object(libvirt_driver, 'libvirt_volume_drivers',
libvirt_volume_drivers_copy):
drvr = libvirt_driver.LibvirtDriver(
fake.FakeVirtAPI(), read_only=True
)
# make sure we didn't register the invalid volume driver
self.assertNotIn('invalid', drvr.volume_drivers)
# make sure we logged something
mock_debug.assert_called_with(
('Unable to load volume driver %s. '
'It is not supported on this host.'),
'nova.tests.unit.virt.libvirt.test_driver.FakeInvalidVolumeDriver'
)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._get_mediated_devices')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'._get_mdev_capable_devices')
def test_get_vgpu_total(self, get_mdev_devs, get_mdevs):
get_mdev_devs.return_value = [
{'dev_id': 'pci_0000_84_00_0',
'types': {'nvidia-11': {'availableInstances': 14,
'name': 'GRID M60-0B',
'deviceAPI': 'vfio-pci'},
}}]
get_mdevs.return_value = [
{'dev_id': 'mdev_4b20d080_1b54_4048_85b3_a6a62d165c01',
'uuid': "4b20d080-1b54-4048-85b3-a6a62d165c01",
'type': 'nvidia-11',
'iommuGroup': 1
},
{'dev_id': 'mdev_4b20d080_1b54_4048_85b3_a6a62d165c02',
'uuid': "4b20d080-1b54-4048-85b3-a6a62d165c02",
'type': 'nvidia-11',
'iommuGroup': 1
},
]
# By default, no specific types are supported
self.assertEqual(0, self.drvr._get_vgpu_total())
        # Now enable only the nvidia-11 vGPU type
self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
# We have 14 available for nvidia-11. We also have 2 mdevs of the type.
# So, as a total, we have 14+2, hence 16.
self.assertEqual(16, self.drvr._get_vgpu_total())
@mock.patch.object(host.Host, 'device_lookup_by_name')
@mock.patch.object(host.Host, 'list_mdev_capable_devices')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
def test_get_mdev_capable_devices(self, _get_libvirt_version,
list_mdev_capable_devs,
device_lookup_by_name):
list_mdev_capable_devs.return_value = ['pci_0000_06_00_0']
def fake_nodeDeviceLookupByName(name):
return FakeNodeDevice(_fake_NodeDevXml[name])
device_lookup_by_name.side_effect = fake_nodeDeviceLookupByName
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
expected = [{"dev_id": "pci_0000_06_00_0",
"types": {'nvidia-11': {'availableInstances': 16,
'name': 'GRID M60-0B',
'deviceAPI': 'vfio-pci'},
}
}]
self.assertEqual(expected, drvr._get_mdev_capable_devices())
@mock.patch.object(host.Host, 'device_lookup_by_name')
@mock.patch.object(host.Host, 'list_mdev_capable_devices')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
def test_get_mdev_capable_devices_filtering(self, _get_libvirt_version,
list_mdev_capable_devs,
device_lookup_by_name):
list_mdev_capable_devs.return_value = ['pci_0000_06_00_0']
def fake_nodeDeviceLookupByName(name):
return FakeNodeDevice(_fake_NodeDevXml[name])
device_lookup_by_name.side_effect = fake_nodeDeviceLookupByName
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Since we filter by a type not supported by the physical device,
# we don't get results.
self.assertEqual([],
drvr._get_mdev_capable_devices(types=['nvidia-12']))
@mock.patch.object(host.Host, 'device_lookup_by_name')
@mock.patch.object(host.Host, 'list_mediated_devices')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
def test_get_mediated_devices(self, _get_libvirt_version,
list_mediated_devices,
device_lookup_by_name):
list_mediated_devices.return_value = [
'mdev_4b20d080_1b54_4048_85b3_a6a62d165c01']
def fake_nodeDeviceLookupByName(name):
return FakeNodeDevice(_fake_NodeDevXml[name])
device_lookup_by_name.side_effect = fake_nodeDeviceLookupByName
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
expected = [{"dev_id": "mdev_4b20d080_1b54_4048_85b3_a6a62d165c01",
"uuid": "4b20d080-1b54-4048-85b3-a6a62d165c01",
"type": "nvidia-11",
"iommu_group": 12
}]
self.assertEqual(expected, drvr._get_mediated_devices())
@mock.patch.object(host.Host, 'device_lookup_by_name')
@mock.patch.object(host.Host, 'list_mediated_devices')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
def test_get_mediated_devices_filtering(self, _get_libvirt_version,
list_mediated_devices,
device_lookup_by_name):
list_mediated_devices.return_value = [
'mdev_4b20d080_1b54_4048_85b3_a6a62d165c01']
def fake_nodeDeviceLookupByName(name):
return FakeNodeDevice(_fake_NodeDevXml[name])
device_lookup_by_name.side_effect = fake_nodeDeviceLookupByName
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Since we filter by a type not supported by the physical device,
# we don't get results.
self.assertEqual([], drvr._get_mediated_devices(types=['nvidia-12']))
@mock.patch.object(host.Host, 'list_guests')
def test_get_all_assigned_mediated_devices(self, list_guests):
dom_with_vgpu = """
<domain type="kvm">
<devices>
<hostdev mode='subsystem' type='mdev' model='vfio-pci'>
<source>
<address uuid='%s'/>
</source>
</hostdev>
</devices>
</domain>
""" % uuids.mdev
guest1 = libvirt_guest.Guest(FakeVirtDomain())
guest2 = libvirt_guest.Guest(FakeVirtDomain(fake_xml=dom_with_vgpu))
list_guests.return_value = [guest1, guest2]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual({uuids.mdev: guest2.uuid},
drvr._get_all_assigned_mediated_devices())
@mock.patch.object(host.Host, 'get_guest')
def test_get_all_assigned_mediated_devices_for_an_instance(self,
get_guest):
dom_with_vgpu = """
<domain type="kvm">
<devices>
<hostdev mode='subsystem' type='mdev' model='vfio-pci'>
<source>
<address uuid='%s'/>
</source>
</hostdev>
</devices>
</domain>
""" % uuids.mdev
guest = libvirt_guest.Guest(FakeVirtDomain(fake_xml=dom_with_vgpu))
get_guest.return_value = guest
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_inst = objects.Instance()
self.assertEqual({uuids.mdev: guest.uuid},
drvr._get_all_assigned_mediated_devices(fake_inst))
get_guest.assert_called_once_with(fake_inst)
@mock.patch.object(host.Host, 'get_guest')
def test_get_all_assigned_mediated_devices_for_a_non_existing_instance(
self, get_guest):
get_guest.side_effect = exception.InstanceNotFound(instance_id='fake')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_inst = objects.Instance()
self.assertEqual({},
drvr._get_all_assigned_mediated_devices(fake_inst))
def test_allocate_mdevs_with_no_vgpu_allocations(self):
allocations = {
'rp1': {
'resources': {
# Just any resource class but VGPU
rc_fields.ResourceClass.VCPU: 1,
}
}
}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertIsNone(drvr._allocate_mdevs(allocations=allocations))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_existing_mdevs_not_assigned')
def test_allocate_mdevs_with_available_mdevs(self, get_unassigned_mdevs):
allocations = {
'rp1': {
'resources': {
rc_fields.ResourceClass.VGPU: 1,
}
}
}
get_unassigned_mdevs.return_value = set([uuids.mdev1])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual([uuids.mdev1],
drvr._allocate_mdevs(allocations=allocations))
@mock.patch.object(nova.privsep.libvirt, 'create_mdev')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_mdev_capable_devices')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_existing_mdevs_not_assigned')
def test_allocate_mdevs_with_no_mdevs_but_capacity(self,
unallocated_mdevs,
get_mdev_capable_devs,
privsep_create_mdev):
self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
allocations = {
'rp1': {
'resources': {
rc_fields.ResourceClass.VGPU: 1,
}
}
}
unallocated_mdevs.return_value = set()
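        # No free mdev exists yet, but the pGPU still reports capacity for
        # the enabled type, so the driver should create a brand new mdev.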
get_mdev_capable_devs.return_value = [
{"dev_id": "pci_0000_06_00_0",
"types": {'nvidia-11': {'availableInstances': 16,
'name': 'GRID M60-0B',
'deviceAPI': 'vfio-pci'},
}
}]
privsep_create_mdev.return_value = uuids.mdev1
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual([uuids.mdev1],
drvr._allocate_mdevs(allocations=allocations))
privsep_create_mdev.assert_called_once_with("0000:06:00.0",
'nvidia-11',
uuid=None)
@mock.patch.object(nova.privsep.libvirt, 'create_mdev')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_mdev_capable_devices')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_existing_mdevs_not_assigned')
def test_allocate_mdevs_with_no_gpu_capacity(self,
unallocated_mdevs,
get_mdev_capable_devs,
privsep_create_mdev):
self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
allocations = {
'rp1': {
'resources': {
rc_fields.ResourceClass.VGPU: 1,
}
}
}
unallocated_mdevs.return_value = set()
        # Mock the fact that all possible mediated devices have been created
        # and are all assigned, so there is no remaining vGPU capacity.
get_mdev_capable_devs.return_value = [
{"dev_id": "pci_0000_06_00_0",
"types": {'nvidia-11': {'availableInstances': 0,
'name': 'GRID M60-0B',
'deviceAPI': 'vfio-pci'},
}
}]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.ComputeResourcesUnavailable,
drvr._allocate_mdevs, allocations=allocations)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_mediated_devices')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_all_assigned_mediated_devices')
def test_get_existing_mdevs_not_assigned(self, get_all_assigned_mdevs,
get_mediated_devices):
# mdev2 is assigned to instance1
get_all_assigned_mdevs.return_value = {uuids.mdev2: uuids.inst1}
# there is a total of 2 mdevs, mdev1 and mdev2
get_mediated_devices.return_value = [{'dev_id': 'mdev_some_uuid1',
'uuid': uuids.mdev1,
'type': 'nvidia-11',
'iommu_group': 1},
{'dev_id': 'mdev_some_uuid2',
'uuid': uuids.mdev2,
'type': 'nvidia-11',
'iommu_group': 1}]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Since mdev2 is assigned to inst1, only mdev1 is available
self.assertEqual(set([uuids.mdev1]),
drvr._get_existing_mdevs_not_assigned())
@mock.patch.object(nova.privsep.libvirt, 'create_mdev')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_mdev_capable_devices')
@mock.patch.object(os.path, 'exists')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_all_assigned_mediated_devices')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
def test_recreate_mediated_device_on_init_host(
self, _get_libvirt_version,
get_all_assigned_mdevs, exists, get_mdev_capable_devs,
privsep_create_mdev):
self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
get_all_assigned_mdevs.return_value = {uuids.mdev1: uuids.inst1,
uuids.mdev2: uuids.inst2}
        # Fake the fact that mdev1 already exists on the host but mdev2 does
        # not.
def _exists(path):
            # Sanity check that we only look under the sysfs mdev devices path
            self.assertIn('/sys/bus/mdev/devices/', path)
            return uuids.mdev1 in path
exists.side_effect = _exists
get_mdev_capable_devs.return_value = [
{"dev_id": "pci_0000_06_00_0",
"types": {'nvidia-11': {'availableInstances': 16,
'name': 'GRID M60-0B',
'deviceAPI': 'vfio-pci'},
}
}]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host(host='foo')
privsep_create_mdev.assert_called_once_with(
"0000:06:00.0", 'nvidia-11', uuid=uuids.mdev2)
@mock.patch.object(libvirt_guest.Guest, 'detach_device')
def _test_detach_mediated_devices(self, side_effect, detach_device):
dom_with_vgpu = (
"""<domain> <devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='xxx'/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x04' function='0x0'/>
</disk>
<hostdev mode='subsystem' type='mdev' managed='no'
model='vfio-pci'>
<source>
<address uuid='81db53c6-6659-42a0-a34c-1507fdc72983'/>
</source>
<alias name='hostdev0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05'
function='0x0'/>
</hostdev>
</devices></domain>""")
detach_device.side_effect = side_effect
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
guest = libvirt_guest.Guest(FakeVirtDomain(fake_xml=dom_with_vgpu))
drvr._detach_mediated_devices(guest)
return detach_device
def test_detach_mediated_devices(self):
def fake_detach_device(cfg_obj, **kwargs):
self.assertIsInstance(cfg_obj,
vconfig.LibvirtConfigGuestHostdevMDEV)
detach_mock = self._test_detach_mediated_devices(fake_detach_device)
detach_mock.assert_called_once_with(mock.ANY, live=True)
def test_detach_mediated_devices_raises_exc_unsupported(self):
exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, 'virDomainDetachDeviceFlags() failed',
error_code=fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED)
self.assertRaises(exception.InstanceFaultRollback,
self._test_detach_mediated_devices, exc)
def test_detach_mediated_devices_raises_exc(self):
exc = test.TestingException()
self.assertRaises(test.TestingException,
self._test_detach_mediated_devices, exc)
def test_cpu_traits_with_passthrough_mode(self):
"""Test getting CPU traits when cpu_mmode is 'host-passthrough', traits
are calculated from fakelibvirt's baseline CPU features.
"""
self.flags(cpu_mode='host-passthrough', group='libvirt')
self.assertTraitsEqual(['HW_CPU_X86_AESNI', 'HW_CPU_X86_VMX'],
self.drvr._get_cpu_traits())
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
def test_cpu_traits_with_mode_none(self, mock_baseline):
"""Test getting CPU traits when cpu_mode is 'none', traits are
calculated from _fake_qemu64_cpu_features.
"""
self.flags(cpu_mode='none', group='libvirt')
mock_baseline.return_value = _fake_qemu64_cpu_feature
self.assertTraitsEqual(['HW_CPU_X86_SSE', 'HW_CPU_X86_SVM',
'HW_CPU_X86_MMX', 'HW_CPU_X86_SSE2'],
self.drvr._get_cpu_traits())
mock_baseline.assert_called_with([u'''<cpu>
<arch>x86_64</arch>
<model>qemu64</model>
<vendor>Intel</vendor>
<topology sockets="1" cores="2" threads="1"/>
</cpu>
'''], 1)
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
def test_cpu_traits_with_mode_custom(self, mock_baseline):
"""Test getting CPU traits when cpu_mode is 'custom' and cpu_model is
'Broadwell-noTSX', traits are calculated from
_fake_broadwell_cpu_features.
"""
self.flags(cpu_mode='custom',
cpu_model='Broadwell-noTSX',
group='libvirt')
mock_baseline.return_value = _fake_broadwell_cpu_feature
self.assertTraitsEqual(
[
'HW_CPU_X86_BMI2',
'HW_CPU_X86_AVX2',
'HW_CPU_X86_BMI',
'HW_CPU_X86_AVX',
'HW_CPU_X86_AESNI',
'HW_CPU_X86_SSE42',
'HW_CPU_X86_SSE41',
'HW_CPU_X86_FMA3',
'HW_CPU_X86_SSSE3',
'HW_CPU_X86_CLMUL',
'HW_CPU_X86_SSE2',
'HW_CPU_X86_SSE',
'HW_CPU_X86_MMX'
], self.drvr._get_cpu_traits()
)
mock_baseline.assert_called_with([u'''<cpu>
<arch>x86_64</arch>
<model>Broadwell-noTSX</model>
<vendor>Intel</vendor>
<topology sockets="1" cores="2" threads="1"/>
</cpu>
'''], 1)
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
def test_cpu_traits_with_no_baseline_support(self, mock_baseline):
"""Test getting CPU traits when baseline call is not supported."""
self.flags(cpu_mode='none', group='libvirt')
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_baseline.side_effect = not_supported_exc
self.assertTraitsEqual([], self.drvr._get_cpu_traits())
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.getCapabilities')
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
def test_cpu_traits_on_s390x(self, mock_baseline, mock_cap):
"""Test getting CPU traits on s390x, baseline call is not supported on
the platform.
"""
self.flags(cpu_mode='none', group='libvirt')
mock_cap.return_value = """
<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>s390x</arch>
<topology sockets='1' cores='6' threads='1'/>
<pages unit='KiB' size='4' />
<pages unit='KiB' size='1024' />
</cpu>
</host>
</capabilities>
"""
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver: cannot'
' compute baseline CPU',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
missing_model_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'XML error: Missing CPU model name',
error_code=fakelibvirt.VIR_ERR_XML_ERROR)
# model the libvirt behavior on s390x
def mocked_baseline(cpu_xml, *args):
xml = cpu_xml[0]
if "<model>" in xml:
raise not_supported_exc
else:
raise missing_model_exc
mock_baseline.side_effect = mocked_baseline
self.assertTraitsEqual([], self.drvr._get_cpu_traits())
def test_cpu_traits_with_invalid_virt_type(self):
"""Test getting CPU traits when using a virt_type that doesn't support
the feature, only kvm and qemu supports reporting CPU traits.
"""
self.flags(cpu_mode='custom',
cpu_model='IvyBridge',
virt_type='lxc',
group='libvirt'
)
self.assertRaises(exception.Invalid, self.drvr._get_cpu_traits)
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.getCapabilities')
@mock.patch('nova.virt.libvirt.utils.cpu_features_to_traits')
def test_cpu_traits_with_mode_passthrough_and_extra_flags(
self, mock_to_traits, mock_cap):
"""Test if extra flags are accounted when cpu_mode is set to
host-passthrough.
"""
self.flags(cpu_mode='host-passthrough',
cpu_model_extra_flags='PCID',
group='libvirt')
mock_cap.return_value = """
<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>IvyBridge</arch>
<topology sockets='1' cores='2' threads='2'/>
<feature policy='require' name='erms'/>
<pages unit='KiB' size='4' />
<pages unit='KiB' size='1024' />
</cpu>
</host>
</capabilities>
"""
self.drvr._get_cpu_traits()
self.assertItemsEqual(['pcid', 'erms'], mock_to_traits.call_args[0][0])
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
@mock.patch('nova.virt.libvirt.utils.cpu_features_to_traits')
def test_cpu_traits_with_mode_custom_and_extra_flags(self, mock_to_traits,
mock_baseline):
"""Test if extra flags are accounted when cpu_mode is set to custom.
"""
self.flags(cpu_mode='custom',
cpu_model='IvyBridge',
cpu_model_extra_flags='PCID',
group='libvirt')
mock_baseline.return_value = """
<cpu mode='custom' match='exact'>
<model fallback='forbid'>IvyBridge</model>
<vendor>Intel</vendor>
<feature policy='require' name='erms'/>
<feature policy='require' name='pcid'/>
</cpu>
"""
self.drvr._get_cpu_traits()
mock_baseline.assert_called_with([u'''<cpu>
<arch>x86_64</arch>
<model>IvyBridge</model>
<vendor>Intel</vendor>
<topology sockets="1" cores="2" threads="1"/>
<feature name="pcid"/>
</cpu>
'''], 1)
self.assertItemsEqual(['pcid', 'erms'], mock_to_traits.call_args[0][0])
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
@mock.patch('nova.virt.libvirt.utils.cpu_features_to_traits')
def test_cpu_traits_with_mode_not_set_and_extra_flags(self, mock_to_traits,
mock_baseline):
"""Test if extra flags are accounted when cpu_mode is not set."""
self.flags(cpu_mode=None,
cpu_model_extra_flags='PCID',
virt_type='kvm',
group='libvirt'
)
mock_baseline.return_value = """
<cpu mode='custom' match='exact'>
<model fallback='forbid'>IvyBridge</model>
<vendor>Intel</vendor>
<feature policy='require' name='erms'/>
</cpu>
"""
self.drvr._get_cpu_traits()
self.assertItemsEqual(['pcid', 'erms'], mock_to_traits.call_args[0][0])
def test_cpu_traits_with_mode_none_and_invalid_virt_type(self):
"""Test case that cpu mode is none and virt_type is neither kvm nor
qemu.
"""
self.flags(cpu_mode='none',
virt_type='lxc',
group='libvirt')
self.assertIsNone(self.drvr._get_cpu_traits())
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.getCapabilities')
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
def test_cpu_traits_with_mode_none_on_power(self, mock_baseline, mock_cap):
"""Test case that cpu mode is none on Power machines."""
self.flags(cpu_mode='none', virt_type='kvm', group='libvirt')
mock_cap.return_value = '''
<capabilities>
<host>
<uuid>1f71d34a-7c89-45cf-95ce-3df20fc6b936</uuid>
<cpu>
<model>POWER8</model>
<vendor>IBM</vendor>
<arch>ppc64le</arch>
<topology sockets='1' cores='5' threads='1'/>
<pages unit='KiB' size='64'/>
</cpu>
</host>
</capabilities>
'''
mock_baseline.return_value = '''
<cpu>
<model>POWER8</model>
<vendor>IBM</vendor>
</cpu>
'''
self.drvr._get_cpu_traits()
mock_baseline.assert_called_with([u'''<cpu>
<arch>ppc64le</arch>
<model>POWER8</model>
<vendor>IBM</vendor>
<topology sockets="1" cores="5" threads="1"/>
</cpu>
'''], 1)
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
"""Test for LibvirtDriver.get_all_volume_usage."""
def setUp(self):
super(LibvirtVolumeUsageTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.c = context.get_admin_context()
self.ins_ref = objects.Instance(
id=1729,
uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
)
        # also verify the bootable volume device path
self.bdms = [{'volume_id': 1,
'device_name': '/dev/vde'},
{'volume_id': 2,
'device_name': 'vda'}]
def test_get_all_volume_usage(self):
def fake_block_stats(instance_name, disk):
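            # Return (rd_req, rd_bytes, wr_req, wr_bytes, errs) per disk.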
return (169, 688640, 0, 0, -1)
self.stubs.Set(self.drvr, 'block_stats', fake_block_stats)
vol_usage = self.drvr.get_all_volume_usage(self.c,
[dict(instance=self.ins_ref, instance_bdms=self.bdms)])
expected_usage = [{'volume': 1,
'instance': self.ins_ref,
'rd_bytes': 688640, 'wr_req': 0,
'rd_req': 169, 'wr_bytes': 0},
{'volume': 2,
'instance': self.ins_ref,
'rd_bytes': 688640, 'wr_req': 0,
'rd_req': 169, 'wr_bytes': 0}]
self.assertEqual(vol_usage, expected_usage)
def test_get_all_volume_usage_device_not_found(self):
def fake_get_domain(self, instance):
raise exception.InstanceNotFound(instance_id="fakedom")
self.stubs.Set(host.Host, '_get_domain', fake_get_domain)
vol_usage = self.drvr.get_all_volume_usage(self.c,
[dict(instance=self.ins_ref, instance_bdms=self.bdms)])
self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.NoDBTestCase):
"""Test libvirtd calls are nonblocking."""
def setUp(self):
super(LibvirtNonblockingTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.flags(connection_uri="test:///default",
group='libvirt')
def test_connection_to_primitive(self):
# Test bug 962840.
import nova.virt.libvirt.driver as libvirt_driver
drvr = libvirt_driver.LibvirtDriver('')
drvr.set_host_enabled = mock.Mock()
jsonutils.to_primitive(drvr._conn, convert_instances=True)
@mock.patch.object(objects.Service, 'get_by_compute_host')
def test_tpool_execute_calls_libvirt(self, mock_svc):
conn = fakelibvirt.virConnect()
conn.is_expected = True
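        # Opening the connection and registering event callbacks must be
        # dispatched through eventlet.tpool so blocking libvirt calls do
        # not stall the service's event loop.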
self.mox.StubOutWithMock(eventlet.tpool, 'execute')
eventlet.tpool.execute(
fakelibvirt.openAuth,
'test:///default',
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(conn)
eventlet.tpool.execute(
conn.domainEventRegisterAny,
None,
fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
mox.IgnoreArg(),
mox.IgnoreArg())
if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'):
eventlet.tpool.execute(
conn.registerCloseCallback,
mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
c = driver._get_connection()
self.assertTrue(c.is_expected)
class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
"""Tests for libvirtDriver.volume_snapshot_create/delete."""
def setUp(self):
super(LibvirtVolumeSnapshotTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.c = context.get_admin_context()
self.flags(instance_name_template='instance-%s')
        # create the instance values used to build test Instance objects
self.inst = {}
self.inst['uuid'] = uuids.fake
self.inst['id'] = '1'
# system_metadata is needed for objects.Instance.image_meta conversion
self.inst['system_metadata'] = {}
# create domain info
self.dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio' serial='1234'/>
</disk>
</devices>
</domain>"""
# alternate domain info with network-backed snapshot chain
self.dom_netdisk_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
</disk>
<disk type='network' device='disk'>
<driver name='qemu' type='qcow2'/>
<source protocol='netfs' name='vol1/root.img'>
<host name='server1' port='24007'/>
</source>
<backingStore type='network' index='1'>
<driver name='qemu' type='qcow2'/>
<source protocol='netfs' name='vol1/snap.img'>
<host name='server1' port='24007'/>
</source>
<backingStore type='network' index='2'>
<driver name='qemu' type='qcow2'/>
<source protocol='netfs' name='vol1/snap-b.img'>
<host name='server1' port='24007'/>
</source>
<backingStore/>
</backingStore>
</backingStore>
<target dev='vdb' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
</devices>
</domain>
"""
# XML with netdisk attached, and 1 snapshot taken
self.dom_netdisk_xml_2 = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
</disk>
<disk type='network' device='disk'>
<driver name='qemu' type='qcow2'/>
<source protocol='netfs' name='vol1/snap.img'>
<host name='server1' port='24007'/>
</source>
<backingStore type='network' index='1'>
<driver name='qemu' type='qcow2'/>
<source protocol='netfs' name='vol1/root.img'>
<host name='server1' port='24007'/>
</source>
<backingStore/>
</backingStore>
<target dev='vdb' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
</devices>
</domain>
"""
self.create_info = {'type': 'qcow2',
'snapshot_id': '1234-5678',
'new_file': 'new-file'}
self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'
self.delete_info_1 = {'type': 'qcow2',
'file_to_merge': 'snap.img',
'merge_target_file': None}
self.delete_info_2 = {'type': 'qcow2',
'file_to_merge': 'snap.img',
'merge_target_file': 'other-snap.img'}
self.delete_info_3 = {'type': 'qcow2',
'file_to_merge': None,
'merge_target_file': None}
self.delete_info_netdisk = {'type': 'qcow2',
'file_to_merge': 'snap.img',
'merge_target_file': 'root.img'}
self.delete_info_invalid_type = {'type': 'made_up_type',
'file_to_merge': 'some_file',
'merge_target_file':
'some_other_file'}
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
'refresh_connection_info')
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
'get_by_volume_and_instance')
def test_volume_refresh_connection_info(self,
mock_get_by_volume_and_instance,
mock_refresh_connection_info):
instance = objects.Instance(**self.inst)
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': 123,
'instance_uuid': uuids.instance,
'device_name': '/dev/sdb',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'connection_info': '{"fake": "connection_info"}'})
fake_bdm = objects.BlockDeviceMapping(self.c, **fake_bdm)
mock_get_by_volume_and_instance.return_value = fake_bdm
self.drvr._volume_refresh_connection_info(self.c, instance,
self.volume_uuid)
mock_get_by_volume_and_instance.assert_called_once_with(
self.c, self.volume_uuid, instance.uuid)
mock_refresh_connection_info.assert_called_once_with(self.c, instance,
self.drvr._volume_api, self.drvr)
def _test_volume_snapshot_create(self, quiesce=True, can_quiesce=True,
quiesce_required=False):
"""Test snapshot creation with file-based disk."""
self.flags(instance_name_template='instance-%s')
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
if quiesce_required:
self.inst['system_metadata']['image_os_require_quiesce'] = True
instance = objects.Instance(**self.inst)
new_file = 'new-file'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
snap_xml_src = (
'<domainsnapshot>\n'
' <disks>\n'
' <disk name="disk1_file" snapshot="external" type="file">\n'
' <source file="new-file"/>\n'
' </disk>\n'
' <disk name="vdb" snapshot="no"/>\n'
' </disks>\n'
'</domainsnapshot>\n')
# Older versions of libvirt may be missing these.
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
snap_flags_q = (snap_flags |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
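        # Build the quiesce scenario under test: _can_quiesce either
        # succeeds or raises QemuGuestAgentNotEnabled, and the quiesced
        # snapshot call itself may fail, in which case we retry without
        # quiesce unless the image requires it.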
can_quiesce_mock = mock.Mock()
if can_quiesce:
can_quiesce_mock.return_value = None
if quiesce:
domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q)
else:
# we can quiesce but snapshot with quiesce fails
domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\
AndRaise(fakelibvirt.libvirtError(
'quiescing failed, no qemu-ga'))
if not quiesce_required:
# quiesce is not required so try snapshot again without it
domain.snapshotCreateXML(snap_xml_src, flags=snap_flags)
else:
can_quiesce_mock.side_effect = exception.QemuGuestAgentNotEnabled
if not quiesce_required:
# quiesce is not required so try snapshot again without it
domain.snapshotCreateXML(snap_xml_src, flags=snap_flags)
self.drvr._can_quiesce = can_quiesce_mock
self.mox.ReplayAll()
guest = libvirt_guest.Guest(domain)
if quiesce_required and (not quiesce or not can_quiesce):
# If we can't quiesce but it's required by the image then we should
# fail.
if not quiesce:
# snapshot + quiesce failed which is a libvirtError
expected_error = fakelibvirt.libvirtError
else:
# quiesce is required but we can't do it
expected_error = exception.QemuGuestAgentNotEnabled
self.assertRaises(expected_error,
self.drvr._volume_snapshot_create,
self.c, instance, guest, self.volume_uuid,
new_file)
else:
self.drvr._volume_snapshot_create(self.c, instance, guest,
self.volume_uuid, new_file)
# instance.image_meta generates a new objects.ImageMeta object each
# time it's called so just use a mock.ANY for the image_meta arg.
can_quiesce_mock.assert_called_once_with(instance, mock.ANY)
self.mox.VerifyAll()
def test_volume_snapshot_create_libgfapi(self):
"""Test snapshot creation with libgfapi network disk."""
        self.flags(instance_name_template='instance-%s')
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
self.dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type='block'>
<source protocol='netfs' name='netfs1/volume-1234'>
<host name='127.3.4.5' port='24007'/>
</source>
<target dev='vdb' bus='virtio' serial='1234'/>
</disk>
</devices>
</domain>"""
instance = objects.Instance(**self.inst)
new_file = 'new-file'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
snap_xml_src = (
'<domainsnapshot>\n'
' <disks>\n'
' <disk name="disk1_file" snapshot="external" type="file">\n'
' <source file="new-file"/>\n'
' </disk>\n'
' <disk name="vdb" snapshot="no"/>\n'
' </disks>\n'
'</domainsnapshot>\n')
# Older versions of libvirt may be missing these.
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
snap_flags_q = (snap_flags |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q)
self.mox.ReplayAll()
guest = libvirt_guest.Guest(domain)
with mock.patch.object(self.drvr, '_can_quiesce', return_value=None):
self.drvr._volume_snapshot_create(self.c, instance, guest,
self.volume_uuid, new_file)
self.mox.VerifyAll()
def test_volume_snapshot_create_cannot_quiesce(self):
# We can't quiesce so we don't try.
self._test_volume_snapshot_create(can_quiesce=False)
def test_volume_snapshot_create_cannot_quiesce_quiesce_required(self):
# We can't quiesce but it's required so we fail.
self._test_volume_snapshot_create(can_quiesce=False,
quiesce_required=True)
def test_volume_snapshot_create_can_quiesce_quiesce_required_fails(self):
# We can quiesce but it fails and it's required so we fail.
self._test_volume_snapshot_create(
quiesce=False, can_quiesce=True, quiesce_required=True)
def test_volume_snapshot_create_noquiesce(self):
# We can quiesce but it fails but it's not required so we don't fail.
self._test_volume_snapshot_create(quiesce=False)
def test_volume_snapshot_create_noquiesce_cannot_quiesce(self):
# We can't quiesce so we don't try, and if we did we'd fail.
self._test_volume_snapshot_create(quiesce=False, can_quiesce=False)
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_can_quiesce(self, ver):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.inst)
image_meta = objects.ImageMeta.from_dict(
{"properties": {
"hw_qemu_guest_agent": "yes"}})
self.assertIsNone(self.drvr._can_quiesce(instance, image_meta))
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_can_quiesce_bad_hyp(self, ver):
self.flags(virt_type='lxc', group='libvirt')
instance = objects.Instance(**self.inst)
image_meta = objects.ImageMeta.from_dict(
{"properties": {
"hw_qemu_guest_agent": "yes"}})
self.assertRaises(exception.InstanceQuiesceNotSupported,
self.drvr._can_quiesce, instance, image_meta)
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_can_quiesce_agent_not_enable(self, ver):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.inst)
image_meta = objects.ImageMeta.from_dict({})
self.assertRaises(exception.QemuGuestAgentNotEnabled,
self.drvr._can_quiesce, instance, image_meta)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_volume_snapshot_create')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_volume_refresh_connection_info')
def test_volume_snapshot_create_outer_success(self, mock_refresh,
mock_snap_create, mock_loop):
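        # Descriptive note (added): FakeLoopingCall below is a minimal stand-in
        # for oslo's FixedIntervalLoopingCall -- it invokes the polled function
        # once, synchronously, and treats LoopingCallDone as normal completion.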
class FakeLoopingCall(object):
def __init__(self, func):
self.func = func
def start(self, *a, **k):
try:
self.func()
except loopingcall.LoopingCallDone:
pass
return self
def wait(self):
return None
mock_loop.side_effect = FakeLoopingCall
instance = objects.Instance(**self.inst)
domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1)
guest = libvirt_guest.Guest(domain)
@mock.patch.object(self.drvr, '_volume_api')
@mock.patch.object(self.drvr._host, 'get_guest')
def _test(mock_get_guest, mock_vol_api):
mock_get_guest.return_value = guest
mock_vol_api.get_snapshot.return_value = {'status': 'available'}
self.drvr.volume_snapshot_create(self.c, instance,
self.volume_uuid,
self.create_info)
mock_get_guest.assert_called_once_with(instance)
mock_snap_create.assert_called_once_with(
self.c, instance, guest, self.volume_uuid,
self.create_info['new_file'])
mock_vol_api.update_snapshot_status.assert_called_once_with(
self.c, self.create_info['snapshot_id'], 'creating')
mock_vol_api.get_snapshot.assert_called_once_with(
self.c, self.create_info['snapshot_id'])
mock_refresh.assert_called_once_with(
self.c, instance, self.volume_uuid)
_test()
def test_volume_snapshot_create_outer_failure(self):
instance = objects.Instance(**self.inst)
domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1)
guest = libvirt_guest.Guest(domain)
self.mox.StubOutWithMock(self.drvr._host, 'get_guest')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')
self.drvr._host.get_guest(instance).AndReturn(guest)
self.drvr._volume_snapshot_create(self.c,
instance,
guest,
self.volume_uuid,
self.create_info['new_file']).\
AndRaise(exception.NovaException('oops'))
self.drvr._volume_api.update_snapshot_status(
self.c, self.create_info['snapshot_id'], 'error')
self.mox.ReplayAll()
self.assertRaises(exception.NovaException,
self.drvr.volume_snapshot_create,
self.c,
instance,
self.volume_uuid,
self.create_info)
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
def test_volume_snapshot_delete_1(self, mock_is_job_complete):
"""Deleting newest snapshot -- blockRebase."""
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.drvr._host._get_domain(instance).AndReturn(domain)
domain.blockRebase('vda', 'snap.img', 0, flags=0)
self.mox.ReplayAll()
# is_job_complete returns False when initially called, then True
mock_is_job_complete.side_effect = (False, True)
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
self.assertEqual(2, mock_is_job_complete.call_count)
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
def test_volume_snapshot_delete_relative_1(self, mock_is_job_complete):
"""Deleting newest snapshot -- blockRebase using relative flag"""
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
guest = libvirt_guest.Guest(domain)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_guest')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.drvr._host.get_guest(instance).AndReturn(guest)
domain.blockRebase('vda', 'snap.img', 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
self.mox.ReplayAll()
# is_job_complete returns False when initially called, then True
mock_is_job_complete.side_effect = (False, True)
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
self.assertEqual(2, mock_is_job_complete.call_count)
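    # Descriptive note (added): the helper below builds a mock virDomain (and
    # wrapping Guest) whose blockRebase() raises VIR_ERR_OPERATION_INVALID,
    # which is how libvirt reports a rebase attempted on a non-running domain.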
def _setup_block_rebase_domain_and_guest_mocks(self, dom_xml):
mock_domain = mock.Mock(spec=fakelibvirt.virDomain)
mock_domain.XMLDesc.return_value = dom_xml
guest = libvirt_guest.Guest(mock_domain)
exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, 'virDomainBlockRebase() failed',
error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID)
mock_domain.blockRebase.side_effect = exc
return mock_domain, guest
@mock.patch.object(host.Host, "has_min_version",
mock.Mock(return_value=True))
@mock.patch("nova.virt.libvirt.guest.Guest.is_active",
mock.Mock(return_value=False))
@mock.patch('nova.virt.images.qemu_img_info',
return_value=mock.Mock(file_format="fake_fmt"))
@mock.patch('nova.utils.execute')
def test_volume_snapshot_delete_when_dom_not_running(self, mock_execute,
mock_qemu_img_info):
"""Deleting newest snapshot of a file-based image when the domain is
not running should trigger a blockRebase using qemu-img not libvirt.
In this test, we rebase the image with another image as backing file.
"""
mock_domain, guest = self._setup_block_rebase_domain_and_guest_mocks(
self.dom_xml)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=guest):
self.drvr._volume_snapshot_delete(self.c, instance,
self.volume_uuid, snapshot_id,
self.delete_info_1)
mock_qemu_img_info.assert_called_once_with("snap.img")
mock_execute.assert_called_once_with('qemu-img', 'rebase',
'-b', 'snap.img', '-F',
'fake_fmt', 'disk1_file')
@mock.patch.object(host.Host, "has_min_version",
mock.Mock(return_value=True))
@mock.patch("nova.virt.libvirt.guest.Guest.is_active",
mock.Mock(return_value=False))
@mock.patch('nova.virt.images.qemu_img_info',
return_value=mock.Mock(file_format="fake_fmt"))
@mock.patch('nova.utils.execute')
def test_volume_snapshot_delete_when_dom_not_running_and_no_rebase_base(
self, mock_execute, mock_qemu_img_info):
"""Deleting newest snapshot of a file-based image when the domain is
not running should trigger a blockRebase using qemu-img not libvirt.
In this test, the image is rebased onto no backing file (i.e.
it will exist independently of any backing file)
"""
mock_domain, mock_guest = (
self._setup_block_rebase_domain_and_guest_mocks(self.dom_xml))
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
self.drvr._volume_snapshot_delete(self.c, instance,
self.volume_uuid, snapshot_id,
self.delete_info_3)
self.assertEqual(0, mock_qemu_img_info.call_count)
mock_execute.assert_called_once_with('qemu-img', 'rebase',
'-b', '', 'disk1_file')
@mock.patch.object(host.Host, "has_min_version",
mock.Mock(return_value=True))
@mock.patch("nova.virt.libvirt.guest.Guest.is_active",
mock.Mock(return_value=False))
def test_volume_snapshot_delete_when_dom_with_nw_disk_not_running(self):
"""Deleting newest snapshot of a network disk when the domain is not
running should raise a NovaException.
"""
mock_domain, mock_guest = (
self._setup_block_rebase_domain_and_guest_mocks(
self.dom_netdisk_xml))
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
ex = self.assertRaises(exception.NovaException,
self.drvr._volume_snapshot_delete,
self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.assertIn('has not been fully tested', six.text_type(ex))
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
def test_volume_snapshot_delete_relative_2(self, mock_is_job_complete):
"""Deleting older snapshot -- blockCommit using relative flag"""
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.drvr._host._get_domain(instance).AndReturn(domain)
domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
self.mox.ReplayAll()
# is_job_complete returns False when initially called, then True
mock_is_job_complete.side_effect = (False, True)
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_2)
self.mox.VerifyAll()
self.assertEqual(2, mock_is_job_complete.call_count)
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
def test_volume_snapshot_delete_nonrelative_null_base(
self, mock_is_job_complete):
# Deleting newest and last snapshot of a volume
# with blockRebase. So base of the new image will be null.
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
guest = libvirt_guest.Guest(domain)
mock_is_job_complete.return_value = True
with test.nested(
mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml),
mock.patch.object(self.drvr._host, 'get_guest',
return_value=guest),
mock.patch.object(domain, 'blockRebase'),
) as (mock_xmldesc, mock_get_guest, mock_rebase):
self.drvr._volume_snapshot_delete(self.c, instance,
self.volume_uuid, snapshot_id,
self.delete_info_3)
mock_xmldesc.assert_called_once_with(flags=0)
mock_get_guest.assert_called_once_with(instance)
mock_rebase.assert_called_once_with('vda', None, 0, flags=0)
mock_is_job_complete.assert_called()
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
def test_volume_snapshot_delete_netdisk_nonrelative_null_base(
self, mock_is_job_complete):
# Deleting newest and last snapshot of a network attached volume
# with blockRebase. So base of the new image will be null.
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_netdisk_xml_2)
guest = libvirt_guest.Guest(domain)
mock_is_job_complete.return_value = True
with test.nested(
mock.patch.object(domain, 'XMLDesc',
return_value=self.dom_netdisk_xml_2),
mock.patch.object(self.drvr._host, 'get_guest',
return_value=guest),
mock.patch.object(domain, 'blockRebase'),
) as (mock_xmldesc, mock_get_guest, mock_rebase):
self.drvr._volume_snapshot_delete(self.c, instance,
self.volume_uuid, snapshot_id,
self.delete_info_3)
mock_xmldesc.assert_called_once_with(flags=0)
mock_get_guest.assert_called_once_with(instance)
mock_rebase.assert_called_once_with('vdb', None, 0, flags=0)
mock_is_job_complete.assert_called()
def test_volume_snapshot_delete_outer_success(self):
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')
self.drvr._volume_snapshot_delete(self.c,
instance,
self.volume_uuid,
snapshot_id,
delete_info=self.delete_info_1)
self.drvr._volume_api.update_snapshot_status(
self.c, snapshot_id, 'deleting')
self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
self.drvr._volume_refresh_connection_info(self.c, instance,
self.volume_uuid)
self.mox.ReplayAll()
self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id,
self.delete_info_1)
self.mox.VerifyAll()
def test_volume_snapshot_delete_outer_failure(self):
instance = objects.Instance(**self.inst)
snapshot_id = '1234-9876'
FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')
self.drvr._volume_snapshot_delete(self.c,
instance,
self.volume_uuid,
snapshot_id,
delete_info=self.delete_info_1).\
AndRaise(exception.NovaException('oops'))
self.drvr._volume_api.update_snapshot_status(
self.c, snapshot_id, 'error_deleting')
self.mox.ReplayAll()
self.assertRaises(exception.NovaException,
self.drvr.volume_snapshot_delete,
self.c,
instance,
self.volume_uuid,
snapshot_id,
self.delete_info_1)
self.mox.VerifyAll()
def test_volume_snapshot_delete_invalid_type(self):
instance = objects.Instance(**self.inst)
FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
self.drvr._volume_api.update_snapshot_status(
self.c, self.snapshot_id, 'error_deleting')
self.mox.ReplayAll()
self.assertRaises(exception.NovaException,
self.drvr.volume_snapshot_delete,
self.c,
instance,
self.volume_uuid,
self.snapshot_id,
self.delete_info_invalid_type)
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
def test_volume_snapshot_delete_netdisk_1(self, mock_is_job_complete):
"""Delete newest snapshot -- blockRebase for libgfapi/network disk."""
class FakeNetdiskDomain(FakeVirtDomain):
def __init__(self, *args, **kwargs):
super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
def XMLDesc(self, flags):
return self.dom_netdisk_xml
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.drvr._host._get_domain(instance).AndReturn(domain)
domain.blockRebase('vdb', 'vdb[1]', 0, flags=0)
self.mox.ReplayAll()
# is_job_complete returns False when initially called, then True
mock_is_job_complete.side_effect = (False, True)
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
self.assertEqual(2, mock_is_job_complete.call_count)
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
def test_volume_snapshot_delete_netdisk_relative_1(
self, mock_is_job_complete):
"""Delete newest snapshot -- blockRebase for libgfapi/network disk."""
class FakeNetdiskDomain(FakeVirtDomain):
def __init__(self, *args, **kwargs):
super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
def XMLDesc(self, flags):
return self.dom_netdisk_xml
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.drvr._host._get_domain(instance).AndReturn(domain)
domain.blockRebase('vdb', 'vdb[1]', 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
self.mox.ReplayAll()
# is_job_complete returns False when initially called, then True
mock_is_job_complete.side_effect = (False, True)
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
self.assertEqual(2, mock_is_job_complete.call_count)
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
def test_volume_snapshot_delete_netdisk_relative_2(
self, mock_is_job_complete):
"""Delete older snapshot -- blockCommit for libgfapi/network disk."""
class FakeNetdiskDomain(FakeVirtDomain):
def __init__(self, *args, **kwargs):
super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
def XMLDesc(self, flags):
return self.dom_netdisk_xml
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
self.mox.StubOutWithMock(self.drvr._host, '_get_domain')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.drvr._host._get_domain(instance).AndReturn(domain)
domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
self.mox.ReplayAll()
# is_job_complete returns False when initially called, then True
mock_is_job_complete.side_effect = (False, True)
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id,
self.delete_info_netdisk)
self.mox.VerifyAll()
self.assertEqual(2, mock_is_job_complete.call_count)
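# Descriptive note (added): test helper that stands in for images.convert_image
# and simply records an empty file under the destination path in the fake
# libvirt utils file store.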
def _fake_convert_image(source, dest, in_format, out_format,
run_as_root=True):
libvirt_driver.libvirt_utils.files[dest] = b''
class _BaseSnapshotTests(test.NoDBTestCase):
def setUp(self):
super(_BaseSnapshotTests, self).setUp()
self.flags(snapshots_directory='./', group='libvirt')
self.context = context.get_admin_context()
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
self)
self.mock_update_task_state = mock.Mock()
test_instance = _create_test_instance()
self.instance_ref = objects.Instance(**test_instance)
self.instance_ref.info_cache = objects.InstanceInfoCache(
network_info=None)
def _assert_snapshot(self, snapshot, disk_format,
expected_properties=None):
self.mock_update_task_state.assert_has_calls([
mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
mock.call(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)])
props = snapshot['properties']
self.assertEqual(props['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['disk_format'], disk_format)
self.assertEqual(snapshot['name'], 'test-snap')
if expected_properties:
for expected_key, expected_value in \
expected_properties.items():
self.assertEqual(expected_value, props[expected_key])
def _create_image(self, extra_properties=None):
properties = {'instance_id': self.instance_ref['id'],
'user_id': str(self.context.user_id)}
if extra_properties:
properties.update(extra_properties)
sent_meta = {'name': 'test-snap',
'is_public': False,
'status': 'creating',
'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from snapshot, the single image_service is needed
recv_meta = self.image_service.create(self.context, sent_meta)
return recv_meta
@mock.patch.object(host.Host, 'has_min_version')
@mock.patch.object(imagebackend.Image, 'resolve_driver_format')
@mock.patch.object(host.Host, '_get_domain')
def _snapshot(self, image_id, mock_get_domain, mock_resolve, mock_version):
mock_get_domain.return_value = FakeVirtDomain()
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
driver.snapshot(self.context, self.instance_ref, image_id,
self.mock_update_task_state)
snapshot = self.image_service.show(self.context, image_id)
return snapshot
def _test_snapshot(self, disk_format, extra_properties=None):
recv_meta = self._create_image(extra_properties=extra_properties)
snapshot = self._snapshot(recv_meta['id'])
self._assert_snapshot(snapshot, disk_format=disk_format,
expected_properties=extra_properties)
class LibvirtSnapshotTests(_BaseSnapshotTests):
def setUp(self):
super(LibvirtSnapshotTests, self).setUp()
# All paths through livesnapshot trigger a chown behind privsep
self.privsep_chown = mock.patch.object(nova.privsep.path, 'chown')
self.addCleanup(self.privsep_chown.stop)
self.privsep_chown.start()
def test_ami(self):
# Assign different image_ref from nova/images/fakes for testing ami
self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
self.instance_ref.system_metadata = \
utils.get_system_metadata_from_image(
{'disk_format': 'ami'})
self._test_snapshot(disk_format='ami')
@mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw')
@mock.patch.object(libvirt_driver.imagebackend.images,
'convert_image',
side_effect=_fake_convert_image)
def test_raw(self, mock_convert_image):
self._test_snapshot(disk_format='raw')
def test_qcow2(self):
self._test_snapshot(disk_format='qcow2')
@mock.patch.object(fake_libvirt_utils, 'disk_type', new='ploop')
@mock.patch.object(libvirt_driver.imagebackend.images,
'convert_image',
side_effect=_fake_convert_image)
def test_ploop(self, mock_convert_image):
self._test_snapshot(disk_format='ploop')
def test_no_image_architecture(self):
self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
self._test_snapshot(disk_format='qcow2')
def test_no_original_image(self):
self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa'
self._test_snapshot(disk_format='qcow2')
def test_snapshot_metadata_image(self):
# Assign an image with an architecture defined (x86_64)
self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379'
extra_properties = {'architecture': 'fake_arch',
'key_a': 'value_a',
'key_b': 'value_b',
'os_type': 'linux'}
self._test_snapshot(disk_format='qcow2',
extra_properties=extra_properties)
@mock.patch.object(libvirt_driver.LOG, 'exception')
def test_snapshot_update_task_state_failed(self, mock_exception):
res = [None, exception.InstanceNotFound(instance_id='foo')]
self.mock_update_task_state.side_effect = res
self.assertRaises(exception.InstanceNotFound, self._test_snapshot,
disk_format='qcow2')
self.assertFalse(mock_exception.called)
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(host.Host, 'write_instance_config')
def test_failing_domain_not_found(self, mock_write_config, mock_get_guest):
self.flags(disable_libvirt_livesnapshot=False, group='workarounds')
mock_dev = mock.Mock(spec=libvirt_guest.BlockDevice)
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_guest.get_power_state.return_value = power_state.RUNNING
mock_guest.get_block_device.return_value = mock_dev
mock_guest._domain = mock.Mock()
mock_get_guest.return_value = mock_guest
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"No such domain",
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
mock_dev.rebase.side_effect = ex
self.assertRaises(exception.InstanceNotFound, self._test_snapshot,
disk_format='qcow2')
@mock.patch.object(rbd_utils, 'RBDDriver')
@mock.patch.object(rbd_utils, 'rbd')
def test_raw_with_rbd_clone(self, mock_rbd, mock_driver):
self.flags(images_type='rbd', group='libvirt')
rbd = mock_driver.return_value
rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
with mock.patch.object(fake_libvirt_utils, 'find_disk',
return_value=('rbd://some/fake/rbd/image',
'raw')):
with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'):
self._test_snapshot(disk_format='raw')
rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')
@mock.patch.object(rbd_utils, 'RBDDriver')
@mock.patch.object(rbd_utils, 'rbd')
def test_raw_with_rbd_clone_graceful_fallback(self, mock_rbd, mock_driver):
self.flags(images_type='rbd', group='libvirt')
rbd = mock_driver.return_value
rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable(
image_id='fake_id', reason='rbd testing'))
with test.nested(
mock.patch.object(libvirt_driver.imagebackend.images,
'convert_image',
side_effect=_fake_convert_image),
mock.patch.object(fake_libvirt_utils, 'find_disk',
return_value=('rbd://some/fake/rbd/image',
'raw')),
mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
self._test_snapshot(disk_format='raw')
self.assertFalse(rbd.clone.called)
@mock.patch.object(rbd_utils, 'RBDDriver')
@mock.patch.object(rbd_utils, 'rbd')
def test_raw_with_rbd_clone_eperm(self, mock_rbd, mock_driver):
self.flags(images_type='rbd', group='libvirt')
rbd = mock_driver.return_value
rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
rbd.clone = mock.Mock(side_effect=exception.Forbidden(
image_id='fake_id', reason='rbd testing'))
with test.nested(
mock.patch.object(libvirt_driver.imagebackend.images,
'convert_image',
side_effect=_fake_convert_image),
mock.patch.object(fake_libvirt_utils, 'find_disk',
return_value=('rbd://some/fake/rbd/image',
'raw')),
mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
self._test_snapshot(disk_format='raw')
# Ensure that the direct_snapshot attempt was cleaned up
rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=False,
pool='b', force=True)
@mock.patch.object(rbd_utils, 'RBDDriver')
@mock.patch.object(rbd_utils, 'rbd')
def test_raw_with_rbd_clone_post_process_fails(self, mock_rbd,
mock_driver):
self.flags(images_type='rbd', group='libvirt')
rbd = mock_driver.return_value
rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
with test.nested(
mock.patch.object(fake_libvirt_utils, 'find_disk',
return_value=('rbd://some/fake/rbd/image',
'raw')),
mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'),
mock.patch.object(self.image_service, 'update',
side_effect=test.TestingException)):
self.assertRaises(test.TestingException, self._test_snapshot,
disk_format='raw')
rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')
# Ensure that the direct_snapshot attempt was cleaned up
rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=True,
pool='b', force=True)
@mock.patch.object(imagebackend.Image, 'direct_snapshot')
@mock.patch.object(imagebackend.Image, 'resolve_driver_format')
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(host.Host, 'get_guest')
def test_raw_with_rbd_clone_is_live_snapshot(self,
mock_get_guest,
mock_version,
mock_resolve,
mock_snapshot):
self.flags(disable_libvirt_livesnapshot=False, group='workarounds')
self.flags(images_type='rbd', group='libvirt')
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_guest._domain = mock.Mock()
mock_get_guest.return_value = mock_guest
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
recv_meta = self._create_image()
with mock.patch.object(driver, "suspend") as mock_suspend:
driver.snapshot(self.context, self.instance_ref, recv_meta['id'],
self.mock_update_task_state)
self.assertFalse(mock_suspend.called)
@mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image',
side_effect=_fake_convert_image)
@mock.patch.object(fake_libvirt_utils, 'find_disk')
@mock.patch.object(imagebackend.Image, 'resolve_driver_format')
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(rbd_utils, 'RBDDriver')
@mock.patch.object(rbd_utils, 'rbd')
def test_raw_with_rbd_clone_failure_does_cold_snapshot(self,
mock_rbd,
mock_driver,
mock_get_guest,
mock_version,
mock_resolve,
mock_find_disk,
mock_convert):
self.flags(disable_libvirt_livesnapshot=False, group='workarounds')
self.flags(images_type='rbd', group='libvirt')
rbd = mock_driver.return_value
rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable(
image_id='fake_id', reason='rbd testing'))
mock_find_disk.return_value = ('rbd://some/fake/rbd/image', 'raw')
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_guest.get_power_state.return_value = power_state.RUNNING
mock_guest._domain = mock.Mock()
mock_get_guest.return_value = mock_guest
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
recv_meta = self._create_image()
with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'):
with mock.patch.object(driver, "suspend") as mock_suspend:
driver.snapshot(self.context, self.instance_ref,
recv_meta['id'], self.mock_update_task_state)
self.assertTrue(mock_suspend.called)
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_cold_snapshot_based_on_power_state(
self, mock_version, mock_get_guest):
"""Tests that a cold snapshot is attempted because the guest power
state is SHUTDOWN or PAUSED.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
image = self._create_image()
for p_state in (power_state.SHUTDOWN, power_state.PAUSED):
mock_guest = mock.Mock(spec=libvirt_guest.Guest)
mock_guest.get_power_state.return_value = p_state
mock_guest._domain = mock.Mock()
mock_get_guest.return_value = mock_guest
# Make _prepare_domain_for_snapshot short-circuit and fail, we just
# want to know that it was called with the correct live_snapshot
# argument based on the power_state.
with mock.patch.object(
drvr, '_prepare_domain_for_snapshot',
side_effect=test.TestingException) as mock_prep:
self.assertRaises(test.TestingException,
drvr.snapshot, self.context,
self.instance_ref, image['id'],
self.mock_update_task_state)
mock_prep.assert_called_once_with(
self.context, False, p_state, self.instance_ref)
class LXCSnapshotTests(LibvirtSnapshotTests):
"""Repeat all of the Libvirt snapshot tests, but with LXC enabled"""
def setUp(self):
super(LXCSnapshotTests, self).setUp()
self.flags(virt_type='lxc', group='libvirt')
def test_raw_with_rbd_clone_failure_does_cold_snapshot(self):
self.skipTest("managedSave is not supported with LXC")
class LVMSnapshotTests(_BaseSnapshotTests):
@mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm')
@mock.patch.object(libvirt_driver.imagebackend.images,
'convert_image',
side_effect=_fake_convert_image)
@mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
def _test_lvm_snapshot(self, disk_format, mock_volume_info,
mock_convert_image):
self.flags(images_type='lvm',
images_volume_group='nova-vg', group='libvirt')
self._test_snapshot(disk_format=disk_format)
mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')])
mock_convert_image.assert_called_once_with(
'/dev/nova-vg/lv', mock.ANY, 'raw', disk_format,
run_as_root=True)
def test_raw(self):
self._test_lvm_snapshot('raw')
def test_qcow2(self):
self.flags(snapshot_image_format='qcow2', group='libvirt')
self._test_lvm_snapshot('qcow2')
class TestLibvirtMultiattach(test.NoDBTestCase):
"""Libvirt driver tests for volume multiattach support."""
def setUp(self):
super(TestLibvirtMultiattach, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
@mock.patch('nova.virt.libvirt.host.Host.has_min_version',
return_value=True)
def test_init_host_supports_multiattach_new_enough_libvirt(self, min_ver):
"""Tests that the driver supports multiattach because libvirt>=3.10.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._set_multiattach_support()
self.assertTrue(drvr.capabilities['supports_multiattach'])
min_ver.assert_called_once_with(
lv_ver=libvirt_driver.MIN_LIBVIRT_MULTIATTACH)
@mock.patch('nova.virt.libvirt.host.Host.has_min_version',
side_effect=[False, False])
def test_init_host_supports_multiattach_old_enough_qemu(self, min_ver):
"""Tests that the driver supports multiattach because qemu<2.10.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._set_multiattach_support()
self.assertTrue(drvr.capabilities['supports_multiattach'])
calls = [mock.call(lv_ver=libvirt_driver.MIN_LIBVIRT_MULTIATTACH),
mock.call(hv_ver=(2, 10, 0))]
min_ver.assert_has_calls(calls)
# FIXME(mriedem): This test intermittently fails when run at the same time
# as LibvirtConnTestCase, presumably because of shared global state on the
# version check.
# @mock.patch('nova.virt.libvirt.host.Host.has_min_version',
# side_effect=[False, True])
# def test_init_host_supports_multiattach_no_support(self,
# has_min_version):
# """Tests that the driver does not support multiattach because
# qemu>=2.10 and libvirt<3.10.
# """
# drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
# drvr._set_multiattach_support()
# self.assertFalse(drvr.capabilities['supports_multiattach'])
# calls = [mock.call(lv_ver=libvirt_driver.MIN_LIBVIRT_MULTIATTACH),
# mock.call(hv_ver=(2, 10, 0))]
# has_min_version.assert_has_calls(calls)
| 45.843176 | 92 | 0.593375 |
4a23f60ae4d4981c154f86980c84b697fda1cf57 | 9,515 | py | Python | infdist/simulator/experiment/legacy/trial.py | zeroos/infdist | 5fca2c42bbe5ea650866a26568d1eaf240b2b47e | [
"MIT"
] | null | null | null | infdist/simulator/experiment/legacy/trial.py | zeroos/infdist | 5fca2c42bbe5ea650866a26568d1eaf240b2b47e | [
"MIT"
] | null | null | null | infdist/simulator/experiment/legacy/trial.py | zeroos/infdist | 5fca2c42bbe5ea650866a26568d1eaf240b2b47e | [
"MIT"
] | null | null | null | from copy import deepcopy
from optimization.agent import ( # NOQA
EstimatingAgent,
FixedRatioAgent,
FullCommAgent,
FullKnowledgeAgent,
GreedyConstrainedAgent,
)
from simulator.network import NS3Network
from simulator import simulator
from optimization import missions, simplesim
from optimization.models import MessageSet
class Trial:
def __init__(self, nodes_num, t_end, msgset):
self.nodes_num = nodes_num
self.t_end = t_end
self.msgset = msgset
self.agent_cls = FullCommAgent
self.agent_kwargs = {}
self.messages = None
self.ctx = None
self.net = None
self.agents = None
self.constraints = {}
self.now_func = simulator.now_float
self.network_data_rate = 5.5
def create_agent(self, i):
return self.agent_cls(i, self.net, self.ctx, self.now_func,
**self.agent_kwargs)
def agent_stats(self):
return {
agent: (
len(agent.received_messages),
self.ctx.utility(agent.received_messages).value(),
)
for agent in self.agents
}
def stats(self):
total_utility = self.ctx.utility(self.all_received_messages()).value()
no_duplicates = MessageSet(
self.all_received_messages().t_end,
list(set(self.all_received_messages().all())),
)
latencies = [
m.t_rcv - m.t_gen
for m in self.all_received_messages().all()
]
avg_latency = sum(latencies)/(len(latencies) or 1)
constraints = {}
for name, constraint in self.constraints.items():
constraints[name] = constraint(no_duplicates)
all_messages = deepcopy(self.messages)
simplesim.apply_latency(all_messages, 0)
return {
'all_messages': all_messages,
't_end': self.t_end,
'no_duplicates': no_duplicates,
'all_received_messages': self.all_received_messages(),
'received_num': sum(
[len(agent.received_messages) for agent in self.agents]
),
'sent_num': sum(
[len(agent.sent_messages) for agent in self.agents]
),
'sent_received_num': len(no_duplicates),
'total_utility': total_utility,
'normalized_utility': total_utility/len(self.agents)/self.t_end,
'total_messages': len(self.messages),
'constraints': constraints,
'max_utility': self.ctx.utility(all_messages).value(),
'avg_latency': avg_latency,
'agents_num': len(self.agents),
}
def all_generated_messages(self):
result = MessageSet(0, [])
for agent in self.agents:
result += agent.generated_messages
return result
def all_received_messages(self):
result = MessageSet(0, [])
for agent in self.agents:
result += agent.received_messages
return result
@staticmethod
def print_stats(stats):
print(
(
"Received # {}, sent: {}, "
"total utility: {}, "
"normalized utility: {}"
).format(
stats['received_num'],
stats['sent_num'],
stats['total_utility'],
stats['normalized_utility'],
)
)
print((
"Received {:.0f}% of all messages, "
"{:.0f}% of sent messages.").format(
stats['sent_received_num']/stats['total_messages']*100,
stats['sent_received_num']/(stats['sent_num'] or 1)*100,
))
print("AVG data rate: {:.3f} Mbps with avg latency of {}".format(
sum([m.size for m in stats['no_duplicates'].all()]) * 8 / 10**6
/ stats['t_end'],
stats['avg_latency'],
))
print("Max utility: {}".format(
stats['max_utility']
))
for name, constraint_violations in stats['constraints'].items():
if constraint_violations > 0:
print("!!! {} constraint NOT met ({} times)".format(
name, constraint_violations
))
def finish_mission(self):
real_t_end = simulator.now_float()
for a in self.agents:
a.finish_mission(real_t_end)
@staticmethod
def generate_messages_from_msgset(msgset, t_end, nodes_num):
msgset_type = msgset.get('type', '3D_reconstruction')
seed = msgset.get('seed', 0)
if msgset_type == '3D_reconstruction':
messages, ctx = \
missions.generate_simple_3D_reconstruction(
t_end,
msgset=msgset,
senders=set(range(nodes_num)),
seed=seed,
)
elif msgset_type == 'serialized':
messages = msgset['messages']
ctx = msgset['ctx']
return messages, ctx
def prepare_messages(self):
if self.messages is not None:
assert self.ctx is not None
return # already prepared
self.messages, self.ctx = self.generate_messages_from_msgset(
self.msgset, self.t_end, self.nodes_num,
)
def prepare_agents(self):
assert self.net is not None, "Network has to be prepared before agents"
if self.agents is not None:
return # already prepared
self.agents = [
self.create_agent(i)
for i in range(self.nodes_num)
]
def prepare_network(self):
if self.net is not None:
return # already prepared
self.net = NS3Network(self.nodes_num, self.network_data_rate)
def print_progress(self):
print(
f" {self.now_func():.02f}s "
f"({self.now_func()/self.t_end*100:.02f}%)",
end="\r"
)
def run(self):
self.prepare_messages()
self.prepare_network()
self.prepare_agents() # this has to be done after network
for i in range(self.nodes_num):
self.net.add_message_received_callback(
self.agents[i].gen_message_received_callback(),
i
)
for m in self.messages.all():
# print("Scheduling sending at {} by {}".format(m.t_gen, m.sender))
native_message = self.net.serialize(m)
agent = self.agents[m.sender]
simulator.schedule(m.t_gen, agent.gen_generate_message_callback(
native_message
))
simulator.schedule(
m.t_gen,
self.print_progress
)
simulator.schedule(self.t_end, self.finish_mission)
simulator.stop(self.t_end+1)
simulator.run()
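# Illustrative usage sketch (added for exposition; the exact msgset keys are
# defined by optimization.missions.generate_simple_3D_reconstruction and are
# assumed here rather than taken from this file):
#
#   trial = Trial(nodes_num=4, t_end=30.0,
#                 msgset={'type': '3D_reconstruction', 'seed': 0})
#   trial.run()
#   Trial.print_stats(trial.stats())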
class FixedRatioTrial(Trial):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.agent_cls = FixedRatioAgent
def set_drop_rate(self, drop_rate):
self.agent_kwargs = {'drop_ratio': drop_rate}
class GreedyTrial(Trial):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.agent_cls = GreedyConstrainedAgent
self.agent_kwargs = {
'constraints': {},
}
@property
def constraints(self):
return self.agent_kwargs['constraints']
@constraints.setter
def constraints(self, value):
self.agent_kwargs['constraints'] = value
class TreeTrial(Trial):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.prepare_messages()
self.agent_cls = EstimatingAgent
self.agent_kwargs = {
'agents': {
ident: lambda t: set()
for ident in range(self.nodes_num)
},
'constraints': {},
'window_size': 1000,
}
self.drop_rate_set = False
self.throughput_set = False
@property
def constraints(self):
return self.agent_kwargs['constraints']
@constraints.setter
def constraints(self, value):
self.agent_kwargs['constraints'] = value
def add_msgnum_constraint(self, messages_num, timeslot_length):
# self.agent_kwargs['window_size'] = timeslot_length
self.constraints = {
'MSGNUM': simplesim.create_msgnum_constraint_violations(
messages_num, timeslot_length
),
}
def add_throughput_constraint(self, throughput, timeslot_length):
# self.agent_kwargs['window_size'] = timeslot_length
self.constraints = {
'TPUT': simplesim.create_throughput_constraint_violations(
throughput, timeslot_length,
),
}
return self.constraints
def set_throughput(self, throughput):
return self.add_throughput_constraint(throughput, 2.5)
def set_drop_rate(self, drop_rate):
assert not self.drop_rate_set
timeslot_length = 2.5
avg_msgs_per_second = 1.5*len(self.messages)/self.t_end
self.add_msgnum_constraint(
(1-drop_rate)*(timeslot_length)*avg_msgs_per_second,
timeslot_length
)
def set_simulations_num(self, value):
self.agent_kwargs['simulations_num'] = value
def set_suppress_warnings(self, value):
self.agent_kwargs['suppress_warnings'] = value
| 31.611296 | 79 | 0.575617 |
4a23f63da5d360ccac4fa801eeba104c2f980dca | 11,701 | py | Python | pyLHD/OLHD.py | toledo60/pyLHD | 40df7f2015e06e9e1190cce49c68f17068b86070 | [
"MIT"
] | 1 | 2021-11-20T17:33:43.000Z | 2021-11-20T17:33:43.000Z | pyLHD/OLHD.py | toledo60/pyLHD | 40df7f2015e06e9e1190cce49c68f17068b86070 | [
"MIT"
] | 1 | 2021-11-20T18:43:01.000Z | 2021-11-20T20:04:48.000Z | pyLHD/OLHD.py | toledo60/pyLHD | 40df7f2015e06e9e1190cce49c68f17068b86070 | [
"MIT"
] | null | null | null | import numpy as np
import pyLHD
# --- Orthogonal Latin Hypercube Designs --- #
# --- Butler, N.A. (2001) Construction --- #
def OLHD_Butler01(nrows, ncols):
""" Orthogonal Latin Hypercube Design (OLHD). Based on the construction method of Butler (2001)
Args:
nrows (int): A positive integer specifying the number of rows
        ncols (int): A positive integer specifying the number of columns
Raises:
        ValueError: If ncols is not less than nrows
ValueError: If nrows is not greater than or equal to 3
ValueError: If nrows is not an odd prime number
Returns:
numpy.ndarray: A (nrows by ncols) orthogonal LHD
Examples:
Create an orthogonal LHD with nrows =11 and ncols =5
>>> pyLHD.OLHD_Butler01(nrows=11,ncols=5)
        Create an orthogonal LHD with nrows = 7 and ncols = 6
>>> pyLHD.OLHD_Butler01(nrows=7,ncols=6)
"""
if ncols >= nrows:
raise ValueError("ncols must be less than or equal to nrows")
if nrows < 3:
raise ValueError("nrows must be greater than or equal to 3")
if (not pyLHD.is_prime(nrows) or nrows % 2 != 1):
raise ValueError("nrows must be an odd prime number")
n0 = int((nrows-1)/2)
rng = np.random.default_rng()
if ncols <= n0:
seq = np.arange(start=1, stop=n0+1)
g = rng.choice(seq, ncols, replace=False)
W = np.zeros((nrows, ncols))
for i in range(nrows):
for j in range(ncols):
if (nrows % 4 == 1):
W[i, j] = ((i+1)*g[j] + (nrows-1)/4) % nrows
if(nrows % 4 == 3):
W[i, j] = ((i+1) * g[j] + (3*nrows - 1)/4) % nrows
X = pyLHD.williams_transform(W)
else:
g0 = np.arange(start=1, stop=n0+1)
W0 = np.zeros((nrows, n0))
for i in range(nrows):
for j in range(n0):
if (nrows % 4 == 1):
W0[i, j] = ((i+1)*g0[j] + (nrows-1)/4) % nrows
if (nrows % 4 == 3):
W0[i, j] = ((i+1)*g0[j] + (3*nrows-1)/4) % nrows
X0 = pyLHD.williams_transform(W0)
r = ncols - n0
seq = np.arange(start=1, stop=n0+1)
g1 = rng.choice(seq, r, replace=False)
W1 = np.zeros((nrows, r))
for i in range(nrows):
for j in range(r):
W1[i, j] = ((i+1)*g1[j]) % nrows
X1 = pyLHD.williams_transform(W1)
X = np.column_stack((X0, X1))
return X
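# Illustrative usage sketch (added for exposition; not part of the original
# module). For an orthogonal LHD the off-diagonal column correlations should
# be (near) zero.
def _example_OLHD_Butler01():
    X = OLHD_Butler01(nrows=11, ncols=5)   # 11 runs, 5 factors
    return np.corrcoef(X, rowvar=False)    # off-diagonal entries ~ 0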
# --- Sun et al. (2010) Construction --- #
def OLHD_Sun10(C, r, type='odd'):
"""Orthogonal Latin Hypercube Design (OLHD). Based on the construction method of Sun et al. (2010)
Args:
        C (int): A positive integer.
        r (int): A positive integer.
        type (str, optional): Determines whether the run size of the design is odd or even. Defaults to 'odd'.
If (type) is 'odd' the run size of the OLHD will be (r*2^(C+1)+1). If (type) is 'even' the run size of
the OLHD will be (r*2^(C+1))
Returns:
numpy.ndarray: An orthogonal LHD with the following run size: (r*2^(C+1)+1) if type ='odd', or (r*2^(C+1)) if type ='even'.
The resulting columns will be (2^(C))
Examples:
Create an orthogonal LHD with C=3, r=3, type = 'odd'
So n = (3*2^(3+1) )+1 = 49 (rows) and k=2^(3)=8 (columns)
>>> pyLHD.OLHD_Sun10(C=3,r=3,type='odd')
Create an orthogonal LHD with C=3, r=3, type = 'even'
So n = 3*2^(3+1) = 48 (rows) and k=2^(3)=8 (columns)
>>> pyLHD.OLHD_Sun10(C=3,r=3,type='even')
"""
Sc = np.array([[1, 1], [1, -1]])
Tc = np.array([[1, 2], [2, -1]])
if C >= 2:
counter = 2
while counter <= C:
Sc_star = Sc.copy()
Tc_star = Tc.copy()
index = int((Sc_star.shape[0])/2)
for i in range(index):
Sc_star[i, :] = -1*Sc_star[i, :]
Tc_star[i, :] = -1*Tc_star[i, :]
a = np.vstack((Sc, Sc))
b = np.vstack((-1*Sc_star, Sc_star))
c = np.vstack((Tc, Tc + Sc*2**(counter-1)))
d = np.vstack((-1*(Tc_star + Sc_star*2**(counter-1)), Tc_star))
Sc = np.hstack((a, b))
Tc = np.hstack((c, d))
counter = counter+1
if type == 'odd':
A = [Tc.copy() + Sc.copy()*(i)*2**(C) for i in range(r)]
A_vstack = np.vstack(A)
CP = np.zeros((1,2**C))
X = np.concatenate((A_vstack, CP, (-1)*A_vstack), axis=0)
if type == 'even':
Hc = Tc.copy() - Sc.copy()*0.5
B = [Hc + Sc.copy()*(i)*2**(C) for i in range(r)]
B_vstack = np.vstack(B)
X = np.vstack((B_vstack,-B_vstack))
return X
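# Illustrative usage sketch (added for exposition; not part of the original
# module). It only checks the advertised run and factor sizes.
def _example_OLHD_Sun10():
    X_odd = OLHD_Sun10(C=3, r=3, type='odd')    # shape (3*2**4 + 1, 2**3) = (49, 8)
    X_even = OLHD_Sun10(C=3, r=3, type='even')  # shape (3*2**4, 2**3) = (48, 8)
    return X_odd.shape, X_even.shape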
# --- Cioppa and Lucas (2007) Construction --- #
def OLHD_Cioppa07(m):
"""Orthogonal Latin Hyercube Design. Based on the construction method of Cioppa and Lucas (2007)
Args:
m (int): A positive integer, and it must be greater than or equal to 2
Raises:
ValueError: If m is not greater than or equal to 2
Returns:
numpy.ndarray: An orthogonal LHD with the following run size: (n=2^m + 1) and
factor size: (k= m+ (m-1 choose 2))
Examples:
# Create an orthogonal LHD with m=4. So n=2^m+1=17 runs and k=4+3=7 factors
>>> pyLHD.OLHD_Cioppa07(m=4)
        # Create an orthogonal LHD with m=5. So n=2^m+1=33 runs and k=5+6=11 factors
>>> pyLHD.OLHD_Cioppa07(m=5)
"""
if m < 2:
raise ValueError('m must be greater than or equal to 2')
q = 2**(m-1)
# construction of M starts
e = np.arange(1, q+1).reshape(-1,1)
I = np.eye(2)
R = np.array([[0,1],[1,0]])
AL = np.zeros((m-1,q,q)) #there are m-1 of AL's
if m==2:
AL[m-2] = R.copy()
M = np.hstack( (e, np.matmul(AL[m-2],e) ))
if m > 2:
for i in range(m-2):
a = 1
b = 1
for j in range(m-1-(i+1)):
a = np.kron(a,I)
for k in range(i+1):
b = np.kron(b,R)
AL[i] = np.kron(a,b)
c = 1
for l in range(m-1):
c = np.kron(c,R)
AL[m-2] = c.copy()
M = e.copy()
for i in range(m-1):
M = np.hstack( (M,np.matmul(AL[i],e) ) )
for i in range(m-1):
for j in range(i+1,m-1):
M= np.hstack((M,AL[i] @ AL[j] @ e))
# construction of M ends
# Construction of S starts
j = np.ones(q).reshape(-1,1)
ak = np.zeros((m-1,q,1))
B = np.ones((m-1,2,1))
if m==2:
B[m-2,0,:]=-1
ak[m-2] = B[0]
S = np.hstack((j,ak[m-2]))
if m > 2:
for i in range(m-1):
temp = B.copy()
temp[m-(i+2),0,:] = -1
d=1
for k in range(m-1):
d = np.kron(d,temp[k])
ak[i] = d.copy()
S = j.copy()
for i in range(m-1):
S = np.hstack((S,ak[i]))
for i in range(m-2):
for j in range(i+1,m-1):
S = np.hstack((S,ak[i]*ak[j]) )
# construction of S ends
# construction of T starts
if m==2:
T0 = np.zeros((q,2))
for i in range(q):
for k in range(2):
T0[i,k] = M[i,k] * S[i,k]
CP = np.zeros((1,2))
if m>2:
T0 = np.zeros((q,m+pyLHD.comb(m-1,2)))
for i in range(q):
for k in range(m+pyLHD.comb(m-1,2)):
T0[i,k] = M[i,k]*S[i,k]
# Construction of T ends
CP = np.zeros((1,m+pyLHD.comb(m-1,2)))
X = np.vstack((T0,CP,-T0))
return X
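# Illustrative usage sketch (added for exposition; not part of the original
# module): m=4 gives n = 2**4 + 1 = 17 runs and k = 4 + 3 = 7 factors.
def _example_OLHD_Cioppa07():
    X = OLHD_Cioppa07(m=4)
    return X.shape                          # expected (17, 7)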
# --- Ye (1998) Construction --- #
def OLHD_Ye98(m):
"""Orthogonal Latin Hyercube Design. Based on the construction method of Ye (1998)
Args:
m (int): A positive integer, and it must be greater than or equal to 2
Raises:
ValueError: If m is not greater than or equal to 2
Returns:
numpy.ndarray: An orthogonal LHD with the following run size: (n=2^m + 1) and
factor size: (k=2m-2)
Examples:
        # Create an orthogonal LHD with m=3. So n=2^m+1=9 runs and k=2*m-2=4 factors
>>> pyLHD.OLHD_Ye98(m=3)
        # Create an orthogonal LHD with m=4. So n=2^m+1=17 runs and k=2*m-2=6 factors
>>> pyLHD.OLHD_Ye98(m=4)
"""
if m < 2:
raise ValueError('m must be greater than or equal to 2')
rng = np.random.default_rng()
q = 2**(m-1)
# construction of M starts
e = rng.choice(np.arange(1,q+1),q,replace=False).reshape(-1,1)
I = np.eye(2)
R = np.array([[0,1],[1,0]])
AL = np.zeros((m-1,q,q)) #there are m-1 of AL's
if m==2:
AL[m-2] = R.copy()
M = np.hstack( (e, np.matmul(AL[m-2],e) ))
if m > 2:
for i in range(m-2):
a = 1
b = 1
for _ in range(m-1-(i+1)):
a = np.kron(a,I)
for _ in range(i+1):
b = np.kron(b,R)
AL[i] = np.kron(a,b)
c = 1
for _ in range(m-1):
c = np.kron(c,R)
AL[m-2] = c.copy()
M = e.copy()
for i in range(m-1):
M = np.hstack( (M,np.matmul(AL[i],e) ) )
for i in range(m-2):
M= np.hstack((M,AL[i] @ AL[m-2] @ e))
# construction of M ends
# Construction of S starts
j = np.ones(q).reshape(-1,1)
ak = np.zeros((m-1,q,1))
B = np.ones((m-1,2,1))
if m==2:
B[:,0,m-2]=-1
ak[m-2] = B[0]
S = np.hstack((j,ak[m-2]))
if m > 2:
for i in range(m-1):
temp = B.copy()
temp[m-(i+2),0,:] = -1
d=1
for k in range(m-1):
d = np.kron(d,temp[k])
ak[i] = d.copy()
S = j.copy()
for i in range(m-1):
S = np.hstack((S,ak[i]))
for i in range(1,m-1):
S = np.hstack((S,ak[0]*ak[i]) )
# construction of S ends
# construction of T starts
T0 = np.zeros((q,2*m-2))
for i in range(q):
for k in range(2*m-2):
T0[i,k] = M[i,k]*S[i,k]
    # construction of T ends
CP = np.zeros((1,2*m-2))
X = np.vstack((T0,CP,-T0))
return X
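# Illustrative usage sketch (added for exposition; not part of the original
# module): m=4 gives n = 2**4 + 1 = 17 runs and k = 2*4 - 2 = 6 factors.
def _example_OLHD_Ye98():
    X = OLHD_Ye98(m=4)
    return X.shape                          # expected (17, 6)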
# --- Lin et al. (2009) Construction --- #
def OLHD_Lin09(OLHD,OA):
"""Orthogonal Latin Hypercube Design. Based on the construction method of Lin et al. (2009)
Args:
        OLHD (numpy.ndarray): An orthogonal Latin hypercube design with run size (n) and factor size (p),
and it will be coupled with the input orthogonal array
        OA (numpy.ndarray): An orthogonal array, with (n^2) rows, (2f) columns, (n) symbols,
strength two and index unity is available, which can be denoted as OA(n^2,2f,n,2)
Returns:
numpy.ndarray: orthogonal Latin hypercube design with the following run size: (n^2)
and the following factor size: (2fp)
Examples:
# Create a 5 by 2 OLHD
        >>> OLHD_example = pyLHD.OLHD_Cioppa07(m=2)
# Create an OA(25,6,5,2)
>>> OA_example = np.array([ [2,2,2,2,2,1],[2,1,5,4,3,5],
[3,2,1,5,4,5],[1,5,4,3,2,5],
[4,1,3,5,2,3],[1,2,3,4,5,2],
[1,3,5,2,4,3],[1,1,1,1,1,1],
[4,3,2,1,5,5],[5,5,5,5,5,1],
[4,4,4,4,4,1],[3,1,4,2,5,4],
[3,3,3,3,3,1],[3,5,2,4,1,3],
[3,4,5,1,2,2],[5,4,3,2,1,5],
[2,3,4,5,1,2],[2,5,3,1,4,4],
[1,4,2,5,3,4],[4,2,5,3,1,4],
[2,4,1,3,5,3],[5,3,1,4,2,4],
[5,2,4,1,3,3],[5,1,2,3,4,2],
[4,5,1,2,3,2] ])
# Construct a 25 by 12 OLHD
>>> pyLHD.OLHD_Lin09(OLHD = OLHD_example,OA = OA_example)
"""
n1 = OLHD.shape[0]
k = OLHD.shape[1]
n2 = np.unique(OA[:,0]).size
f = int(OA.shape[1]*0.5)
l = []
for i in range(k):
l.append(OA.copy())
A = np.stack(l)
M = np.zeros((k,n2**2,2*f))
V = np.array([[1,-n2],[n2,1]])
for i in range(k):
for j in range(n2):
for m in range(2*f):
location = np.where(A[i,:,m]==(j+1))
A[i,location,m] = OLHD[j,i]
M_list = []
for i in range(k):
for j in range(f):
M[i,:,2*(j+1)-2:2*(j+1)] = A[i,:,2*(j+1)-2:2*(j+1)] @ V
M_list.append(M[i])
return np.hstack(M_list) | 25.436957 | 129 | 0.514571 |
4a23f881d2c8835da2ee2d0fc26e99dd5a5a1b33 | 852 | py | Python | setup.py | lp6m/openvino2tensorflow | b540b89ee1a968a48219f4761e1d5e691293fd2c | [
"MIT"
] | null | null | null | setup.py | lp6m/openvino2tensorflow | b540b89ee1a968a48219f4761e1d5e691293fd2c | [
"MIT"
] | null | null | null | setup.py | lp6m/openvino2tensorflow | b540b89ee1a968a48219f4761e1d5e691293fd2c | [
"MIT"
] | null | null | null | from setuptools import setup, Extension
from setuptools import find_packages
from os import listdir
with open("README.md") as f:
long_description = f.read()
scripts = ["scripts/"+i for i in listdir("scripts")]
if __name__ == "__main__":
setup(
name="openvino2tensorflow",
scripts=scripts,
version="1.15.1",
description="This script converts the OpenVINO IR model to Tensorflow's saved_model, tflite, h5 and pb. in (NCHW) format",
long_description=long_description,
long_description_content_type="text/markdown",
author="Katsuya Hyodo",
author_email="[email protected]",
url="https://github.com/PINTO0309/openvino2tensorflow",
license="MIT License",
packages=find_packages(),
platforms=["linux", "unix"],
python_requires=">3.6",
)
| 32.769231 | 130 | 0.664319 |
4a23f8f66a144cf9f035040f95e52e169fd0802f | 923 | py | Python | fedml_core/distributed/communication/gRPC/grpc_server.py | lawsuisum/FedML | 2a7cacbf0b74307f9adedcceb3b0fb6f92c41067 | [
"Apache-2.0"
] | 1 | 2021-08-10T13:16:36.000Z | 2021-08-10T13:16:36.000Z | fedml_core/distributed/communication/gRPC/grpc_server.py | lawsuisum/FedML | 2a7cacbf0b74307f9adedcceb3b0fb6f92c41067 | [
"Apache-2.0"
] | null | null | null | fedml_core/distributed/communication/gRPC/grpc_server.py | lawsuisum/FedML | 2a7cacbf0b74307f9adedcceb3b0fb6f92c41067 | [
"Apache-2.0"
] | null | null | null | from FedML.fedml_core.distributed.communication.gRPC import grpc_comm_manager_pb2, grpc_comm_manager_pb2_grpc
import queue
import threading
lock = threading.Lock()
class GRPCCOMMServicer(grpc_comm_manager_pb2_grpc.gRPCCommManagerServicer):
def __init__(self, host, port, client_num, client_id):
self.host = host
self.port = port
self.client_num = client_num
self.client_id = client_id
self.message_q = queue.Queue()
def sendMessage(self, request, context):
print("client_{} got something from client_{}".format(
self.client_id,
request.client_id
))
response = grpc_comm_manager_pb2.CommResponse()
response.message = "message received"
lock.acquire()
self.message_q.put(request.message)
lock.release()
return response
def handleReceiveMessage(self, request, context):
pass | 32.964286 | 109 | 0.684724 |
4a23f93cc42b489cee8cc385866c9b22cb022077 | 1,694 | py | Python | app/tests/autopilot/Scriptor/tests/__init__.py | Funmungus/scriptor | 47f94211c36a12861f53db998c5d875c93714517 | [
"BSD-2-Clause"
] | null | null | null | app/tests/autopilot/Scriptor/tests/__init__.py | Funmungus/scriptor | 47f94211c36a12861f53db998c5d875c93714517 | [
"BSD-2-Clause"
] | null | null | null | app/tests/autopilot/Scriptor/tests/__init__.py | Funmungus/scriptor | 47f94211c36a12861f53db998c5d875c93714517 | [
"BSD-2-Clause"
] | null | null | null | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
"""Ubuntu Touch App Autopilot tests."""
import os
import logging
import Scriptor
from autopilot.testcase import AutopilotTestCase
from autopilot import logging as autopilot_logging
import ubuntuuitoolkit
from ubuntuuitoolkit import base
logger = logging.getLogger(__name__)
class BaseTestCase(AutopilotTestCase):
"""A common test case class
"""
local_location = os.path.dirname(os.path.dirname(os.getcwd()))
local_location_qml = os.path.join(local_location, 'Main.qml')
click_package = '{0}.{1}'.format('Scriptor', 'NewParadigmSoftware')
def setUp(self):
super(BaseTestCase, self).setUp()
self.launcher, self.test_type = self.get_launcher_and_type()
self.app = Scriptor.TouchApp(self.launcher(), self.test_type)
def get_launcher_and_type(self):
if os.path.exists(self.local_location_qml):
launcher = self.launch_test_local
test_type = 'local'
else:
launcher = self.launch_test_click
test_type = 'click'
return launcher, test_type
@autopilot_logging.log_action(logger.info)
def launch_test_local(self):
return self.launch_test_application(
base.get_qmlscene_launch_command(),
self.local_location_qml,
app_type='qt',
emulator_base=ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase)
@autopilot_logging.log_action(logger.info)
def launch_test_click(self):
return self.launch_click_package(
self.click_package,
emulator_base=ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase)
| 30.8 | 79 | 0.698347 |
4a23f9eb19e8d6995e8076ad3126f4c794247607 | 3,445 | py | Python | lizard_languages/python.py | sdrees/lizard | b95e6ad7e429fd80897cae9fa6dac4989168b280 | [
"MIT"
] | null | null | null | lizard_languages/python.py | sdrees/lizard | b95e6ad7e429fd80897cae9fa6dac4989168b280 | [
"MIT"
] | null | null | null | lizard_languages/python.py | sdrees/lizard | b95e6ad7e429fd80897cae9fa6dac4989168b280 | [
"MIT"
] | null | null | null | ''' Language parser for Python '''
from .code_reader import CodeReader, CodeStateMachine
from .script_language import ScriptLanguageMixIn
def count_spaces(token):
return len(token.replace('\t', ' ' * 8))
class PythonIndents: # pylint: disable=R0902
def __init__(self, context):
self.indents = [0]
self.context = context
def set_nesting(self, spaces, token = ""):
while self.indents[-1] > spaces and (not token.startswith(")")):
self.indents.pop()
self.context.pop_nesting()
if self.indents[-1] < spaces:
self.indents.append(spaces)
self.context.add_bare_nesting()
def reset(self):
self.set_nesting(0)
class PythonReader(CodeReader, ScriptLanguageMixIn):
ext = ['py']
language_names = ['python']
_conditions = set(['if', 'for', 'while', 'and', 'or',
'elif', 'except', 'finally'])
def __init__(self, context):
super(PythonReader, self).__init__(context)
self.parallel_states = [PythonStates(context, self)]
@staticmethod
def generate_tokens(source_code, addition='', token_class=None):
return ScriptLanguageMixIn.generate_common_tokens(
source_code,
r"|\'\'\'.*?\'\'\'" + r'|\"\"\".*?\"\"\"', token_class)
def preprocess(self, tokens):
indents = PythonIndents(self.context)
current_leading_spaces = 0
reading_leading_space = True
for token in tokens:
if token != '\n':
if reading_leading_space:
if token.isspace():
current_leading_spaces += count_spaces(token)
else:
if not token.startswith('#'):
current_function = self.context.current_function
if current_function.name == '*global*' or current_function.long_name.endswith(')'):
indents.set_nesting(current_leading_spaces, token)
reading_leading_space = False
else:
reading_leading_space = True
current_leading_spaces = 0
if not token.isspace() or token == '\n':
yield token
indents.reset()
class PythonStates(CodeStateMachine): # pylint: disable=R0903
def __init__(self, context, reader):
super(PythonStates, self).__init__(context)
self.reader = reader
def _state_global(self, token):
if token == 'def':
self._state = self._function
def _function(self, token):
if token != '(':
self.context.restart_new_function(token)
self.context.add_to_long_function_name("(")
else:
self._state = self._dec
def _dec(self, token):
if token == ')':
self._state = self._state_colon
else:
self.context.parameter(token)
return
self.context.add_to_long_function_name(" " + token)
def _state_colon(self, token):
if token == ':':
self.next(self._state_first_line)
else:
self.next(self._state_global)
def _state_first_line(self, token):
self._state = self._state_global
if token.startswith('"""') or token.startswith("'''"):
self.context.add_nloc(-token.count('\n') - 1)
self._state_global(token)
| 33.446602 | 111 | 0.573004 |
4a23fb6a4804cb008c3dd3cd7b5f581cc3350ae2 | 5,865 | py | Python | utils/XeThru_utils/xeX4Thru_software/ModuleConnector/Latest_MC_examples/PYTHON/x4m2x0_plot_sleep_csv_recording_file.py | ApocalyVec/mGesf | 21e0bf37a9d11a3cdde86a8d54e2f6c6a2211ab5 | [
"MIT"
] | 18 | 2020-06-02T11:21:47.000Z | 2022-03-25T08:16:57.000Z | PYTHON/x4m2x0_plot_sleep_csv_recording_file.py | S-B55/HeartConnect | f8a58b7edab18a04d8b10549846370e4c2e17086 | [
"MIT"
] | 4 | 2020-06-20T13:53:44.000Z | 2021-09-11T22:58:21.000Z | PYTHON/x4m2x0_plot_sleep_csv_recording_file.py | S-B55/HeartConnect | f8a58b7edab18a04d8b10549846370e4c2e17086 | [
"MIT"
] | 6 | 2020-04-23T21:30:17.000Z | 2021-08-03T19:59:12.000Z | #!/usr/bin/env python
""" \example x4m2x0_plot_sleep_csv_recording_file.py
The latest examples are located at https://github.com/xethru/XeThru_ModuleConnector_Examples or https://dev.azure.com/xethru/XeThruApps/_git/XeThru_ModuleConnector_Examples.
# Target:
# X4M200/X4M210 sleep recording file.
# Introduction:
# X4M200/X4M210 support sleep message output, which contains respiration and heart rate (only X4M210 supports heart rate) information. The messages can be recorded as a sleep recording file, for example xethru_sleep_20190124_141808.csv.
# This script plots a sleep recording file to show the data for one period, for example one night.
# Command to run:
$ python x4m2x0_plot_sleep_csv_recording_file.py --savefig --report xethru_sleep_xxx_xxx.csv
This generates a matplotlib plot which allows zooming and storing the plot as an image. The --savefig option stores the plot as a png image in the same folder as the csv file.
"""
from __future__ import division, print_function
from matplotlib import pyplot as plt
import matplotlib.dates as mdate
from numpy import loadtxt
import numpy as np
def get_log_header_nrow(fname, delimiter=';'):
"""Expects data to start after the first line starting with a letter
"""
from string import ascii_letters
startrow = 0
comments = ''
with open(fname, 'r') as f:
while 1:
line = f.readline().rstrip()
if line == '':
startrow = -1
break
startrow += 1
if line[0] in ascii_letters:
break
comments += line+'\n'
return startrow, line.split(delimiter), comments
def read_log(fname):
""" Reads a XeThru Respiration log file
Returns: dict with the log file values
"""
import dateutil
from matplotlib.dates import date2num
from collections import OrderedDict
delimiter = ";"
startrow, header, comments = get_log_header_nrow(fname, delimiter)
def datestr2num(x): return date2num(
dateutil.parser.parse(x, ignoretz=True))
data = loadtxt(fname, delimiter=delimiter,
skiprows=startrow, converters={0: datestr2num})
res = OrderedDict()
for ifield, name in enumerate(header):
res[name] = data[:, ifield]
res['comments'] = comments
return res
def get_stateness(states, statearray):
res = np.zeros(len(states))
for i in range(len(res)):
res[i] = sum(statearray == i)/len(statearray)
return res
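# Illustrative example (an assumption, not part of the original script): with two
# states and a state array of [0, 0, 1, 0], get_stateness returns array([0.75, 0.25]),
# i.e. the fraction of samples spent in each state.
def _stateness_example():
    return get_stateness(['Breathing', 'Movement'], np.array([0, 0, 1, 0]))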
def report(logfile, savefig=False):
"""!Read and plot a XeThru log file.
@param logfile: path to csv file
@param savefig: bool, saves a png image of the plotted result
@return: Figure object
"""
states = ['Breathing',
'Movement',
'Movement, tracking',
'NoMovement',
'Initializing',
'Error',
'Unknown']
# The following data is only valid in Breathing state:
breathing_state_data = ['ObjectDistance',
'ObjectMovement', 'SignalQuality']
sens = read_log(logfile)
# Remove data not to be plotted
timestamp = sens.pop('TimeStamp')
framecounter = sens.pop('FrameCounter', None)
comments = sens.pop('comments', None)
# Number of data sets to plot
M = len(sens.keys())
# Summarize time in each state
stateness = get_stateness(states, sens['State'])
sstateness = ''
for i in range(len(stateness)):
sstateness += "%s: %4.2f %% \n" % (states[i], stateness[i]*100)
fig, axs = plt.subplots(M, 1, sharex=True, figsize=(20, 12))
fig.suptitle(logfile)
for ikey, key in enumerate(sens.keys()):
ax = axs[ikey]
ax.set_title(key)
# Mask invalid data
if key in breathing_state_data:
data = np.ma.masked_where(sens['State'] > 2, sens[key])
else:
data = sens[key]
if key == 'RPM':
data = np.ma.masked_where(sens['State'] != 0, sens[key])
ax.plot_date(timestamp, data, color='#4B0082', fmt='-')
# Data specific plotting rules
if key == 'State':
locs = ax.set_yticks(range(len(states)))
labels = ax.set_yticklabels(states)
ax.text(0.9, 0, sstateness, transform=ax.transAxes)
if key == 'SignalQuality':
ax.set_ylabel("Signal Quality (0-10)")
ax.set_ylim(-0.1, 10.9)
ax.grid()
# ax.set_ylabel(key)
ax.set_xlabel("Time")
# xtick format string
date_fmt = '%H:%M:%S'
# Use a DateFormatter to set the data to the correct format.
date_formatter = mdate.DateFormatter(date_fmt)
ax.xaxis.set_major_formatter(date_formatter)
fig.autofmt_xdate()
plt.tight_layout()
plt.subplots_adjust(top=0.92)
if savefig:
fig.savefig(logfile+'.png')
return fig
def report_all(folder, savefig=False):
from glob import glob
logfiles = glob(folder+'/*.csv')
for logfile in logfiles:
report(logfile, savefig=savefig)
def main():
import argparse
parser = argparse.ArgumentParser(
description='XeThru Respiration log plotter')
parser.add_argument('--report', type=str,
help="Report measurement")
parser.add_argument('--report-all', type=str,
help="Report all measurements")
parser.add_argument('--savefig', action="store_true",
help="Save the figure")
args = parser.parse_args()
if args.report:
report(args.report, savefig=args.savefig)
plt.show()
elif args.report_all:
report_all(args.report_all, savefig=args.savefig)
else:
parser.parse_args(['-h'])
if __name__ == "__main__":
from matplotlib import pyplot as plt
plt.ion()
main()
plt.show()
#raw_input("[enter] to exit")
| 28.609756 | 216 | 0.632566 |
4a23fb70e2ef4f08400abb224a46c0eab18ca49f | 40,264 | py | Python | spotify/session.py | zapu/pyspotify | a11583f546ecff552d2028ba80f159005d74edd4 | [
"Apache-2.0"
] | 2 | 2016-11-18T08:49:26.000Z | 2018-05-14T13:27:19.000Z | spotify/session.py | zapu/pyspotify | a11583f546ecff552d2028ba80f159005d74edd4 | [
"Apache-2.0"
] | null | null | null | spotify/session.py | zapu/pyspotify | a11583f546ecff552d2028ba80f159005d74edd4 | [
"Apache-2.0"
] | 1 | 2021-06-24T15:57:12.000Z | 2021-06-24T15:57:12.000Z | from __future__ import unicode_literals
import logging
import weakref
import spotify
import spotify.connection
import spotify.player
import spotify.social
from spotify import ffi, lib, serialized, utils, ffi_callback_win
__all__ = [
'Session',
'SessionEvent',
]
logger = logging.getLogger(__name__)
class Session(utils.EventEmitter):
"""The Spotify session.
If no ``config`` is provided, the default config is used.
The session object will emit a number of events. See :class:`SessionEvent`
for a list of all available events and how to connect your own listener
functions up to get called when the events happens.
.. warning::
You can only have one :class:`Session` instance per process. This is a
libspotify limitation. If you create a second :class:`Session` instance
in the same process pyspotify will raise a :exc:`RuntimeError` with the
message "Session has already been initialized".
:param config: the session config
:type config: :class:`Config` or :class:`None`
"""
@serialized
def __init__(self, config=None):
super(Session, self).__init__()
if spotify._session_instance is not None:
raise RuntimeError('Session has already been initialized')
if config is not None:
self.config = config
else:
self.config = spotify.Config()
if self.config.application_key is None:
self.config.load_application_key_file()
sp_session_ptr = ffi.new('sp_session **')
spotify.Error.maybe_raise(lib.sp_session_create(
self.config._sp_session_config, sp_session_ptr))
self._sp_session = ffi.gc(sp_session_ptr[0], lib.sp_session_release)
self._cache = weakref.WeakValueDictionary()
self._emitters = []
self._callback_handles = set()
self.connection = spotify.connection.Connection(self)
self.offline = spotify.offline.Offline(self)
self.player = spotify.player.Player(self)
self.social = spotify.social.Social(self)
spotify._session_instance = self
_cache = None
"""A mapping from sp_* objects to their corresponding Python instances.
The ``_cached`` helper constructors on wrapper objects use this cache for
finding and returning existing alive wrapper objects for the sp_* object it
is about to create a wrapper for.
The cache *does not* keep objects alive. It's only a means for looking up
the objects if they are kept alive somewhere else in the application.
Internal attribute.
"""
_emitters = None
"""A list of event emitters with attached listeners.
When an event emitter has attached event listeners, we must keep the
emitter alive for as long as the listeners are attached. This is achieved
by adding them to this list.
When creating wrapper objects around sp_* objects we must also return the
existing wrapper objects instead of creating new ones so that the set of
event listeners on the wrapper object can be modified. This is achieved
with a combination of this list and the :attr:`_cache` mapping.
Internal attribute.
"""
_callback_handles = None
"""A set of handles returned by :meth:`spotify.ffi.new_handle`.
These must be kept alive for the handle to remain valid until the callback
arrives, even if the end user does not maintain a reference to the object
the callback works on.
Internal attribute.
"""
config = None
"""A :class:`Config` instance with the current configuration.
Once the session has been created, changing the attributes of this object
will generally have no effect.
"""
connection = None
"""An :class:`~spotify.connection.Connection` instance for controlling the
connection to the Spotify servers."""
offline = None
"""An :class:`~spotify.offline.Offline` instance for controlling offline
sync."""
player = None
"""A :class:`~spotify.player.Player` instance for controlling playback."""
social = None
"""A :class:`~spotify.social.Social` instance for controlling social
sharing."""
def login(self, username, password=None, remember_me=False, blob=None):
"""Authenticate to Spotify's servers.
You can login with one of two combinations:
- ``username`` and ``password``
- ``username`` and ``blob``
To get the ``blob`` string, you must once log in with ``username`` and
``password``. You'll then get the ``blob`` string passed to the
:attr:`~SessionCallbacks.credentials_blob_updated` callback.
If you set ``remember_me`` to :class:`True`, you can later login to the
same account without providing any ``username`` or credentials by
calling :meth:`relogin`.
"""
username = utils.to_char(username)
if password is not None:
password = utils.to_char(password)
blob = ffi.NULL
elif blob is not None:
password = ffi.NULL
blob = utils.to_char(blob)
else:
raise AttributeError('password or blob is required to login')
spotify.Error.maybe_raise(lib.sp_session_login(
self._sp_session, username, password, bool(remember_me), blob))
def logout(self):
"""Log out the current user.
If you logged in with the ``remember_me`` argument set to
:class:`True`, you will also need to call :meth:`forget_me` to
completely remove all credentials of the user that was logged in.
"""
spotify.Error.maybe_raise(lib.sp_session_logout(self._sp_session))
@property
def remembered_user_name(self):
"""The username of the remembered user from a previous :meth:`login`
call."""
return utils.get_with_growing_buffer(
lib.sp_session_remembered_user, self._sp_session)
def relogin(self):
"""Relogin as the remembered user.
        To be able to do this, you must previously have logged in with
:meth:`login` with the ``remember_me`` argument set to :class:`True`.
To check what user you'll be logged in as if you call this method, see
:attr:`remembered_user_name`.
"""
spotify.Error.maybe_raise(lib.sp_session_relogin(self._sp_session))
def forget_me(self):
"""Forget the remembered user from a previous :meth:`login` call."""
spotify.Error.maybe_raise(lib.sp_session_forget_me(self._sp_session))
@property
@serialized
def user(self):
"""The logged in :class:`User`."""
sp_user = lib.sp_session_user(self._sp_session)
if sp_user == ffi.NULL:
return None
return spotify.User(self, sp_user=sp_user, add_ref=True)
@property
@serialized
def user_name(self):
"""The username of the logged in user."""
return utils.to_unicode(lib.sp_session_user_name(self._sp_session))
@property
@serialized
def user_country(self):
"""The country of the currently logged in user.
The :attr:`~SessionEvent.OFFLINE_STATUS_UPDATED` event is emitted on
the session object when this changes.
"""
return utils.to_country(lib.sp_session_user_country(self._sp_session))
@property
@serialized
def playlist_container(self):
"""The :class:`PlaylistContainer` for the currently logged in user."""
sp_playlistcontainer = lib.sp_session_playlistcontainer(
self._sp_session)
if sp_playlistcontainer == ffi.NULL:
return None
return spotify.PlaylistContainer._cached(
self, sp_playlistcontainer, add_ref=True)
@property
def inbox(self):
"""The inbox :class:`Playlist` for the currently logged in user."""
sp_playlist = lib.sp_session_inbox_create(self._sp_session)
if sp_playlist == ffi.NULL:
return None
return spotify.Playlist._cached(
self, sp_playlist=sp_playlist, add_ref=False)
def set_cache_size(self, size):
"""Set maximum size in MB for libspotify's cache.
If set to 0 (the default), up to 10% of the free disk space will be
used."""
spotify.Error.maybe_raise(lib.sp_session_set_cache_size(
self._sp_session, size))
def flush_caches(self):
"""Write all cached data to disk.
libspotify does this regularly and on logout, so you should never need
to call this method yourself.
"""
spotify.Error.maybe_raise(
lib.sp_session_flush_caches(self._sp_session))
def preferred_bitrate(self, bitrate):
"""Set preferred :class:`Bitrate` for music streaming."""
spotify.Error.maybe_raise(lib.sp_session_preferred_bitrate(
self._sp_session, bitrate))
def preferred_offline_bitrate(self, bitrate, allow_resync=False):
"""Set preferred :class:`Bitrate` for offline sync.
If ``allow_resync`` is :class:`True` libspotify may resynchronize
already synced tracks.
"""
spotify.Error.maybe_raise(lib.sp_session_preferred_offline_bitrate(
self._sp_session, bitrate, allow_resync))
@property
def volume_normalization(self):
"""Whether volume normalization is active or not.
Set to :class:`True` or :class:`False` to change.
"""
return bool(lib.sp_session_get_volume_normalization(self._sp_session))
@volume_normalization.setter
def volume_normalization(self, value):
spotify.Error.maybe_raise(lib.sp_session_set_volume_normalization(
self._sp_session, value))
def process_events(self):
"""Process pending events in libspotify.
This method must be called for most callbacks to be called. Without
calling this method, you'll only get the callbacks that are called from
internal libspotify threads. When the
:attr:`~SessionEvent.NOTIFY_MAIN_THREAD` event is emitted (from an
internal libspotify thread), it's your job to make sure this method is
called (from the thread you use for accessing Spotify), so that further
callbacks can be triggered (from the same thread).
pyspotify provides an :class:`~spotify.EventLoop` that you can use for
processing events when needed.
"""
next_timeout = ffi.new('int *')
spotify.Error.maybe_raise(lib.sp_session_process_events(
self._sp_session, next_timeout))
return next_timeout[0]
def inbox_post_tracks(
self, canonical_username, tracks, message, callback=None):
"""Post a ``message`` and one or more ``tracks`` to the inbox of the
user with the given ``canonical_username``.
``tracks`` can be a single :class:`~spotify.Track` or a list of
:class:`~spotify.Track` objects.
Returns an :class:`InboxPostResult` that can be used to check if the
request completed successfully.
If callback isn't :class:`None`, it is called with an
:class:`InboxPostResult` instance when the request has completed.
"""
return spotify.InboxPostResult(
self, canonical_username, tracks, message, callback)
def get_starred(self, canonical_username=None):
"""Get the starred :class:`Playlist` for the user with
``canonical_username``.
If ``canonical_username`` isn't specified, the starred playlist for
the currently logged in user is returned.
"""
if canonical_username is None:
sp_playlist = lib.sp_session_starred_create(self._sp_session)
else:
sp_playlist = lib.sp_session_starred_for_user_create(
self._sp_session, utils.to_bytes(canonical_username))
if sp_playlist == ffi.NULL:
return None
return spotify.Playlist._cached(self, sp_playlist, add_ref=False)
def get_published_playlists(self, canonical_username=None):
"""Get the :class:`PlaylistContainer` of published playlists for the
user with ``canonical_username``.
If ``canonical_username`` isn't specified, the published container for
the currently logged in user is returned.
"""
if canonical_username is None:
canonical_username = ffi.NULL
else:
canonical_username = utils.to_bytes(canonical_username)
sp_playlistcontainer = (
lib.sp_session_publishedcontainer_for_user_create(
self._sp_session, canonical_username))
if sp_playlistcontainer == ffi.NULL:
return None
return spotify.PlaylistContainer._cached(
self, sp_playlistcontainer, add_ref=False)
def get_link(self, uri):
"""
Get :class:`Link` from any Spotify URI.
A link can be created from a string containing a Spotify URI on the
form ``spotify:...``.
Example::
>>> session = spotify.Session()
# ...
>>> session.get_link(
... 'spotify:track:2Foc5Q5nqNiosCNqttzHof')
Link('spotify:track:2Foc5Q5nqNiosCNqttzHof')
>>> session.get_link(
... 'http://open.spotify.com/track/4wl1dK5dHGp3Ig51stvxb0')
Link('spotify:track:4wl1dK5dHGp3Ig51stvxb0')
"""
return spotify.Link(self, uri=uri)
def get_track(self, uri):
"""
Get :class:`Track` from a Spotify track URI.
Example::
>>> session = spotify.Session()
# ...
>>> track = session.get_track(
... 'spotify:track:2Foc5Q5nqNiosCNqttzHof')
>>> track.load().name
u'Get Lucky'
"""
return spotify.Track(self, uri=uri)
def get_local_track(
self, artist=None, title=None, album=None, length=None):
"""
Get :class:`Track` for a local track.
        Spotify's official clients support adding your local music files to
Spotify so they can be played in the Spotify client. These are not
        synced with Spotify's servers or between your devices and there is no
trace of them in your Spotify user account. The exception is when you
add one of these local tracks to a playlist or mark them as starred.
This creates a "local track" which pyspotify also will be able to
observe.
"Local tracks" can be recognized in several ways:
- The track's URI will be of the form
``spotify:local:ARTIST:ALBUM:TITLE:LENGTH_IN_SECONDS``. Any of the
parts in all caps can be left out if there is no information
available. That is, ``spotify:local::::`` is a valid local track URI.
- :attr:`Link.type` will be :class:`LinkType.LOCALTRACK` for the
track's link.
- :attr:`Track.is_local` will be :class:`True` for the track.
This method can be used to create local tracks that can be starred or
added to playlists.
``artist`` may be an artist name. ``title`` may be a track name.
``album`` may be an album name. ``length`` may be a track length in
milliseconds.
Note that when creating a local track you provide the length in
milliseconds, while the local track URI contains the length in seconds.
"""
if artist is None:
artist = ''
if title is None:
title = ''
if album is None:
album = ''
if length is None:
length = -1
artist = utils.to_char(artist)
title = utils.to_char(title)
album = utils.to_char(album)
sp_track = lib.sp_localtrack_create(artist, title, album, length)
return spotify.Track(self, sp_track=sp_track, add_ref=False)
def get_album(self, uri):
"""
Get :class:`Album` from a Spotify album URI.
Example::
>>> session = spotify.Session()
# ...
>>> album = session.get_album(
... 'spotify:album:6wXDbHLesy6zWqQawAa91d')
>>> album.load().name
u'Forward / Return'
"""
return spotify.Album(self, uri=uri)
def get_artist(self, uri):
"""
Get :class:`Artist` from a Spotify artist URI.
Example::
>>> session = spotify.Session()
# ...
>>> artist = session.get_artist(
... 'spotify:artist:22xRIphSN7IkPVbErICu7s')
>>> artist.load().name
u'Rob Dougan'
"""
return spotify.Artist(self, uri=uri)
def get_playlist(self, uri):
"""
Get :class:`Playlist` from a Spotify playlist URI.
Example::
>>> session = spotify.Session()
# ...
>>> playlist = session.get_playlist(
... 'spotify:user:fiat500c:playlist:54k50VZdvtnIPt4d8RBCmZ')
>>> playlist.load().name
u'500C feelgood playlist'
"""
return spotify.Playlist(self, uri=uri)
def get_user(self, uri):
"""
Get :class:`User` from a Spotify user URI.
Example::
>>> session = spotify.Session()
# ...
>>> user = session.get_user('spotify:user:jodal')
>>> user.load().display_name
u'jodal'
"""
return spotify.User(self, uri=uri)
def get_image(self, uri, callback=None):
"""
Get :class:`Image` from a Spotify image URI.
If ``callback`` isn't :class:`None`, it is expected to be a callable
that accepts a single argument, an :class:`Image` instance, when
the image is done loading.
Example::
>>> session = spotify.Session()
# ...
>>> image = session.get_image(
... 'spotify:image:a0bdcbe11b5cd126968e519b5ed1050b0e8183d0')
>>> image.load().data_uri[:50]
u'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEBLAEsAAD'
"""
return spotify.Image(self, uri=uri, callback=callback)
def search(
self, query, callback=None,
track_offset=0, track_count=20,
album_offset=0, album_count=20,
artist_offset=0, artist_count=20,
playlist_offset=0, playlist_count=20,
search_type=None):
"""
Search Spotify for tracks, albums, artists, and playlists matching
``query``.
The ``query`` string can be free format, or use some prefixes like
``title:`` and ``artist:`` to limit what to match on. There is no
official docs on the search query format, but there's a `Spotify blog
post
<https://www.spotify.com/blog/archives/2008/01/22/searching-spotify/>`_
from 2008 with some examples.
If ``callback`` isn't :class:`None`, it is expected to be a callable
that accepts a single argument, a :class:`Search` instance, when
the search completes.
The ``*_offset`` and ``*_count`` arguments can be used to retrieve more
search results. libspotify will currently not respect ``*_count``
values higher than 200, though this may change at any time as the limit
isn't documented in any official docs. If you want to retrieve more
than 200 results, you'll have to search multiple times with different
``*_offset`` values. See the ``*_total`` attributes on the
        :class:`Search` to see how many results exist, and to figure out
how many searches you'll need to make to retrieve everything.
``search_type`` is a :class:`SearchType` value. It defaults to
:attr:`SearchType.STANDARD`.
Returns a :class:`Search` instance.
"""
return spotify.Search(
self, query=query, callback=callback,
track_offset=track_offset, track_count=track_count,
album_offset=album_offset, album_count=album_count,
artist_offset=artist_offset, artist_count=artist_count,
playlist_offset=playlist_offset, playlist_count=playlist_count,
search_type=search_type)
def get_toplist(
self, type=None, region=None, canonical_username=None,
callback=None):
"""Get a :class:`Toplist` of artists, albums, or tracks that are the
currently most popular worldwide or in a specific region.
``type`` is a :class:`ToplistType` instance that specifies the type of
toplist to create.
``region`` is either a :class:`ToplistRegion` instance, or a 2-letter
ISO 3166-1 country code as a unicode string, that specifies the
geographical region to create a toplist for.
If ``region`` is :attr:`ToplistRegion.USER` and ``canonical_username``
isn't specified, the region of the current user will be used. If
``canonical_username`` is specified, the region of the specified user
will be used instead.
If ``callback`` isn't :class:`None`, it is expected to be a callable
that accepts a single argument, a :class:`Toplist` instance, when the
toplist request completes.
Example::
>>> import spotify
>>> session = spotify.Session()
# ...
>>> toplist = session.get_toplist(
... type=spotify.ToplistType.TRACKS, region='US')
>>> toplist.load()
>>> len(toplist.tracks)
100
>>> len(toplist.artists)
0
>>> toplist.tracks[0]
Track(u'spotify:track:2dLLR6qlu5UJ5gk0dKz0h3')
"""
return spotify.Toplist(
self, type=type, region=region,
canonical_username=canonical_username, callback=callback)
class SessionEvent(object):
"""Session events.
Using the :class:`Session` object, you can register listener functions to
    be called when various session related events occur. This class enumerates
the available events and the arguments your listener functions will be
called with.
Example usage::
import spotify
def logged_in(session, error_type):
if error_type is spotify.ErrorType.OK:
print('Logged in as %s' % session.user)
else:
print('Login failed: %s' % error_type)
session = spotify.Session()
session.on(spotify.SessionEvent.LOGGED_IN, logged_in)
session.login('alice', 's3cret')
All events will cause debug log statements to be emitted, even if no
listeners are registered. Thus, there is no need to register listener
functions just to log that they're called.
"""
LOGGED_IN = 'logged_in'
"""Called when login has completed.
Note that even if login has succeeded, that does not mean that you're
online yet as libspotify may have cached enough information to let you
authenticate with Spotify while offline.
This event should be used to get notified about login errors. To get
notified about the authentication and connection state, refer to the
:attr:`SessionEvent.CONNECTION_STATE_UPDATED` event.
:param session: the current session
:type session: :class:`Session`
:param error_type: the login error type
:type error_type: :class:`ErrorType`
"""
LOGGED_OUT = 'logged_out'
"""Called when logout has completed or there is a permanent connection
error.
:param session: the current session
:type session: :class:`Session`
"""
METADATA_UPDATED = 'metadata_updated'
"""Called when some metadata has been updated.
There is no way to know what metadata was updated, so you'll have to
    refresh all your metadata caches.
:param session: the current session
:type session: :class:`Session`
"""
CONNECTION_ERROR = 'connection_error'
"""Called when there is a connection error and libspotify has problems
reconnecting to the Spotify service.
May be called repeatedly as long as the problem persists. Will be called
with an :attr:`ErrorType.OK` error when the problem is resolved.
:param session: the current session
:type session: :class:`Session`
:param error_type: the connection error type
:type error_type: :class:`ErrorType`
"""
MESSAGE_TO_USER = 'message_to_user'
"""Called when libspotify wants to show a message to the end user.
:param session: the current session
:type session: :class:`Session`
:param data: the message
:type data: text
"""
NOTIFY_MAIN_THREAD = 'notify_main_thread'
"""Called when processing on the main thread is needed.
When this is called, you should call :meth:`~Session.process_events` from
your main thread. Failure to do so may cause request timeouts, or a lost
connection.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
"""
MUSIC_DELIVERY = 'music_delivery'
"""Called when there is decompressed audio data available.
If the function returns a lower number of frames consumed than
``num_frames``, libspotify will retry delivery of the unconsumed frames in
about 100ms. This can be used for rate limiting if libspotify is giving you
audio data too fast.
.. note::
You can register at most one event listener for this event.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
:param audio_format: the audio format
:type audio_format: :class:`AudioFormat`
:param frames: the audio frames
:type frames: bytestring
:param num_frames: the number of frames
:type num_frames: int
:returns: the number of frames consumed
"""
PLAY_TOKEN_LOST = 'play_token_lost'
"""Music has been paused because an account only allows music to be played
from one location simultaneously.
When this event is emitted, you should pause playback.
:param session: the current session
:type session: :class:`Session`
"""
LOG_MESSAGE = 'log_message'
"""Called when libspotify have something to log.
Note that pyspotify logs this for you, so you'll probably never need to
register a listener for this event.
:param session: the current session
:type session: :class:`Session`
:param data: the message
:type data: text
"""
END_OF_TRACK = 'end_of_track'
"""Called when all audio data for the current track has been delivered.
:param session: the current session
:type session: :class:`Session`
"""
STREAMING_ERROR = 'streaming_error'
"""Called when audio streaming cannot start or continue.
:param session: the current session
:type session: :class:`Session`
:param error_type: the streaming error type
:type error_type: :class:`ErrorType`
"""
USER_INFO_UPDATED = 'user_info_updated'
"""Called when anything related to :class:`User` objects is updated.
:param session: the current session
:type session: :class:`Session`
"""
START_PLAYBACK = 'start_playback'
"""Called when audio playback should start.
You need to implement a listener for the :attr:`GET_AUDIO_BUFFER_STATS`
event for the :attr:`START_PLAYBACK` event to be useful.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
"""
STOP_PLAYBACK = 'stop_playback'
"""Called when audio playback should stop.
You need to implement a listener for the :attr:`GET_AUDIO_BUFFER_STATS`
event for the :attr:`STOP_PLAYBACK` event to be useful.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
"""
GET_AUDIO_BUFFER_STATS = 'get_audio_buffer_stats'
"""Called to query the application about its audio buffer.
.. note::
You can register at most one event listener for this event.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
:returns: an :class:`AudioBufferStats` instance
"""
OFFLINE_STATUS_UPDATED = 'offline_status_updated'
"""Called when offline sync status is updated.
:param session: the current session
:type session: :class:`Session`
"""
CREDENTIALS_BLOB_UPDATED = 'credentials_blob_updated'
"""Called when storable credentials have been updated, typically right
after login.
The ``blob`` argument can be stored and later passed to
:meth:`~Session.login` to login without storing the user's password.
:param session: the current session
:type session: :class:`Session`
:param blob: the authentication blob
:type blob: bytestring
"""
CONNECTION_STATE_UPDATED = 'connection_state_updated'
"""Called when the connection state is updated.
The connection state includes login, logout, offline mode, etc.
:param session: the current session
:type session: :class:`Session`
"""
SCROBBLE_ERROR = 'scrobble_error'
"""Called when there is a scrobble error event.
:param session: the current session
:type session: :class:`Session`
:param error_type: the scrobble error type
:type error_type: :class:`ErrorType`
"""
PRIVATE_SESSION_MODE_CHANGED = 'private_session_mode_changed'
"""Called when there is a change in the private session mode.
:param session: the current session
:type session: :class:`Session`
:param is_private: whether the session is private
:type is_private: bool
"""
class _SessionCallbacks(object):
"""Internal class."""
@classmethod
def get_struct(cls):
return ffi.new('sp_session_callbacks *', {
'logged_in': cls.logged_in,
'logged_out': cls.logged_out,
'metadata_updated': cls.metadata_updated,
'connection_error': cls.connection_error,
'message_to_user': cls.message_to_user,
'notify_main_thread': cls.notify_main_thread,
'music_delivery': cls.music_delivery,
'play_token_lost': cls.play_token_lost,
'log_message': cls.log_message,
'end_of_track': cls.end_of_track,
'streaming_error': cls.streaming_error,
'userinfo_updated': cls.user_info_updated,
'start_playback': cls.start_playback,
'stop_playback': cls.stop_playback,
'get_audio_buffer_stats': cls.get_audio_buffer_stats,
'offline_status_updated': cls.offline_status_updated,
'credentials_blob_updated': cls.credentials_blob_updated,
'connectionstate_updated': cls.connection_state_updated,
'scrobble_error': cls.scrobble_error,
'private_session_mode_changed': cls.private_session_mode_changed,
})
# XXX Avoid use of the spotify._session_instance global in the following
# callbacks.
@staticmethod
@ffi_callback_win('void(sp_session *, sp_error)')
def logged_in(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
if error_type == spotify.ErrorType.OK:
logger.info('Spotify logged in')
else:
logger.error('Spotify login error: %r', error_type)
spotify._session_instance.emit(
SessionEvent.LOGGED_IN, spotify._session_instance, error_type)
@staticmethod
@ffi_callback_win('void(sp_session *)')
def logged_out(sp_session):
if not spotify._session_instance:
return
logger.info('Spotify logged out')
spotify._session_instance.emit(
SessionEvent.LOGGED_OUT, spotify._session_instance)
@staticmethod
@ffi_callback_win('void(sp_session *)')
def metadata_updated(sp_session):
if not spotify._session_instance:
return
logger.debug('Metadata updated')
spotify._session_instance.emit(
SessionEvent.METADATA_UPDATED, spotify._session_instance)
@staticmethod
@ffi_callback_win('void(sp_session *, sp_error)')
def connection_error(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
logger.error('Spotify connection error: %r', error_type)
spotify._session_instance.emit(
SessionEvent.CONNECTION_ERROR,
spotify._session_instance, error_type)
@staticmethod
@ffi_callback_win('void(sp_session *, const char *)')
def message_to_user(sp_session, data):
if not spotify._session_instance:
return
data = utils.to_unicode(data).strip()
logger.debug('Message to user: %s', data)
spotify._session_instance.emit(
SessionEvent.MESSAGE_TO_USER, spotify._session_instance, data)
@staticmethod
@ffi_callback_win('void(sp_session *)')
def notify_main_thread(sp_session):
if not spotify._session_instance:
return
logger.debug('Notify main thread')
spotify._session_instance.emit(
SessionEvent.NOTIFY_MAIN_THREAD, spotify._session_instance)
@staticmethod
@ffi_callback_win(
'int(sp_session *, const sp_audioformat *, const void *, int)')
def music_delivery(sp_session, sp_audioformat, frames, num_frames):
if not spotify._session_instance:
return 0
if spotify._session_instance.num_listeners(
SessionEvent.MUSIC_DELIVERY) == 0:
logger.debug('Music delivery, but no listener')
return 0
audio_format = spotify.AudioFormat(sp_audioformat)
frames_buffer = ffi.buffer(
frames, audio_format.frame_size() * num_frames)
frames_bytes = frames_buffer[:]
num_frames_consumed = spotify._session_instance.call(
SessionEvent.MUSIC_DELIVERY,
spotify._session_instance, audio_format, frames_bytes, num_frames)
logger.debug(
'Music delivery of %d frames, %d consumed', num_frames,
num_frames_consumed)
return num_frames_consumed
@staticmethod
@ffi_callback_win('void(sp_session *)')
def play_token_lost(sp_session):
if not spotify._session_instance:
return
logger.debug('Play token lost')
spotify._session_instance.emit(
SessionEvent.PLAY_TOKEN_LOST, spotify._session_instance)
@staticmethod
@ffi_callback_win('void(sp_session *, const char *)')
def log_message(sp_session, data):
if not spotify._session_instance:
return
data = utils.to_unicode(data).strip()
logger.debug('libspotify log message: %s', data)
spotify._session_instance.emit(
SessionEvent.LOG_MESSAGE, spotify._session_instance, data)
@staticmethod
@ffi_callback_win('void(sp_session *)')
def end_of_track(sp_session):
if not spotify._session_instance:
return
logger.debug('End of track')
spotify._session_instance.emit(
SessionEvent.END_OF_TRACK, spotify._session_instance)
@staticmethod
@ffi_callback_win('void(sp_session *, sp_error)')
def streaming_error(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
logger.error('Spotify streaming error: %r', error_type)
spotify._session_instance.emit(
SessionEvent.STREAMING_ERROR,
spotify._session_instance, error_type)
@staticmethod
@ffi_callback_win('void(sp_session *)')
def user_info_updated(sp_session):
if not spotify._session_instance:
return
logger.debug('User info updated')
spotify._session_instance.emit(
SessionEvent.USER_INFO_UPDATED, spotify._session_instance)
@staticmethod
@ffi_callback_win('void(sp_session *)')
def start_playback(sp_session):
if not spotify._session_instance:
return
logger.debug('Start playback called')
spotify._session_instance.emit(
SessionEvent.START_PLAYBACK, spotify._session_instance)
@staticmethod
@ffi_callback_win('void(sp_session *)')
def stop_playback(sp_session):
if not spotify._session_instance:
return
logger.debug('Stop playback called')
spotify._session_instance.emit(
SessionEvent.STOP_PLAYBACK, spotify._session_instance)
@staticmethod
@ffi_callback_win('void(sp_session *, sp_audio_buffer_stats *)')
def get_audio_buffer_stats(sp_session, sp_audio_buffer_stats):
if not spotify._session_instance:
return
if spotify._session_instance.num_listeners(
SessionEvent.GET_AUDIO_BUFFER_STATS) == 0:
logger.debug('Audio buffer stats requested, but no listener')
return
logger.debug('Audio buffer stats requested')
stats = spotify._session_instance.call(
SessionEvent.GET_AUDIO_BUFFER_STATS, spotify._session_instance)
sp_audio_buffer_stats.samples = stats.samples
sp_audio_buffer_stats.stutter = stats.stutter
@staticmethod
@ffi_callback_win('void(sp_session *)')
def offline_status_updated(sp_session):
if not spotify._session_instance:
return
logger.debug('Offline status updated')
spotify._session_instance.emit(
SessionEvent.OFFLINE_STATUS_UPDATED, spotify._session_instance)
@staticmethod
@ffi_callback_win('void(sp_session *, const char *)')
def credentials_blob_updated(sp_session, data):
if not spotify._session_instance:
return
data = ffi.string(data)
logger.debug('Credentials blob updated: %r', data)
spotify._session_instance.emit(
SessionEvent.CREDENTIALS_BLOB_UPDATED,
spotify._session_instance, data)
@staticmethod
@ffi_callback_win('void(sp_session *)')
def connection_state_updated(sp_session):
if not spotify._session_instance:
return
logger.debug('Connection state updated')
spotify._session_instance.emit(
SessionEvent.CONNECTION_STATE_UPDATED,
spotify._session_instance)
@staticmethod
@ffi_callback_win('void(sp_session *, sp_error)')
def scrobble_error(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
logger.error('Spotify scrobble error: %r', error_type)
spotify._session_instance.emit(
SessionEvent.SCROBBLE_ERROR,
spotify._session_instance, error_type)
@staticmethod
@ffi_callback_win('void (sp_session *, bool)')
def private_session_mode_changed(sp_session, is_private):
if not spotify._session_instance:
return
is_private = bool(is_private)
status = 'private' if is_private else 'public'
logger.debug('Private session mode changed: %s', status)
spotify._session_instance.emit(
SessionEvent.PRIVATE_SESSION_MODE_CHANGED,
spotify._session_instance, is_private)
| 35.600354 | 79 | 0.653934 |
4a23fc2486f2f6e20e8a0b67e4ec0836cfa2c5c3 | 2,149 | py | Python | tests/test_gui/test_property.py | akapkotel/arcade | 6e43ec53e7bfa3dee1aa574404794e3695aad381 | [
"MIT"
] | null | null | null | tests/test_gui/test_property.py | akapkotel/arcade | 6e43ec53e7bfa3dee1aa574404794e3695aad381 | [
"MIT"
] | 1 | 2022-03-21T06:24:29.000Z | 2022-03-21T06:24:29.000Z | tests/test_gui/test_property.py | Ibrahim2750mi/arcade | bf3229e64117931bffb8e50926a996a7a8fc9b8b | [
"MIT"
] | null | null | null | import gc
from arcade.gui.property import Property, bind
class MyObject:
name = Property()
class Observer:
called = None
def call(self, *args, **kwargs):
self.called = (args, kwargs)
def __call__(self, *args, **kwargs):
self.called = (args, kwargs)
def test_callback():
observer = Observer()
my_obj = MyObject()
bind(my_obj, "name", observer)
assert not observer.called
# WHEN
my_obj.name = "New Name"
assert observer.called == (tuple(), {})
def test_get_default():
my_obj = MyObject()
assert my_obj.name is None
def test_set_and_get_value():
my_obj = MyObject()
# WHEN
my_obj.name = "New Name"
assert my_obj.name == "New Name"
def test_independent_obj_instances():
my_obj1 = MyObject()
my_obj2 = MyObject()
# WHEN
my_obj1.name = "Hans"
my_obj2.name = "Franz"
assert my_obj1.name == "Hans"
assert my_obj2.name == "Franz"
def test_does_not_trigger_if_value_unchanged():
observer = Observer()
my_obj = MyObject()
my_obj.name = "CONSTANT"
bind(my_obj, "name", observer)
assert not observer.called
# WHEN
my_obj.name = "CONSTANT"
assert not observer.called
def test_gc_entries_are_collected():
obj = MyObject()
obj.name = "Some Name"
# Keeps referenced objects
gc.collect()
assert len(MyObject.name.obs) == 1
# delete ref and trigger gc
del obj
gc.collect()
    # No leftovers
assert len(MyObject.name.obs) == 0
def test_gc_keeps_bound_methods():
observer = Observer()
obj = MyObject()
obj.name = "Some Name"
bind(obj, "name", observer.call)
assert len(MyObject.name.obs[obj].listeners) == 1
del observer
gc.collect()
assert len(MyObject.name.obs[obj].listeners) == 1
def test_gc_keeps_temp_methods():
obj = MyObject()
obj.name = "Some Name"
calls = []
def callback(*args, **kwargs):
calls.append((args, kwargs))
bind(obj, "name", callback)
assert len(MyObject.name.obs[obj].listeners) == 1
del callback
assert len(MyObject.name.obs[obj].listeners) == 1
| 17.908333 | 53 | 0.632387 |
4a23fda8c83b4fde7242e2f2b2013b39a598b50d | 6,124 | py | Python | ros/src/waypoint_updater/waypoint_updater.py | zhengyu920/CarND-Capstone | cf24cc317379159f466ccd58e77923e192cf4c3c | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | zhengyu920/CarND-Capstone | cf24cc317379159f466ccd58e77923e192cf4c3c | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | zhengyu920/CarND-Capstone | cf24cc317379159f466ccd58e77923e192cf4c3c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
import numpy as np
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
from scipy.spatial import KDTree
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 0.5
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
# TODO: Add other member variables you need below
self.pose = None
# self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.base_lane = None
self.stopline_wp_idx = -1
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
self.loop() # control the publish rate at 50Hz
# replace rospy.spin()
# def loop(self):
# rate = rospy.Rate(50)
# while not rospy.is_shutdown():
# if self.pose and self.base_waypoints:
# # Get closest waypoint
# closest_waypoint_idx = self.get_closest_waypoint_idx()
# self.publish_waypoints(closest_waypoint_idx)
# # rospy.loginfo("published to final_waypoint")
# # else:
# # rospy.logwarn("failed to find pose or basewaypoint in waypoint updater")
# rate.sleep()
def loop(self):
rate = rospy.Rate(50)
while not rospy.is_shutdown():
if self.pose and self.base_lane:
self.publish_waypoints()
rate.sleep()
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
# Check if closest is ahead or behind vehicle
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
cl_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
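        # Hyperplane check: a positive dot product means the car is already past the
        # closest waypoint along the track direction, so use the next waypoint instead.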
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self):
# lane = Lane()
# lane.header = self.pose.header
# lane.waypoints = self.base_waypoints.waypoints[closest_waypoint_idx : closest_waypoint_idx + LOOKAHEAD_WPS]
# # rospy.logwarn(lane)
# self.final_waypoints_pub.publish(lane)
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
lane = Lane()
closest_idx = self.get_closest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_lane.waypoints[closest_idx : farthest_idx]
if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
lane.waypoints = base_waypoints
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
return lane
def decelerate_waypoints(self, base_waypoints, closest_idx):
temp = []
for i, wp in enumerate(base_waypoints):
p = Waypoint()
p.pose = wp.pose
stop_idx = max(self.stopline_wp_idx - closest_idx - 2, 0) # two waypoints back from the line so front of car stops at line
dist = self.distance(base_waypoints, i, stop_idx)
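            # Constant-deceleration kinematics (v^2 = 2*a*d): the target speed decays
            # with the square root of the remaining distance to the stop line.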
vel = math.sqrt(2 * MAX_DECEL * dist)
if vel < 1.0:
vel = 0.
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
return temp
def pose_cb(self, msg):
# TODO: Implement
# rospy.loginfo("updated pose in waypoint updater: %s", msg)
self.pose = msg
def waypoints_cb(self, waypoints):
# TODO: Implement
# rospy.loginfo('updated in waypoints_cb')
self.base_lane = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
self.stopline_wp_idx = msg.data
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| 35.398844 | 134 | 0.650065 |
4a24001f15fa6b82dd3c41ab172da0df1dbe2970 | 98 | py | Python | contrib/tools/cython/Cython/Distutils/__init__.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | [
"Apache-2.0"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | contrib/tools/cython/Cython/Distutils/__init__.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | [
"Apache-2.0"
] | 3,094 | 2015-01-01T15:44:13.000Z | 2022-03-31T19:49:57.000Z | contrib/tools/cython/Cython/Distutils/__init__.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | [
"Apache-2.0"
] | 1,425 | 2015-01-12T07:21:27.000Z | 2022-03-30T14:10:40.000Z | from Cython.Distutils.build_ext import build_ext
from Cython.Distutils.extension import Extension
| 32.666667 | 48 | 0.877551 |
4a240188dad97f59c2a68af5f144eee998d0b310 | 5,567 | py | Python | cloudshell/tg/breaking_point/helpers/bp_cs_reservation_details.py | QualiSystems/cloudshell-tg-breaking-point | 15399c231c0b51df492ddd4a7854f42a5f81a3da | [
"Apache-2.0"
] | null | null | null | cloudshell/tg/breaking_point/helpers/bp_cs_reservation_details.py | QualiSystems/cloudshell-tg-breaking-point | 15399c231c0b51df492ddd4a7854f42a5f81a3da | [
"Apache-2.0"
] | 9 | 2017-04-25T09:16:43.000Z | 2019-07-31T08:55:47.000Z | cloudshell/tg/breaking_point/helpers/bp_cs_reservation_details.py | QualiSystems/cloudshell-tg-breaking-point | 15399c231c0b51df492ddd4a7854f42a5f81a3da | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
from cloudshell.tg.breaking_point.bp_exception import BPException
class BPCSReservationDetails(object):
PORT_FAMILY = ['Port', 'Virtual Port', 'Breaking Point Virtual Port', 'CS_TrafficGeneratorPort',
'CS_VirtualTrafficGeneratorPort']
STATIC_VM_PORT = {"family_name": "CS_Port", "model_name": "BP vBlade.GenericVPort"}
CHASSIS_FAMILY = ['Traffic Generator Chassis', 'Virtual Traffic Generator Chassis', 'CS_TrafficGeneratorChassis',
'CS_VirtualTrafficGeneratorChassis']
STATIC_VM_CHASSIS = {"family_name": "CS_GenericAppFamily", "model_name": "BP vChassis"}
PORT_ATTRIBUTE = 'Logical Name'
USERNAME_ATTRIBUTE = 'User'
PASSWORD_ATTRIBUTE = 'Password'
def __init__(self, reservation_id, logger, api):
self._reservation_id = reservation_id
self._logger = logger
self._api = api
self._reservation_details = None
self._resource_details_dict = {}
self.__chassis_resource = None
@property
def api(self):
return self._api
@property
def logger(self):
return self._logger
@property
def _chassis_resource(self):
if not self.__chassis_resource:
self.__chassis_resource = self._find_chassis_resource()
return self.__chassis_resource
@property
def _chassis_name(self):
return self._chassis_resource.Name
@property
def _chassis_model_name(self):
return self._chassis_resource.ResourceModelName
def _find_chassis_resource(self):
chassis_resource = None
for resource in self._get_reservation_details().ReservationDescription.Resources:
if (resource.ResourceFamilyName in self.CHASSIS_FAMILY or
resource.ResourceFamilyName == self.STATIC_VM_CHASSIS["family_name"] and
resource.ResourceModelName == self.STATIC_VM_CHASSIS["model_name"]):
chassis_resource = resource
if not chassis_resource:
raise BPException(self.__class__.__name__,
'Cannot find {0} in the reservation'.format(', '.join(self.CHASSIS_FAMILY)))
return chassis_resource
def _get_reservation_details(self):
if not self._reservation_details:
self._reservation_details = self.api.GetReservationDetails(reservationId=self._reservation_id)
return self._reservation_details
def _get_resource_details(self, resource_name):
details = self._resource_details_dict.get(resource_name, None)
if not details:
details = self.api.GetResourceDetails(resource_name)
self._resource_details_dict[resource_name] = details
return details
def _get_attribute_value(self, resource_name, attribute_name):
resource_details = self._get_resource_details(resource_name)
model_attribute_2g = '{}.{}'.format(resource_details.ResourceModelName, attribute_name)
family_attribute_2g = '{}.{}'.format(resource_details.ResourceFamilyName, attribute_name)
for attribute in resource_details.ResourceAttributes:
if attribute.Name == attribute_name or attribute.Name == model_attribute_2g or attribute.Name == family_attribute_2g:
return attribute.Value
def get_chassis_address(self):
return self._chassis_resource.FullAddress
def get_chassis_ports(self):
self.logger.debug('Api: {}'.format(self.api))
reserved_ports = {}
port_pattern = r'{}[\\/]M(?P<module>\d+)/P(?P<port>\d+)'.format(self.get_chassis_address())
for resource in self._get_reservation_details().ReservationDescription.Resources:
if (resource.ResourceFamilyName in self.PORT_FAMILY or
resource.ResourceFamilyName == self.STATIC_VM_PORT["family_name"] and
resource.ResourceModelName == self.STATIC_VM_PORT["model_name"]):
result = re.search(port_pattern, resource.FullAddress)
if result:
logical_name = self._get_attribute_value(resource.Name, self.PORT_ATTRIBUTE)
if logical_name:
reserved_ports[logical_name.lower()] = (result.group('module'), result.group('port'))
if resource.ResourceFamilyName == self.STATIC_VM_PORT["family_name"] and \
resource.ResourceModelName == self.STATIC_VM_PORT["model_name"]:
result = re.search(port_pattern, resource.FullAddress)
if result:
logical_name = self._get_attribute_value(resource.Name, self.PORT_ATTRIBUTE)
# 'BP vBlade.GenericVPort.Logical Name'
if logical_name:
reserved_ports[logical_name.lower()] = (result.group('module'), result.group('port'))
self.logger.debug('Chassis ports {}'.format(reserved_ports))
return reserved_ports
def get_chassis_user(self):
username = self._get_attribute_value(self._chassis_name, self.USERNAME_ATTRIBUTE)
self.logger.debug('Chassis username {}'.format(username))
return username
def get_chassis_password(self):
encrypted_password = self._get_attribute_value(self._chassis_name, self.PASSWORD_ATTRIBUTE)
chassis_password = self.api.DecryptPassword(encrypted_password).Value
self.logger.debug('Chassis Password {}'.format(chassis_password))
return chassis_password
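# Editor's sketch (not part of the driver): exercises only the port-address pattern
# used by get_chassis_ports() above, with made-up chassis and port addresses.
if __name__ == '__main__':
    sample_chassis_address = '192.168.1.10'
    sample_pattern = r'{}[\\/]M(?P<module>\d+)/P(?P<port>\d+)'.format(sample_chassis_address)
    sample_match = re.search(sample_pattern, '192.168.1.10/M1/P2')
    if sample_match:
        print('module {} port {}'.format(sample_match.group('module'), sample_match.group('port')))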
| 45.631148 | 129 | 0.671816 |
4a2402c00f3201ae651624d08496df0d38886e41 | 1,626 | py | Python | vsts/vsts/extension_management/v4_1/models/contribution_base.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/extension_management/v4_1/models/contribution_base.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/extension_management/v4_1/models/contribution_base.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ContributionBase(Model):
"""ContributionBase.
:param description: Description of the contribution/type
:type description: str
:param id: Fully qualified identifier of the contribution/type
:type id: str
:param visible_to: VisibleTo can be used to restrict whom can reference a given contribution/type. This value should be a list of publishers or extensions access is restricted too. Examples: "ms" - Means only the "ms" publisher can reference this. "ms.vss-web" - Means only the "vss-web" extension from the "ms" publisher can reference this.
:type visible_to: list of str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'visible_to': {'key': 'visibleTo', 'type': '[str]'}
}
def __init__(self, description=None, id=None, visible_to=None):
super(ContributionBase, self).__init__()
self.description = description
self.id = id
self.visible_to = visible_to
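# Editor's example (hedged; the field values below are illustrative only):
if __name__ == '__main__':
    sample = ContributionBase(description='Example contribution type',
                              id='ms.vss-web.hub',
                              visible_to=['ms'])
    print('{} visible to {}'.format(sample.id, sample.visible_to))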
| 47.823529 | 346 | 0.574416 |
4a2402c18b727d7f6d00c6d5eae0dc7088faf393 | 3,524 | py | Python | python/bench.py | teepark/mummy | 33cad46c5d30d5da615ca038f1b857ca4689c810 | [
"BSD-3-Clause"
] | null | null | null | python/bench.py | teepark/mummy | 33cad46c5d30d5da615ca038f1b857ca4689c810 | [
"BSD-3-Clause"
] | null | null | null | python/bench.py | teepark/mummy | 33cad46c5d30d5da615ca038f1b857ca4689c810 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import random
import sys
import time
import mummy
import oldmummy
try:
import cPickle
except ImportError:
import pickle as cPickle
import string
try:
import bson
except ImportError:
bson = None
try:
import yajl
except ImportError:
yajl = None
try:
import cjson
except ImportError:
cjson = None
try:
import simplejson
except ImportError:
simplejson = None
try:
import wbin
except ImportError:
wbin = None
try:
import msgpack
except ImportError:
msgpack = None
test_data = [
"'this is a test'",
'''[{
"name": "foo",
"type": "bar",
"count": 1,
"info": {
"x": 203,
"y": 102,
"z": list(range(5))
}
}] * 100''',
"{'x': 203, 'y': 102, 'z': list(range(5))}",
"[0, 1, 2, 3, 4]",
"{'a': {}}",
#"[]",
#"[[]] * 500",
#"[random.random() for i in xrange(1000)]",
#"[None] * 5000",
#"[dict.fromkeys(map(str, range(20)), 14.3)] * 100",
]
def ttt(f, data=None, x=10*1000):
start = time.time()
while x:
x -= 1
foo = f(data)
return time.time()-start
def profile(serial, deserial, data, x=10*1000):
squashed = serial(data)
return (ttt(serial, data, x), ttt(deserial, squashed, x), len(squashed))
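# Editor's note: profile() accepts any dumps/loads pair, e.g. the stdlib json
# module (not one of the benchmarked contenders below):
#   import json
#   ser_time, deser_time, size = profile(json.dumps, json.loads, {'x': 203})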
def equalish(a, b):
if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)):
a, b = tuple(a), tuple(b)
for suba, subb in zip(a, b):
if not equalish(suba, subb):
return False
return True
if isinstance(a, dict) and isinstance(b, dict):
return equalish(a.items(), b.items())
return a == b
def test(serial, deserial, data):
assert equalish(deserial(serial(data)), data)
def format(flt, prec=3):
s = str(round(flt, prec))
return padright(s, s.index(".") + 4, "0")
def padright(s, upto, padchar=" "):
return s + (padchar * (upto - len(s)))
contenders = [
('mummy', (lambda s: mummy.dumps(s), mummy.loads)),
('oldmummy', (lambda s: oldmummy.dumps(s), oldmummy.loads))]
if wbin:
contenders.append(('wirebin', (wbin.serialize, wbin.deserialize)))
if msgpack:
contenders.append(('msgpack', (msgpack.dumps, msgpack.loads)))
if yajl:
contenders.append(('py-yajl', (yajl.dumps, yajl.loads)))
if cjson:
contenders.append(('cjson', (cjson.encode, cjson.decode)))
if bson:
contenders.append(('bson', (bson.BSON.encode, lambda s: bson.BSON(s).decode())))
#contenders.append(('cPickle (protocol 2)',
# (lambda x: cPickle.dumps(x, 2), cPickle.loads)))
#contenders.append(('cPickle (protocol 1)',
# (lambda x: cPickle.dumps(x, 1), cPickle.loads)))
#contenders.append(('cPickle (protocol 0)', (cPickle.dumps, cPickle.loads)))
#if simplejson:
# contenders.append(('simplejson', (simplejson.dumps, simplejson.loads)))
#contenders.append(('repr/eval', (repr, eval)))
#contenders.append(('mummy pure-python',
# (mummy.pure_python_dumps, mummy.pure_python_loads)))
if __name__ == '__main__':
tmpl = string.Template(
"$name serialize: $ser deserialize: $des total: $tot size: $size")
for sdata in test_data:
        print(sdata)
data = eval(sdata)
for name, (serial, deserial) in contenders:
test(serial, deserial, data)
x, y, size = profile(serial, deserial, data)
print(tmpl.substitute(
name=padright(name, 20),
ser=format(x, 6),
des=format(y, 6),
tot=format(x + y, 6),
size=size,
))
        print()
| 24.472222 | 84 | 0.594211 |
4a2402d719dcac6a23e08b09b43f9ff2bd60e9c1 | 477 | py | Python | src/accounts/migrations/0002_auto_20190720_1006.py | littleprodigy/Twitter | 25ef96a291d295bb91f824f331fd6a648dc79117 | [
"MIT"
] | null | null | null | src/accounts/migrations/0002_auto_20190720_1006.py | littleprodigy/Twitter | 25ef96a291d295bb91f824f331fd6a648dc79117 | [
"MIT"
] | null | null | null | src/accounts/migrations/0002_auto_20190720_1006.py | littleprodigy/Twitter | 25ef96a291d295bb91f824f331fd6a648dc79117 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2019-07-20 10:06
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='following',
field=models.ManyToManyField(blank=True, related_name='followed_by', to=settings.AUTH_USER_MODEL),
),
]
| 23.85 | 110 | 0.645702 |
4a24036474cd644389667ac5a12b53adff944b2d | 518 | py | Python | main.py | rageofgods/test-flask | dbb7eb2bf88c31139e5a1380fd818fb61fdef97e | [
"MIT"
] | null | null | null | main.py | rageofgods/test-flask | dbb7eb2bf88c31139e5a1380fd818fb61fdef97e | [
"MIT"
] | null | null | null | main.py | rageofgods/test-flask | dbb7eb2bf88c31139e5a1380fd818fb61fdef97e | [
"MIT"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
# two decorators, same function
@app.route('/')
@app.route('/index.html')
def index():
return render_template('index.html', the_title='Tiger Home Page')
@app.route('/symbol.html')
def symbol():
return render_template('symbol.html', the_title='Tiger As Symbol')
@app.route('/myth.html')
def myth():
return render_template('myth.html', the_title='Tiger in Myth and Legend')
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=True)
| 25.9 | 77 | 0.700772 |
4a24054fc555794ca32c6e3b3884558a02618265 | 505 | py | Python | lab8.py | seanstanaway/IA241-github | ce69c99d02e52919d6bca947769901ed596e73f5 | [
"MIT"
] | null | null | null | lab8.py | seanstanaway/IA241-github | ce69c99d02e52919d6bca947769901ed596e73f5 | [
"MIT"
] | null | null | null | lab8.py | seanstanaway/IA241-github | ce69c99d02e52919d6bca947769901ed596e73f5 | [
"MIT"
] | null | null | null | '''
lab 8 function
'''
#3.1
def count_words(input_str):
    return len(input_str.split())
#3.2
demo_str = 'hello world!'
print(count_words(demo_str))
#3.3
def find_min(input_list):
min_item = input_list[0]
for num in input_list:
if type(num) is not str:
if min_item>=num:
min_item = num
return(min_item)
#3.4
demo_list = [1,2,3,4,5,6]
print(find_min(demo_list))
#3.5
mix_list = [1,2,3,'a',4,5,6]
print(find_min(mix_list))
| 12.317073 | 35 | 0.582178 |
4a2407163452084a112d2adc300d75282ee51552 | 8,077 | py | Python | test/test_conda.py | robertmaynard/hpc-container-maker | fdf20b9881eb41f92b7d73c85b20f5f75ddfe262 | [
"Apache-2.0"
] | 340 | 2018-03-26T00:11:21.000Z | 2022-03-21T03:04:27.000Z | test/test_conda.py | robertmaynard/hpc-container-maker | fdf20b9881eb41f92b7d73c85b20f5f75ddfe262 | [
"Apache-2.0"
] | 103 | 2018-03-24T04:34:24.000Z | 2022-03-31T18:49:57.000Z | test/test_conda.py | robertmaynard/hpc-container-maker | fdf20b9881eb41f92b7d73c85b20f5f75ddfe262 | [
"Apache-2.0"
] | 75 | 2018-05-10T15:42:11.000Z | 2022-03-28T16:51:14.000Z | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the conda module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ppc64le, ubuntu, x86_64
from hpccm.building_blocks.conda import conda
class Test_conda(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@x86_64
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default conda building block"""
c = conda(eula=True, packages=['numpy'])
self.assertEqual(str(c),
r'''# Anaconda
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://repo.anaconda.com/miniconda/Miniconda3-py38_4.8.3-Linux-x86_64.sh && \
bash /var/tmp/Miniconda3-py38_4.8.3-Linux-x86_64.sh -b -p /usr/local/anaconda && \
/usr/local/anaconda/bin/conda init && \
ln -s /usr/local/anaconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
. /usr/local/anaconda/etc/profile.d/conda.sh && \
conda activate base && \
conda install -y numpy && \
/usr/local/anaconda/bin/conda clean -afy && \
rm -rf /var/tmp/Miniconda3-py38_4.8.3-Linux-x86_64.sh''')
@x86_64
@centos
@docker
def test_defaults_centos(self):
"""Default conda building block"""
c = conda(eula=True, packages=['numpy'])
self.assertEqual(str(c),
r'''# Anaconda
RUN yum install -y \
ca-certificates \
wget && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://repo.anaconda.com/miniconda/Miniconda3-py38_4.8.3-Linux-x86_64.sh && \
bash /var/tmp/Miniconda3-py38_4.8.3-Linux-x86_64.sh -b -p /usr/local/anaconda && \
/usr/local/anaconda/bin/conda init && \
ln -s /usr/local/anaconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
. /usr/local/anaconda/etc/profile.d/conda.sh && \
conda activate base && \
conda install -y numpy && \
/usr/local/anaconda/bin/conda clean -afy && \
rm -rf /var/tmp/Miniconda3-py38_4.8.3-Linux-x86_64.sh''')
@ppc64le
@ubuntu
@docker
def test_ppc64le(self):
"""ppc64le"""
c = conda(eula=True, version='4.7.12')
self.assertEqual(str(c),
r'''# Anaconda
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://repo.anaconda.com/miniconda/Miniconda3-4.7.12-Linux-ppc64le.sh && \
bash /var/tmp/Miniconda3-4.7.12-Linux-ppc64le.sh -b -p /usr/local/anaconda && \
/usr/local/anaconda/bin/conda init && \
ln -s /usr/local/anaconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
/usr/local/anaconda/bin/conda clean -afy && \
rm -rf /var/tmp/Miniconda3-4.7.12-Linux-ppc64le.sh''')
@x86_64
@ubuntu
@docker
def test_channels(self):
"""channels"""
c = conda(channels=['conda-forge', 'nvidia'], eula=True,
version='4.7.12')
self.assertEqual(str(c),
r'''# Anaconda
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://repo.anaconda.com/miniconda/Miniconda3-4.7.12-Linux-x86_64.sh && \
bash /var/tmp/Miniconda3-4.7.12-Linux-x86_64.sh -b -p /usr/local/anaconda && \
/usr/local/anaconda/bin/conda init && \
ln -s /usr/local/anaconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
. /usr/local/anaconda/etc/profile.d/conda.sh && \
conda activate base && \
conda config --add channels conda-forge --add channels nvidia && \
/usr/local/anaconda/bin/conda clean -afy && \
rm -rf /var/tmp/Miniconda3-4.7.12-Linux-x86_64.sh''')
@x86_64
@ubuntu
@docker
def test_environment(self):
"""environment"""
c = conda(eula=True, environment='foo/environment.yml',
version='4.7.12')
self.assertEqual(str(c),
r'''# Anaconda
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
wget && \
rm -rf /var/lib/apt/lists/*
COPY foo/environment.yml /var/tmp/environment.yml
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://repo.anaconda.com/miniconda/Miniconda3-4.7.12-Linux-x86_64.sh && \
bash /var/tmp/Miniconda3-4.7.12-Linux-x86_64.sh -b -p /usr/local/anaconda && \
/usr/local/anaconda/bin/conda init && \
ln -s /usr/local/anaconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
. /usr/local/anaconda/etc/profile.d/conda.sh && \
conda activate base && \
conda env update -f /var/tmp/environment.yml && \
rm -rf /var/tmp/environment.yml && \
/usr/local/anaconda/bin/conda clean -afy && \
rm -rf /var/tmp/Miniconda3-4.7.12-Linux-x86_64.sh''')
@x86_64
@ubuntu
@docker
def test_python2(self):
"""python 2"""
c = conda(eula=True, python2=True, version='4.7.12')
self.assertEqual(str(c),
r'''# Anaconda
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://repo.anaconda.com/miniconda/Miniconda2-4.7.12-Linux-x86_64.sh && \
bash /var/tmp/Miniconda2-4.7.12-Linux-x86_64.sh -b -p /usr/local/anaconda && \
/usr/local/anaconda/bin/conda init && \
ln -s /usr/local/anaconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
/usr/local/anaconda/bin/conda clean -afy && \
rm -rf /var/tmp/Miniconda2-4.7.12-Linux-x86_64.sh''')
@x86_64
@ubuntu
@docker
def test_python_subversion(self):
"""python subversion"""
c = conda(eula=True, python_subversion='py37', version='4.8.3')
self.assertEqual(str(c),
r'''# Anaconda
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://repo.anaconda.com/miniconda/Miniconda3-py37_4.8.3-Linux-x86_64.sh && \
bash /var/tmp/Miniconda3-py37_4.8.3-Linux-x86_64.sh -b -p /usr/local/anaconda && \
/usr/local/anaconda/bin/conda init && \
ln -s /usr/local/anaconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
/usr/local/anaconda/bin/conda clean -afy && \
rm -rf /var/tmp/Miniconda3-py37_4.8.3-Linux-x86_64.sh''')
@x86_64
@ubuntu
@docker
def test_runtime(self):
"""runtime"""
c = conda(eula=True)
r = c.runtime()
self.assertEqual(r,
r'''# Anaconda
COPY --from=0 /usr/local/anaconda /usr/local/anaconda
RUN /usr/local/anaconda/bin/conda init && \
ln -s /usr/local/anaconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh''')
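# Editor's note (hedged sketch): outside these unit tests the same building block
# is typically consumed from an HPCCM recipe, e.g.
#   Stage0 += conda(eula=True, packages=['numpy'])
# which emits the container instructions asserted above.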
| 40.18408 | 149 | 0.649251 |
4a2407afda767dd2e413e354fe518ecc9f563ad4 | 62 | py | Python | main/owner/routers.py | MahanBi/Back-End | 5074ac1d341ad2addd1750e4aea2e6800be2bfef | [
"MIT"
] | null | null | null | main/owner/routers.py | MahanBi/Back-End | 5074ac1d341ad2addd1750e4aea2e6800be2bfef | [
"MIT"
] | null | null | null | main/owner/routers.py | MahanBi/Back-End | 5074ac1d341ad2addd1750e4aea2e6800be2bfef | [
"MIT"
] | 1 | 2021-12-06T21:36:28.000Z | 2021-12-06T21:36:28.000Z | from django.urls import re_path
websocket_urlpatterns = [
]
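# Editor's sketch (hypothetical consumer module and route): a websocket route
# would normally be registered here, e.g.
#   from . import consumers
#   websocket_urlpatterns = [
#       re_path(r'ws/owner/$', consumers.OwnerConsumer.as_asgi()),
#   ]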
| 10.333333 | 31 | 0.774194 |
4a2407c531c3f876297bb65a1ec6ca37341a5273 | 9,663 | py | Python | venv/lib/python3.8/site-packages/joblib/externals/loky/backend/reduction.py | avrumnoor/NewsSummarizer | a963497ef9bc62d2148aa28e624ea32955992f57 | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | venv/lib/python3.8/site-packages/joblib/externals/loky/backend/reduction.py | avrumnoor/NewsSummarizer | a963497ef9bc62d2148aa28e624ea32955992f57 | [
"MIT"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | venv/lib/python3.8/site-packages/joblib/externals/loky/backend/reduction.py | avrumnoor/NewsSummarizer | a963497ef9bc62d2148aa28e624ea32955992f57 | [
"MIT"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | ###############################################################################
# Customizable Pickler with some basic reducers
#
# author: Thomas Moreau
#
# adapted from multiprocessing/reduction.py (17/02/2017)
# * Replace the ForkingPickler with a similar _LokyPickler,
# * Add CustomizableLokyPickler to allow customizing pickling process
# on the fly.
#
import io
import os
import sys
import functools
from multiprocessing import util
import types
try:
# Python 2 compat
from cPickle import loads as pickle_loads
except ImportError:
from pickle import loads as pickle_loads
import copyreg
from pickle import HIGHEST_PROTOCOL
if sys.platform == "win32":
if sys.version_info[:2] > (3, 3):
from multiprocessing.reduction import duplicate
else:
from multiprocessing.forking import duplicate
###############################################################################
# Enable custom pickling in Loky.
# To allow instance customization of the pickling process, we use 2 classes.
# _ReducerRegistry gives module level customization and CustomizablePickler
# permits to use instance base custom reducers. Only CustomizablePickler
# should be used.
class _ReducerRegistry(object):
"""Registry for custom reducers.
HIGHEST_PROTOCOL is selected by default as this pickler is used
to pickle ephemeral datastructures for interprocess communication
hence no backward compatibility is required.
"""
# We override the pure Python pickler as its the only way to be able to
# customize the dispatch table without side effects in Python 2.6
# to 3.2. For Python 3.3+ leverage the new dispatch_table
# feature from http://bugs.python.org/issue14166 that makes it possible
# to use the C implementation of the Pickler which is faster.
dispatch_table = {}
@classmethod
def register(cls, type, reduce_func):
"""Attach a reducer function to a given type in the dispatch table."""
if sys.version_info < (3,):
# Python 2 pickler dispatching is not explicitly customizable.
# Let us use a closure to workaround this limitation.
def dispatcher(cls, obj):
reduced = reduce_func(obj)
cls.save_reduce(obj=obj, *reduced)
cls.dispatch_table[type] = dispatcher
else:
cls.dispatch_table[type] = reduce_func
###############################################################################
# Registers extra pickling routines to improve picklization for loky
register = _ReducerRegistry.register
# make methods picklable
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
def f(self):
pass
@classmethod
def h(cls):
pass
register(type(_C().f), _reduce_method)
register(type(_C.h), _reduce_method)
if not hasattr(sys, "pypy_version_info"):
# PyPy uses functions instead of method_descriptors and wrapper_descriptors
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
# Make partial func picklable
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)
if sys.platform != "win32":
from ._posix_reduction import _mk_inheritable # noqa: F401
else:
from . import _win_reduction # noqa: F401
# global variable to change the pickler behavior
try:
from joblib.externals import cloudpickle # noqa: F401
DEFAULT_ENV = "cloudpickle"
except ImportError:
# If cloudpickle is not present, fallback to pickle
DEFAULT_ENV = "pickle"
ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
_LokyPickler = None
_loky_pickler_name = None
def set_loky_pickler(loky_pickler=None):
global _LokyPickler, _loky_pickler_name
if loky_pickler is None:
loky_pickler = ENV_LOKY_PICKLER
loky_pickler_cls = None
# The default loky_pickler is cloudpickle
if loky_pickler in ["", None]:
loky_pickler = "cloudpickle"
if loky_pickler == _loky_pickler_name:
return
if loky_pickler == "cloudpickle":
from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
else:
try:
from importlib import import_module
module_pickle = import_module(loky_pickler)
loky_pickler_cls = module_pickle.Pickler
except (ImportError, AttributeError) as e:
extra_info = ("\nThis error occurred while setting loky_pickler to"
" '{}', as required by the env variable LOKY_PICKLER"
" or the function set_loky_pickler."
.format(loky_pickler))
e.args = (e.args[0] + extra_info,) + e.args[1:]
e.msg = e.args[0]
raise e
util.debug("Using '{}' for serialization."
.format(loky_pickler if loky_pickler else "cloudpickle"))
class CustomizablePickler(loky_pickler_cls):
_loky_pickler_cls = loky_pickler_cls
def _set_dispatch_table(self, dispatch_table):
for ancestor_class in self._loky_pickler_cls.mro():
dt_attribute = getattr(ancestor_class, "dispatch_table", None)
if isinstance(dt_attribute, types.MemberDescriptorType):
# Ancestor class (typically _pickle.Pickler) has a
# member_descriptor for its "dispatch_table" attribute. Use
# it to set the dispatch_table as a member instead of a
# dynamic attribute in the __dict__ of the instance,
# otherwise it will not be taken into account by the C
# implementation of the dump method if a subclass defines a
# class-level dispatch_table attribute as was done in
# cloudpickle 1.6.0:
# https://github.com/joblib/loky/pull/260
dt_attribute.__set__(self, dispatch_table)
break
# On top of member descriptor set, also use setattr such that code
# that directly access self.dispatch_table gets a consistent view
# of the same table.
self.dispatch_table = dispatch_table
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
loky_pickler_cls.__init__(self, writer, protocol=protocol)
if reducers is None:
reducers = {}
if sys.version_info < (3,):
self.dispatch = loky_pickler_cls.dispatch.copy()
self.dispatch.update(_ReducerRegistry.dispatch_table)
else:
if hasattr(self, "dispatch_table"):
# Force a copy that we will update without mutating the
# any class level defined dispatch_table.
loky_dt = dict(self.dispatch_table)
else:
# Use standard reducers as bases
loky_dt = copyreg.dispatch_table.copy()
# Register loky specific reducers
loky_dt.update(_ReducerRegistry.dispatch_table)
# Set the new dispatch table, taking care of the fact that we
# need to use the member_descriptor when we inherit from a
# subclass of the C implementation of the Pickler base class
# with an class level dispatch_table attribute.
self._set_dispatch_table(loky_dt)
# Register custom reducers
for type, reduce_func in reducers.items():
self.register(type, reduce_func)
def register(self, type, reduce_func):
"""Attach a reducer function to a given type in the dispatch table.
"""
if sys.version_info < (3,):
# Python 2 pickler dispatching is not explicitly customizable.
# Let us use a closure to workaround this limitation.
def dispatcher(self, obj):
reduced = reduce_func(obj)
self.save_reduce(obj=obj, *reduced)
self.dispatch[type] = dispatcher
else:
self.dispatch_table[type] = reduce_func
_LokyPickler = CustomizablePickler
_loky_pickler_name = loky_pickler
def get_loky_pickler_name():
global _loky_pickler_name
return _loky_pickler_name
def get_loky_pickler():
global _LokyPickler
return _LokyPickler
# Set it to its default value
set_loky_pickler()
def loads(buf):
# Compat for python2.7 version
if sys.version_info < (3, 3) and isinstance(buf, io.BytesIO):
buf = buf.getvalue()
return pickle_loads(buf)
def dump(obj, file, reducers=None, protocol=None):
'''Replacement for pickle.dump() using _LokyPickler.'''
global _LokyPickler
_LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
def dumps(obj, reducers=None, protocol=None):
global _LokyPickler
buf = io.BytesIO()
dump(obj, buf, reducers=reducers, protocol=protocol)
if sys.version_info < (3, 3):
return buf.getvalue()
return buf.getbuffer()
__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]
if sys.platform == "win32":
__all__ += ["duplicate"]
| 34.3879 | 81 | 0.639863 |
4a2407c981fd56f724aa08b3a74d9b5ea6881215 | 2,067 | py | Python | pysrc/importfinder.py | CrackerCat/xed | 428712c28e831573579b7f749db63d3a58dcdbd9 | [
"Apache-2.0"
] | 1,261 | 2016-12-16T14:29:30.000Z | 2022-03-30T20:21:25.000Z | pysrc/importfinder.py | CrackerCat/xed | 428712c28e831573579b7f749db63d3a58dcdbd9 | [
"Apache-2.0"
] | 190 | 2016-12-17T13:44:09.000Z | 2022-03-27T09:28:13.000Z | pysrc/importfinder.py | CrackerCat/xed | 428712c28e831573579b7f749db63d3a58dcdbd9 | [
"Apache-2.0"
] | 155 | 2016-12-16T22:17:20.000Z | 2022-02-16T20:53:59.000Z | #!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
# Returns a list of imported modules but it is unacceptably slow. For
# the execution of "pysrc/importfinder.py generator pysrc" it takes 23
# seconds.
from __future__ import print_function
import os
import sys
import modulefinder
def _get_modules(fn):
finder = modulefinder.ModuleFinder()
finder.run_script(fn)
all = []
for m in finder.modules.values():
if not isinstance(m, modulefinder.Module):
continue
if not m.__file__:
continue
# skip shared object files
if m.__file__.endswith('.so'):
continue
# skip mac system stuff...
# FIXME: would need to augment with other OS's system stuff
if m.__file__.startswith('/Library/Frameworks'):
continue
all.append(m)
return all
def find(root_module):
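    # Editor's note: breadth-first walk over the import graph -- each popped module's
    # own imports are scanned, and unseen modules are queued and their paths collected.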
worklist = []
d = {} # remember what we've seen
all = [] # output: list of path-prefixed modules
mods = _get_modules(root_module)
worklist.extend(mods)
while worklist:
x = worklist.pop(0)
for m in _get_modules(x.__file__):
if m.__name__ not in d:
worklist.append(m)
all.append(m.__file__)
d[m.__name__]=True
all.sort()
return all
if __name__ == "__main__":
sys.path = [sys.argv[2]] + sys.path
print(find(os.path.join(sys.argv[2],sys.argv[1]+'.py')))
| 29.956522 | 75 | 0.651185 |
4a2407ed5b53b3fa1dc4e4f56370a12f55aa20a8 | 2,417 | py | Python | tests/parsers/xchatlog.py | GalacticMaster/plaso | 0e3eed472e4ef67f619e89de9fcebba87d1923aa | [
"Apache-2.0"
] | null | null | null | tests/parsers/xchatlog.py | GalacticMaster/plaso | 0e3eed472e4ef67f619e89de9fcebba87d1923aa | [
"Apache-2.0"
] | null | null | null | tests/parsers/xchatlog.py | GalacticMaster/plaso | 0e3eed472e4ef67f619e89de9fcebba87d1923aa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the xchatlog parser."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import xchatlog as _ # pylint: disable=unused-import
from plaso.parsers import xchatlog
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class XChatLogUnitTest(test_lib.ParserTestCase):
"""Tests for the xchatlog parser."""
@shared_test_lib.skipUnlessHasTestFile(['xchat.log'])
def testParse(self):
"""Tests the Parse function."""
parser = xchatlog.XChatLogParser()
storage_writer = self._ParseFile(
['xchat.log'], parser, timezone='Europe/Rome')
self.assertEqual(storage_writer.number_of_events, 9)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2011-12-31 20:11:55.000000')
expected_message = 'XChat start logging'
self._TestGetMessageStrings(event, expected_message, expected_message)
event = events[1]
expected_message = '--> You are now talking on #gugle'
self._TestGetMessageStrings(event, expected_message, expected_message)
event = events[2]
expected_message = '--- Topic for #gugle is plaso, a difficult word'
self._TestGetMessageStrings(event, expected_message, expected_message)
event = events[3]
expected_message = 'Topic for #gugle set by Kristinn'
self._TestGetMessageStrings(event, expected_message, expected_message)
event = events[4]
expected_message = '--- Joachim gives voice to fpi'
self._TestGetMessageStrings(event, expected_message, expected_message)
event = events[5]
expected_message = '* XChat here'
self._TestGetMessageStrings(event, expected_message, expected_message)
event = events[6]
expected_message = '[nickname: fpi] ola plas-ing guys!'
self._TestGetMessageStrings(event, expected_message, expected_message)
event = events[7]
self.CheckTimestamp(event.timestamp, '2011-12-31 22:00:00.000000')
expected_message = '[nickname: STRANGER] \u65e5\u672c'
self._TestGetMessageStrings(event, expected_message, expected_message)
event = events[8]
self.CheckTimestamp(event.timestamp, '2011-12-31 22:59:00.000000')
expected_message = 'XChat end logging'
self._TestGetMessageStrings(event, expected_message, expected_message)
if __name__ == '__main__':
unittest.main()
| 30.987179 | 75 | 0.736036 |
4a2408fbdd09725ef63265a3934f3148e97e1584 | 2,163 | py | Python | Drivers/Series_2380_DC_Electronic_Load/Win10_Python_Using_VISA/Model_2380_Example_01_CC_Setup.py | 398786172/keithley | f78c5220841775a45ae60645c774e8b443b02ec3 | [
"BSD-Source-Code"
] | 31 | 2019-04-11T14:25:39.000Z | 2022-03-18T15:09:33.000Z | Drivers/Series_2380_DC_Electronic_Load/Win10_Python_Using_VISA/Model_2380_Example_01_CC_Setup.py | 398786172/keithley | f78c5220841775a45ae60645c774e8b443b02ec3 | [
"BSD-Source-Code"
] | 27 | 2019-04-10T20:21:52.000Z | 2021-12-09T01:59:32.000Z | Drivers/Series_2380_DC_Electronic_Load/Win10_Python_Using_VISA/Model_2380_Example_01_CC_Setup.py | 398786172/keithley | f78c5220841775a45ae60645c774e8b443b02ec3 | [
"BSD-Source-Code"
] | 30 | 2019-06-08T09:38:20.000Z | 2022-03-18T15:10:37.000Z | import visa
import struct
import math
import time
import Keithley_Model_2380_VISA_Driver as kei2380
from ISStreamer.Streamer import Streamer
def writeToInitialState(myCurr, myVolt):
streamer.log("CURR", myCurr)
streamer.log("VOLT", myVolt)
return
#===== MAIN PROGRAM STARTS HERE =====
rm = visa.ResourceManager()    # Opens the resource manager and sets it to variable rm
Inst_1 = "GPIB0::6::INSTR"
# Instrument ID String examples...
# LAN -> TCPIP0::134.63.71.209::inst0::INSTR
# USB -> USB0::0x05E6::0x2450::01419962::INSTR
# GPIB -> GPIB0::16::INSTR
# Serial -> ASRL4::INSTR
timeout = 20000
bucketName = time.strftime("CR123A_Discharge_%Y-%m-%d_%H-%M-%S")
myAccessKey = "ist_nQyQRT8qhhCZ3mOlVDfxsqVaX4QJLzLd"
streamer = Streamer(bucket_name=bucketName,
access_key=myAccessKey)
KEI2380 = kei2380.LOAD2380()
myID = KEI2380.Connect(rm, Inst_1, timeout, 1, 1, 1)
KEI2380.echoCmd = 0
t1 = time.time()
# set up for CC of a CR123A Lithium Battery
KEI2380.Set_DisplayMode(KEI2380.DisplayMode.TEXT)
KEI2380.Set_DisplayText(0, "Dischrg Batt in 3 ")
time.sleep(1.0)
KEI2380.Set_DisplayText(0, "Dischrg Batt in 2 ")
time.sleep(1.0)
KEI2380.Set_DisplayText(0, "Dischrg Batt in 1 ")
time.sleep(1.0)
KEI2380.Set_DisplayMode(KEI2380.DisplayMode.NORMAL)
KEI2380.Set_Function(KEI2380.Function.CC)
KEI2380.Set_Level(0.5)
KEI2380.Set_Range(1.5)
KEI2380.Set_OutputState(KEI2380.State.ON)
# delay to settle...
time.sleep(1.0)
# Now loop to capture discharge current and voltage
j = 1000
while(j > 1.0):
myCurr = float(KEI2380.Get_Current())
myVolt = float(KEI2380.Get_Voltage())
print("Current {:6.6f}; Voltage {:6.6f}".format(myCurr, myVolt))
writeToInitialState(myCurr, myVolt)
time.sleep(1.0)
j = myVolt
time.sleep(5.0)
KEI2380.Set_OutputState(KEI2380.State.OFF)
#KEI2380.SetMeasure_FilterState(DMM6500.DmmState.ON)
#time.sleep(1.0)
KEI2380.Disconnect()
rm.close()
t2 = time.time()
# Notify the user of completion and the test time achieved.
print("done")
print("{0:.6f} s".format(t2-t1))
input("Press Enter to continue...")
exit()
| 28.460526 | 79 | 0.701803 |
4a240a926008dfad2dd7c6f094782e0e348081ca | 797 | py | Python | trigger_job.py | pourabkarchaudhuri/tif2obj-convertor | b910e59c4dc8c0dad88ef74cbe9139a91d3c5dd8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2021-10-01T09:24:42.000Z | 2021-10-01T09:24:42.000Z | trigger_job.py | pourabkarchaudhuri/tif2obj-convertor | b910e59c4dc8c0dad88ef74cbe9139a91d3c5dd8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2021-06-08T20:41:57.000Z | 2022-03-12T00:07:49.000Z | trigger_job.py | pourabkarchaudhuri/tif2obj-convertor | b910e59c4dc8c0dad88ef74cbe9139a91d3c5dd8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | import argparse
import convert_tif2stl
import convert_stl2obj
import os
def execute_job(path):
OUTPUT_EXPORT_PATH = os.path.join(os.getcwd(), 'output')
if not os.path.exists(OUTPUT_EXPORT_PATH):
os.makedirs(OUTPUT_EXPORT_PATH)
# print("Input at : {}".format(path))
STL_PATH = convert_tif2stl.convert(path)
OBJ_PATH = convert_stl2obj.convert(STL_PATH)
print("Final .OBJ filename : {}".format(OBJ_PATH))
return
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert TIF sequence to OBJ Mesh')
parser.add_argument('--path', required=True)
args = parser.parse_args()
if args.path == "":
print("Blank")
else:
print(args.path)
execute_job(args.path)
print("Complete") | 23.441176 | 83 | 0.662484 |
4a240b993bf686ce75c84014dafb1e0f821ae00a | 2,752 | py | Python | online_judges/nim/nim_challenge.py | stephank007/python_challenges | dfd8d18c03a06735f6e4e02b0660007fe2d02f07 | [
"Apache-2.0"
] | null | null | null | online_judges/nim/nim_challenge.py | stephank007/python_challenges | dfd8d18c03a06735f6e4e02b0660007fe2d02f07 | [
"Apache-2.0"
] | null | null | null | online_judges/nim/nim_challenge.py | stephank007/python_challenges | dfd8d18c03a06735f6e4e02b0660007fe2d02f07 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Determine whether you can win the Nim game given the remaining stones.
#
# See the [LeetCode](https://leetcode.com/problems/nim-game/) problem page.
#
# You are playing the following Nim Game with your friend: There is a heap of stones on the table, each time one of you take turns to remove 1 to 3 stones. The one who removes the last stone will be the winner. You will take the first turn to remove the stones.
#
# Both of you are very clever and have optimal strategies for the game. Write a function to determine whether you can win the game given the number of stones in the heap.
#
# For example, if there are 4 stones in the heap, then you will never win the game: no matter 1, 2, or 3 stones you remove, the last stone will always be removed by your friend.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
#
# * Is the input an int?
# * Yes
# * Is the output a boolean?
# * Yes
# * Can we assume the inputs are valid?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * None -> TypeError
# * 1, 2, or 3 -> True
# * 4 -> False
# * 7 -> True
# * 40 -> False
# ## Algorithm
#
# Refer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
# In[ ]:
class Solution(object):
def can_win_nim(self, num_stones_left):
# TODO: Implement me
pass
# ## Unit Test
# **The following unit test is expected to fail until you solve the challenge.**
# In[ ]:
# %load test_can_win_nim.py
import unittest
class TestSolution(unittest.TestCase):
def test_can_win_nim(self):
solution = Solution()
self.assertRaises(TypeError, solution.can_win_nim, None)
self.assertEqual(solution.can_win_nim(1), True)
self.assertEqual(solution.can_win_nim(2), True)
self.assertEqual(solution.can_win_nim(3), True)
self.assertEqual(solution.can_win_nim(4), False)
self.assertEqual(solution.can_win_nim(7), True)
self.assertEqual(solution.can_win_nim(40), False)
print('Success: test_can_win_nim')
def main():
test = TestSolution()
test.test_can_win_nim()
if __name__ == '__main__':
main()
# ## Solution Notebook
#
# Review the [Solution Notebook]() for a discussion on algorithms and code solutions.
| 28.371134 | 261 | 0.686047 |
4a240b9f81e15d700478c59fc70a149b79717f1f | 2,898 | py | Python | tickets/views.py | CarlesLopezMagem/tickets-django-api | 3b1c7668ab467cd42781f83f7656b59c24753b74 | [
"MIT"
] | null | null | null | tickets/views.py | CarlesLopezMagem/tickets-django-api | 3b1c7668ab467cd42781f83f7656b59c24753b74 | [
"MIT"
] | null | null | null | tickets/views.py | CarlesLopezMagem/tickets-django-api | 3b1c7668ab467cd42781f83f7656b59c24753b74 | [
"MIT"
] | null | null | null | from rest_framework import viewsets, mixins
from . import serializers, models, permissions
from django.contrib.auth.models import User
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from django.db.models import F, Func
from django.db.models import Q
class TicketsViewSet(viewsets.ModelViewSet):
queryset = models.Ticket.objects.all()
serializer_class = serializers.TicketSerializer
permission_classes = [permissions.IsOwnerOrReadOnly]
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.validated_data['owner'] = request.user
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def get_queryset(self):
"""
Optionally restricts the returned purchases to a given user,
by filtering against a `username` query parameter in the URL.
"""
queryset = models.Ticket.objects.all()
order_by = ['-published']
zip_code = self.request.query_params.get('zip', None)
if zip_code is not None:
queryset = queryset.annotate(
score=(
Func(
(F('zip_code') - zip_code),
function='ABS'
)
))
order_by = ['score', *order_by]
iam_owner = self.request.query_params.get('iamOwner', False)
if iam_owner:
queryset = queryset.filter(owner=self.request.user)
return queryset.order_by(*order_by)
class DialogViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = models.Dialog.objects.all()
serializer_class = serializers.DialogSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
return models.Dialog.objects.filter(Q(ticket__owner=self.request.user) | Q(voluntary=self.request.user))
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.validated_data['ticket'] = models.Ticket.objects.get(
pk=request.data['ticket']['id'])
serializer.validated_data['voluntary'] = request.user
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = serializers.UserSerializer
| 40.25 | 112 | 0.676329 |
4a240bf438ef2a8adba4257e0dd764e5804c062d | 5,176 | py | Python | ros/src/waypoint_updater/waypoint_updater.py | craftGhost/CarND-Capstone | a3571d3461585fddf1e3e3704bf1225d574526b6 | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | craftGhost/CarND-Capstone | a3571d3461585fddf1e3e3704bf1225d574526b6 | [
"MIT"
] | 7 | 2020-01-28T23:04:52.000Z | 2022-02-10T00:22:31.000Z | ros/src/waypoint_updater/waypoint_updater.py | craftGhost/CarND-Capstone | a3571d3461585fddf1e3e3704bf1225d574526b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32
import numpy as np
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 0.5
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.stopline_wp_idx = -1
self.loop()
def loop(self):
rate = rospy.Rate(50)
while not rospy.is_shutdown():
if self.pose and self.waypoint_tree:
closest_waypoint_idx = self.get_closest_waypoint_idx()
self.publish_waypoints(closest_waypoint_idx)
rate.sleep()
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
# find closest waypoint index
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
# check if the closest waypoint is ahead or behind the car
closest_pt = self.waypoints_2d[closest_idx]
pre_pt = self.waypoints_2d[closest_idx - 1]
cl_vect = np.array(closest_pt)
pre_vect = np.array(pre_pt)
pos_vect = np.array([x, y])
val = np.dot(cl_vect-pre_vect, pos_vect-cl_vect)
if val > 0:
closest_idx = (closest_idx+1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self, closest_idx):
lane = Lane()
lane.header = self.base_waypoints.header
farthest_idx = closest_idx + LOOKAHEAD_WPS
lane.waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]
if self.stopline_wp_idx == -1 or self.stopline_wp_idx >= farthest_idx:
self.final_waypoints_pub.publish(lane)
else:
lane.waypoints = self.decelerate_waypoints(lane.waypoints, closest_idx)
self.final_waypoints_pub.publish(lane)
def decelerate_waypoints(self, waypoints, closest_idx):
temp = []
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
stop_idx = max(self.stopline_wp_idx-closest_idx-2, 0)
dist = self.distance(waypoints, i, stop_idx)
vel = (2*MAX_DECEL*dist)**0.5
if vel < 1.0:
vel = 0.0
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
return temp
def pose_cb(self, msg):
# TODO: Implement
self.pose = msg
def waypoints_cb(self, waypoints):
# TODO: Implement
self.base_waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
self.stopline_wp_idx = msg.data
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| 35.210884 | 132 | 0.647025 |
4a240c805d7b6537f469b4397977406ea2dbe37c | 209 | py | Python | Tan_ShinYi/Assignments/flaskolympics/olympics2/server.py | webguru001/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
] | 5 | 2019-05-17T01:30:02.000Z | 2021-06-17T21:02:58.000Z | Tan_ShinYi/Assignments/flaskolympics/olympics2/server.py | curest0x1021/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
] | null | null | null | Tan_ShinYi/Assignments/flaskolympics/olympics2/server.py | curest0x1021/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def myfirstfunction():
return render_template('index.html', name="Mike")
if __name__ == '__main__':
app.run(debug = True)
| 20.9 | 53 | 0.708134 |
4a240cb6a5e23f5e760e7a411762c925262d62fa | 77,631 | py | Python | sandbox/rocky/tf/core/layers.py | wjh720/EMI | 01570087003ee83288069e520d1c2a13c8a0b79e | [
"MIT"
] | 36 | 2019-05-15T03:06:57.000Z | 2022-02-23T07:09:42.000Z | sandbox/rocky/tf/core/layers.py | jaekyeom/EMI-1 | 83ea5ee0035e6c918c956a3931102c1447370f73 | [
"MIT"
] | null | null | null | sandbox/rocky/tf/core/layers.py | jaekyeom/EMI-1 | 83ea5ee0035e6c918c956a3931102c1447370f73 | [
"MIT"
] | 12 | 2019-07-20T09:34:17.000Z | 2021-05-23T20:49:15.000Z | # -*- coding: utf-8 -*-
import functools
import numpy as np
import math
import tensorflow as tf
from tensorflow.python.training import moving_averages
from collections import OrderedDict
from collections import deque
from itertools import chain
from inspect import getargspec
from difflib import get_close_matches
from warnings import warn
class G(object):
pass
G._n_layers = 0
def create_param(spec, shape, name, trainable=True, regularizable=True):
if not hasattr(spec, '__call__'):
assert isinstance(spec, (tf.Tensor, tf.Variable))
return spec
assert hasattr(spec, '__call__')
if regularizable:
# use the default regularizer
regularizer = None
else:
# do not regularize this variable
regularizer = lambda _: tf.constant(0.)
return tf.get_variable(
name=name, shape=shape, initializer=spec, trainable=trainable,
regularizer=regularizer, dtype=tf.float32
)
def as_tuple(x, N, t=None):
try:
X = tuple(x)
except TypeError:
X = (x,) * N
if (t is not None) and not all(isinstance(v, t) for v in X):
raise TypeError("expected a single value or an iterable "
"of {0}, got {1} instead".format(t.__name__, x))
if len(X) != N:
raise ValueError("expected a single value or an iterable "
"with length {0}, got {1} instead".format(N, x))
return X
def conv_output_length(input_length, filter_size, stride, pad=0):
"""Helper function to compute the output size of a convolution operation
This function computes the length along a single axis, which corresponds
to a 1D convolution. It can also be used for convolutions with higher
dimensionalities by using it individually for each axis.
Parameters
----------
input_length : int or None
The size of the input.
filter_size : int
The size of the filter.
stride : int
The stride of the convolution operation.
pad : int, 'full' or 'same' (default: 0)
By default, the convolution is only computed where the input and the
filter fully overlap (a valid convolution). When ``stride=1``, this
yields an output that is smaller than the input by ``filter_size - 1``.
The `pad` argument allows you to implicitly pad the input with zeros,
extending the output size.
A single integer results in symmetric zero-padding of the given size on
both borders.
``'full'`` pads with one less than the filter size on both sides. This
is equivalent to computing the convolution wherever the input and the
filter overlap by at least one position.
``'same'`` pads with half the filter size on both sides (one less on
the second side for an even filter size). When ``stride=1``, this
results in an output size equal to the input size.
Returns
-------
int or None
The output size corresponding to the given convolution parameters, or
``None`` if `input_size` is ``None``.
Raises
------
ValueError
When an invalid padding is specified, a `ValueError` is raised.
"""
if input_length is None:
return None
if pad == 'valid':
output_length = input_length - filter_size + 1
elif pad == 'full':
output_length = input_length + filter_size - 1
elif pad == 'same':
output_length = input_length
elif isinstance(pad, int):
output_length = input_length + 2 * pad - filter_size + 1
else:
raise ValueError('Invalid pad: {0}'.format(pad))
# This is the integer arithmetic equivalent to
# np.ceil(output_length / stride)
output_length = (output_length + stride - 1) // stride
return output_length
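# Editor's worked example (hedged): for input_length=10, filter_size=3, stride=2 --
#   pad='valid' -> 10 - 3 + 1 = 8,  then (8 + 1) // 2 = 4
#   pad='same'  -> 10,              then (10 + 1) // 2 = 5
#   pad='full'  -> 10 + 3 - 1 = 12, then (12 + 1) // 2 = 6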
class Layer(object):
def __init__(self, incoming, name=None, variable_reuse=None, weight_normalization=False, **kwargs):
if isinstance(incoming, tuple):
self.input_shape = incoming
self.input_layer = None
else:
self.input_shape = incoming.output_shape
self.input_layer = incoming
self.params = OrderedDict()
self.weight_normalization = weight_normalization
if name is None:
name = "%s_%d" % (type(self).__name__, G._n_layers)
G._n_layers += 1
self.name = name
self.variable_reuse = variable_reuse
self.get_output_kwargs = []
if any(d is not None and d <= 0 for d in self.input_shape):
raise ValueError((
"Cannot create Layer with a non-positive input_shape "
"dimension. input_shape=%r, self.name=%r") % (
self.input_shape, self.name))
@property
def output_shape(self):
shape = self.get_output_shape_for(self.input_shape)
if any(isinstance(s, (tf.Variable, tf.Tensor)) for s in shape):
raise ValueError("%s returned a symbolic output shape from its "
"get_output_shape_for() method: %r. This is not "
"allowed; shapes must be tuples of integers for "
"fixed-size dimensions and Nones for variable "
"dimensions." % (self.__class__.__name__, shape))
return shape
def get_output_shape_for(self, input_shape):
raise NotImplementedError
def get_output_for(self, input, **kwargs):
raise NotImplementedError
def add_param_plain(self, spec, shape, name, **tags):
with tf.variable_scope(self.name, reuse=self.variable_reuse):
tags['trainable'] = tags.get('trainable', True)
tags['regularizable'] = tags.get('regularizable', True)
param = create_param(spec, shape, name, **tags)
self.params[param] = set(tag for tag, value in list(tags.items()) if value)
return param
def add_param(self, spec, shape, name, **kwargs):
param = self.add_param_plain(spec, shape, name, **kwargs)
if name is not None and name.startswith("W") and self.weight_normalization:
# Hacky: check if the parameter is a weight matrix. If so, apply weight normalization
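            # Editor's note: this applies weight normalization (Salimans & Kingma, 2016),
            # re-parameterizing the kernel as g * v / ||v|| with the norm reduced over
            # every axis except the output channels.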
if len(param.get_shape()) == 2:
v = param
g = self.add_param_plain(tf.ones_initializer(), (shape[1],), name=name + "_wn/g")
param = v * (tf.reshape(g, (1, -1)) / tf.sqrt(tf.reduce_sum(tf.square(v), 0, keep_dims=True)))
elif len(param.get_shape()) == 4:
v = param
g = self.add_param_plain(tf.ones_initializer(), (shape[3],), name=name + "_wn/g")
param = v * (tf.reshape(g, (1, 1, 1, -1)) / tf.sqrt(tf.reduce_sum(tf.square(v), [0, 1, 2],
keep_dims=True)))
else:
raise NotImplementedError
return param
def get_params(self, **tags):
result = list(self.params.keys())
only = set(tag for tag, value in list(tags.items()) if value)
if only:
# retain all parameters that have all of the tags in `only`
result = [param for param in result
if not (only - self.params[param])]
exclude = set(tag for tag, value in list(tags.items()) if not value)
if exclude:
# retain all parameters that have none of the tags in `exclude`
result = [param for param in result
if not (self.params[param] & exclude)]
return result
class InputLayer(Layer):
def __init__(self, shape, input_var=None, **kwargs):
super(InputLayer, self).__init__(shape, **kwargs)
self.shape = shape
if input_var is None:
if self.name is not None:
with tf.variable_scope(self.name):
input_var = tf.placeholder(tf.float32, shape=shape, name="input")
else:
input_var = tf.placeholder(tf.float32, shape=shape, name="input")
self.input_var = input_var
@Layer.output_shape.getter
def output_shape(self):
return self.shape
class MergeLayer(Layer):
def __init__(self, incomings, name=None, **kwargs):
self.input_shapes = [incoming if isinstance(incoming, tuple)
else incoming.output_shape
for incoming in incomings]
self.input_layers = [None if isinstance(incoming, tuple)
else incoming
for incoming in incomings]
self.name = name
self.params = OrderedDict()
self.get_output_kwargs = []
@Layer.output_shape.getter
def output_shape(self):
shape = self.get_output_shape_for(self.input_shapes)
if any(isinstance(s, (tf.Variable, tf.Tensor)) for s in shape):
raise ValueError("%s returned a symbolic output shape from its "
"get_output_shape_for() method: %r. This is not "
"allowed; shapes must be tuples of integers for "
"fixed-size dimensions and Nones for variable "
"dimensions." % (self.__class__.__name__, shape))
return shape
def get_output_shape_for(self, input_shapes):
raise NotImplementedError
def get_output_for(self, inputs, **kwargs):
raise NotImplementedError
class ConcatLayer(MergeLayer):
"""
Concatenates multiple inputs along the specified axis. Inputs should have
the same shape except for the dimension specified in axis, which can have
different sizes.
Parameters
-----------
incomings : a list of :class:`Layer` instances or tuples
The layers feeding into this layer, or expected input shapes
axis : int
Axis which inputs are joined over
"""
def __init__(self, incomings, axis=1, **kwargs):
super(ConcatLayer, self).__init__(incomings, **kwargs)
self.axis = axis
def get_output_shape_for(self, input_shapes):
# Infer the output shape by grabbing, for each axis, the first
# input size that is not `None` (if there is any)
output_shape = [next((s for s in sizes if s is not None), None)
for sizes in zip(*input_shapes)]
def match(shape1, shape2):
return (len(shape1) == len(shape2) and
all(i == self.axis or s1 is None or s2 is None or s1 == s2
for i, (s1, s2) in enumerate(zip(shape1, shape2))))
# Check for compatibility with inferred output shape
if not all(match(shape, output_shape) for shape in input_shapes):
raise ValueError("Mismatch: input shapes must be the same except "
"in the concatenation axis")
# Infer output shape on concatenation axis and return
sizes = [input_shape[self.axis] for input_shape in input_shapes]
concat_size = None if any(s is None for s in sizes) else sum(sizes)
output_shape[self.axis] = concat_size
return tuple(output_shape)
def get_output_for(self, inputs, **kwargs):
dtypes = [x.dtype.as_numpy_dtype for x in inputs]
if len(set(dtypes)) > 1:
# need to convert to common data type
common_dtype = np.core.numerictypes.find_common_type([], dtypes)
inputs = [tf.cast(x, common_dtype) for x in inputs]
return tf.concat(axis=self.axis, values=inputs)
concat = ConcatLayer # shortcut
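# --- Editor's illustrative sketch (not part of the original module; names are hypothetical). ---
# ConcatLayer joins its inputs along `axis`; every other dimension must agree.
def _example_concat_layer():
    l_a = InputLayer(shape=(None, 16), name="concat_example_a")
    l_b = InputLayer(shape=(None, 8), name="concat_example_b")
    l_cat = ConcatLayer([l_a, l_b], axis=1, name="concat_example")
    return l_cat.output_shape  # (None, 24): 16 + 8 on the concatenation axis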
class XavierUniformInitializer(object):
def __call__(self, shape, dtype=tf.float32, *args, **kwargs):
if len(shape) == 2:
n_inputs, n_outputs = shape
else:
receptive_field_size = np.prod(shape[:2])
n_inputs = shape[-2] * receptive_field_size
n_outputs = shape[-1] * receptive_field_size
init_range = math.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range, dtype=dtype)(shape)
class HeUniformInitializer(object):
def __call__(self, shape, dtype=tf.float32, *args, **kwargs):
if len(shape) == 2:
n_inputs, _ = shape
else:
receptive_field_size = np.prod(shape[:2])
n_inputs = shape[-2] * receptive_field_size
init_range = math.sqrt(1.0 / n_inputs)
return tf.random_uniform_initializer(-init_range, init_range, dtype=dtype)(shape)
def py_ortho_init(scale):
def _init(shape):
u, s, v = np.linalg.svd(np.random.uniform(size=shape))
return np.cast['float32'](u * scale)
return _init
class OrthogonalInitializer(object):
def __init__(self, scale=1.1):
self.scale = scale
def __call__(self, shape, dtype=tf.float32, *args, **kwargs):
result, = tf.py_func(py_ortho_init(self.scale), [shape], [tf.float32])
result.set_shape(shape)
return result
class ParamLayer(Layer):
def __init__(self, incoming, num_units, param=tf.zeros_initializer(),
trainable=True, **kwargs):
super(ParamLayer, self).__init__(incoming, **kwargs)
self.num_units = num_units
self.param = self.add_param(
param,
(num_units,),
name="param",
trainable=trainable
)
def get_output_shape_for(self, input_shape):
return input_shape[:-1] + (self.num_units,)
def get_output_for(self, input, **kwargs):
ndim = input.get_shape().ndims
reshaped_param = tf.reshape(self.param, (1,) * (ndim - 1) + (self.num_units,))
tile_arg = tf.concat(axis=0, values=[tf.shape(input)[:ndim - 1], [1]])
tiled = tf.tile(reshaped_param, tile_arg)
return tiled
class OpLayer(MergeLayer):
def __init__(self, incoming, op,
shape_op=lambda x: x, extras=None, **kwargs):
if extras is None:
extras = []
incomings = [incoming] + extras
super(OpLayer, self).__init__(incomings, **kwargs)
self.op = op
self.shape_op = shape_op
self.incomings = incomings
def get_output_shape_for(self, input_shapes):
return self.shape_op(*input_shapes)
def get_output_for(self, inputs, **kwargs):
return self.op(*inputs)
class DenseLayer(Layer):
def __init__(self, incoming, num_units, nonlinearity=None, W=XavierUniformInitializer(), b=tf.zeros_initializer(),
**kwargs):
super(DenseLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = tf.identity if nonlinearity is None else nonlinearity
self.num_units = num_units
num_inputs = int(np.prod(self.input_shape[1:]))
self.W = self.add_param(W, (num_inputs, num_units), name="W")
if b is None:
self.b = None
else:
self.b = self.add_param(b, (num_units,), name="b", regularizable=False)
def get_output_shape_for(self, input_shape):
return (input_shape[0], self.num_units)
def get_output_for(self, input, **kwargs):
# It is None for session tensors.
if input.get_shape().ndims is not None and input.get_shape().ndims > 2:
# if the input has more than two dimensions, flatten it into a
# batch of feature vectors.
input = tf.reshape(input, tf.stack([tf.shape(input)[0], -1]))
activation = tf.matmul(input, self.W)
if self.b is not None:
activation = activation + tf.expand_dims(self.b, 0)
return self.nonlinearity(activation)
class BaseConvLayer(Layer):
def __init__(self, incoming, num_filters, filter_size, stride=1, pad="VALID",
untie_biases=False,
W=XavierUniformInitializer(), b=tf.zeros_initializer(),
nonlinearity=tf.nn.relu, n=None, **kwargs):
"""
Input is assumed to be of shape batch*height*width*channels
"""
super(BaseConvLayer, self).__init__(incoming, **kwargs)
if nonlinearity is None:
self.nonlinearity = tf.identity
else:
self.nonlinearity = nonlinearity
if n is None:
n = len(self.input_shape) - 2
elif n != len(self.input_shape) - 2:
raise ValueError("Tried to create a %dD convolution layer with "
"input shape %r. Expected %d input dimensions "
"(batchsize, channels, %d spatial dimensions)." %
(n, self.input_shape, n + 2, n))
self.n = n
self.num_filters = num_filters
self.filter_size = as_tuple(filter_size, n, int)
self.stride = as_tuple(stride, n, int)
self.untie_biases = untie_biases
self.pad = pad
if pad == 'SAME':
if any(s % 2 == 0 for s in self.filter_size):
raise NotImplementedError(
'`same` padding requires odd filter size.')
self.W = self.add_param(W, self.get_W_shape(), name="W")
if b is None:
self.b = None
else:
if self.untie_biases:
biases_shape = self.output_shape[1:3] + (num_filters,) # + self.output_shape[2:]
else:
biases_shape = (num_filters,)
self.b = self.add_param(b, biases_shape, name="b",
regularizable=False)
def get_W_shape(self):
"""Get the shape of the weight matrix `W`.
Returns
-------
tuple of int
The shape of the weight matrix.
"""
num_input_channels = self.input_shape[-1]
return self.filter_size + (num_input_channels, self.num_filters)
def get_output_shape_for(self, input_shape):
if self.pad == 'SAME':
pad = ('same',) * self.n
elif self.pad == 'VALID':
pad = (0,) * self.n
elif self.pad == 'FULL':
pad = ('full',) * self.n
else:
raise NotImplementedError
# pad = self.pad if isinstance(self.pad, tuple) else (self.pad,) * self.n
batchsize = input_shape[0]
return ((batchsize,) +
tuple(conv_output_length(input, filter, stride, p)
for input, filter, stride, p
in zip(input_shape[1:3], self.filter_size,
self.stride, pad))) + (self.num_filters,)
def get_output_for(self, input, **kwargs):
conved = self.convolve(input, **kwargs)
if self.b is None:
activation = conved
elif self.untie_biases:
# raise NotImplementedError
activation = conved + tf.expand_dims(self.b, 0)
else:
activation = conved + tf.reshape(self.b, (1, 1, 1, self.num_filters))
return self.nonlinearity(activation)
def convolve(self, input, **kwargs):
"""
Symbolically convolves `input` with ``self.W``, producing an output of
shape ``self.output_shape``. To be implemented by subclasses.
Parameters
----------
input : Theano tensor
The input minibatch to convolve
**kwargs
Any additional keyword arguments from :meth:`get_output_for`
Returns
-------
Theano tensor
`input` convolved according to the configuration of this layer,
without any bias or nonlinearity applied.
"""
raise NotImplementedError("BaseConvLayer does not implement the "
"convolve() method. You will want to "
"use a subclass such as Conv2DLayer.")
class Conv2DLayer(BaseConvLayer):
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
pad="VALID", untie_biases=False,
W=XavierUniformInitializer(), b=tf.zeros_initializer(),
nonlinearity=tf.nn.relu,
convolution=tf.nn.conv2d, **kwargs):
super(Conv2DLayer, self).__init__(incoming=incoming, num_filters=num_filters, filter_size=filter_size,
stride=stride, pad=pad, untie_biases=untie_biases, W=W, b=b,
nonlinearity=nonlinearity, n=2, **kwargs)
self.convolution = convolution
def convolve(self, input, **kwargs):
padding = self.pad
if padding == 'FULL':
size_to_pad = [
[0, 0], # N
[self.filter_size[0]-1, self.filter_size[0]-1], # H
[self.filter_size[1]-1, self.filter_size[1]-1], # W
[0, 0], # C
]
input = tf.pad(
input,
tf.constant(size_to_pad),
mode='CONSTANT',
)
padding = 'VALID'
conved = self.convolution(input, self.W, strides=(1,) + self.stride + (1,), padding=padding)
return conved
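# --- Editor's illustrative sketch (not part of the original module; names are hypothetical). ---
# Conv2DLayer follows BaseConvLayer's NHWC convention (batch, height, width, channels).
def _example_conv2d_layer():
    l_img = InputLayer(shape=(None, 32, 32, 3), name="conv_example_in")
    l_conv = Conv2DLayer(l_img, num_filters=16, filter_size=(3, 3), stride=(1, 1),
                         pad="SAME", name="conv_example")
    # "SAME" padding with stride 1 keeps the spatial size: (None, 32, 32, 16)
    return l_conv.output_shape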
def pool_output_length(input_length, pool_size, stride, pad):
if input_length is None or pool_size is None:
return None
if pad == "SAME":
return int(np.ceil(float(input_length) / float(stride)))
return int(np.ceil(float(input_length - pool_size + 1) / float(stride)))
class Pool2DLayer(Layer):
def __init__(self, incoming, pool_size, stride=None, pad="VALID", mode='max', **kwargs):
super(Pool2DLayer, self).__init__(incoming, **kwargs)
self.pool_size = as_tuple(pool_size, 2)
if len(self.input_shape) != 4:
raise ValueError("Tried to create a 2D pooling layer with "
"input shape %r. Expected 4 input dimensions "
"(batchsize, 2 spatial dimensions, channels)."
% (self.input_shape,))
if stride is None:
self.stride = self.pool_size
else:
self.stride = as_tuple(stride, 2)
self.pad = pad
self.mode = mode
def get_output_shape_for(self, input_shape):
output_shape = list(input_shape) # copy / convert to mutable list
output_shape[1] = pool_output_length(input_shape[1],
pool_size=self.pool_size[0],
stride=self.stride[0],
pad=self.pad,
)
output_shape[2] = pool_output_length(input_shape[2],
pool_size=self.pool_size[1],
stride=self.stride[1],
pad=self.pad,
)
return tuple(output_shape)
def get_output_for(self, input, **kwargs):
assert self.mode == "max"
pooled = tf.nn.max_pool(
input,
ksize=(1,) + self.pool_size + (1,),
strides=(1,) + self.stride + (1,),
padding=self.pad,
)
return pooled
def spatial_expected_softmax(x, temp=1):
assert len(x.get_shape()) == 4
vals = []
for dim in [0, 1]:
dim_val = x.get_shape()[dim + 1].value
lin = tf.linspace(-1.0, 1.0, dim_val)
lin = tf.expand_dims(lin, 1 - dim)
lin = tf.expand_dims(lin, 0)
lin = tf.expand_dims(lin, 3)
m = tf.reduce_max(x, [1, 2], keep_dims=True)
e = tf.exp((x - m) / temp) + 1e-5
val = tf.reduce_sum(e * lin, [1, 2]) / (tf.reduce_sum(e, [1, 2]))
vals.append(tf.expand_dims(val, 2))
return tf.reshape(tf.concat(axis=2, values=vals), [-1, x.get_shape()[-1].value * 2])
class SpatialExpectedSoftmaxLayer(Layer):
"""
Computes the softmax across a spatial region, separately for each channel, followed by an expectation operation.
"""
def __init__(self, incoming, **kwargs):
super().__init__(incoming, **kwargs)
# self.temp = self.add_param(tf.ones_initializer, shape=(), name="temperature")
def get_output_shape_for(self, input_shape):
return (input_shape[0], input_shape[-1] * 2)
def get_output_for(self, input, **kwargs):
return spatial_expected_softmax(input)#, self.temp)
# max_ = tf.reduce_max(input, reduction_indices=[1, 2], keep_dims=True)
# exp = tf.exp(input - max_) + 1e-5
# vals = []
#
# for dim in [0, 1]:
# dim_val = input.get_shape()[dim + 1].value
# lin = tf.linspace(-1.0, 1.0, dim_val)
# lin = tf.expand_dims(lin, 1 - dim)
# lin = tf.expand_dims(lin, 0)
# lin = tf.expand_dims(lin, 3)
# m = tf.reduce_max(input, [1, 2], keep_dims=True)
# e = tf.exp(input - m) + 1e-5
# val = tf.reduce_sum(e * lin, [1, 2]) / (tf.reduce_sum(e, [1, 2]))
# vals.append(tf.expand_dims(val, 2))
#
# return tf.reshape(tf.concat(2, vals), [-1, input.get_shape()[-1].value * 2])
# import ipdb; ipdb.set_trace()
# input.get_shape()
# exp / tf.reduce_sum(exp, reduction_indices=[1, 2], keep_dims=True)
# import ipdb;
# ipdb.set_trace()
# spatial softmax?
# for dim in range(2):
# val = obs.get_shape()[dim + 1].value
# lin = tf.linspace(-1.0, 1.0, val)
# lin = tf.expand_dims(lin, 1 - dim)
# lin = tf.expand_dims(lin, 0)
# lin = tf.expand_dims(lin, 3)
# m = tf.reduce_max(e, [1, 2], keep_dims=True)
# e = tf.exp(e - m) + 1e-3
# val = tf.reduce_sum(e * lin, [1, 2]) / (tf.reduce_sum(e, [1, 2]))
class DropoutLayer(Layer):
def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
super(DropoutLayer, self).__init__(incoming, **kwargs)
self.p = p
self.rescale = rescale
def get_output_for(self, input, deterministic=False, **kwargs):
"""
Parameters
----------
input : tensor
output from the previous layer
deterministic : bool
            If True, dropout and rescaling are disabled, see notes
"""
if deterministic or self.p == 0:
return input
else:
# Using theano constant to prevent upcasting
# one = T.constant(1)
retain_prob = 1. - self.p
            if not self.rescale:
                # tf.nn.dropout rescales the kept units by 1 / keep_prob itself, so only
                # scale the input back down when rescaling was explicitly disabled.
                input *= retain_prob
            # use nonsymbolic shape for dropout mask if possible
            return tf.nn.dropout(input, keep_prob=retain_prob)
def get_output_shape_for(self, input_shape):
return input_shape
# TODO: add Conv3DLayer
class FlattenLayer(Layer):
"""
A layer that flattens its input. The leading ``outdim-1`` dimensions of
the output will have the same shape as the input. The remaining dimensions
are collapsed into the last dimension.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape.
outdim : int
The number of dimensions in the output.
See Also
--------
flatten : Shortcut
"""
def __init__(self, incoming, outdim=2, **kwargs):
super(FlattenLayer, self).__init__(incoming, **kwargs)
self.outdim = outdim
if outdim < 1:
            raise ValueError('Dim must be >0, was %i' % outdim)
def get_output_shape_for(self, input_shape):
to_flatten = input_shape[self.outdim - 1:]
if any(s is None for s in to_flatten):
flattened = None
else:
flattened = int(np.prod(to_flatten))
return input_shape[:self.outdim - 1] + (flattened,)
def get_output_for(self, input, **kwargs):
# total_entries = tf.reduce_prod(tf.shape(input))
pre_shape = tf.shape(input)[:self.outdim - 1]
to_flatten = tf.reduce_prod(tf.shape(input)[self.outdim - 1:])
return tf.reshape(input, tf.concat(axis=0, values=[pre_shape, tf.stack([to_flatten])]))
flatten = FlattenLayer # shortcut
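# --- Editor's illustrative sketch (not part of the original module; names are hypothetical). ---
# FlattenLayer keeps the first `outdim - 1` axes and collapses the rest.
def _example_flatten_layer():
    l_img = InputLayer(shape=(None, 8, 8, 4), name="flatten_example_in")
    l_flat = FlattenLayer(l_img, outdim=2, name="flatten_example")
    return l_flat.output_shape  # (None, 8, 8, 4) -> (None, 256)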
class ReshapeLayer(Layer):
def __init__(self, incoming, shape, **kwargs):
super(ReshapeLayer, self).__init__(incoming, **kwargs)
shape = tuple(shape)
for s in shape:
if isinstance(s, int):
                if s == 0 or s < -1:
raise ValueError("`shape` integers must be positive or -1")
elif isinstance(s, list):
if len(s) != 1 or not isinstance(s[0], int) or s[0] < 0:
raise ValueError("`shape` input references must be "
"single-element lists of int >= 0")
elif isinstance(s, (tf.Tensor, tf.Variable)): # T.TensorVariable):
raise NotImplementedError
# if s.ndim != 0:
# raise ValueError(
# "A symbolic variable in a shape specification must be "
# "a scalar, but had %i dimensions" % s.ndim)
else:
raise ValueError("`shape` must be a tuple of int and/or [int]")
if sum(s == -1 for s in shape) > 1:
raise ValueError("`shape` cannot contain multiple -1")
self.shape = shape
# try computing the output shape once as a sanity check
self.get_output_shape_for(self.input_shape)
def get_output_shape_for(self, input_shape, **kwargs):
# Initialize output shape from shape specification
output_shape = list(self.shape)
# First, replace all `[i]` with the corresponding input dimension, and
# mask parts of the shapes thus becoming irrelevant for -1 inference
masked_input_shape = list(input_shape)
masked_output_shape = list(output_shape)
for dim, o in enumerate(output_shape):
if isinstance(o, list):
if o[0] >= len(input_shape):
raise ValueError("specification contains [%d], but input "
"shape has %d dimensions only" %
(o[0], len(input_shape)))
output_shape[dim] = input_shape[o[0]]
masked_output_shape[dim] = input_shape[o[0]]
if (input_shape[o[0]] is None) \
and (masked_input_shape[o[0]] is None):
# first time we copied this unknown input size: mask
# it, we have a 1:1 correspondence between out[dim] and
# in[o[0]] and can ignore it for -1 inference even if
# it is unknown.
masked_input_shape[o[0]] = 1
masked_output_shape[dim] = 1
# Secondly, replace all symbolic shapes with `None`, as we cannot
# infer their size here.
for dim, o in enumerate(output_shape):
if isinstance(o, (tf.Tensor, tf.Variable)): # T.TensorVariable):
raise NotImplementedError
# output_shape[dim] = None
# masked_output_shape[dim] = None
# From the shapes, compute the sizes of the input and output tensor
input_size = (None if any(x is None for x in masked_input_shape)
else np.prod(masked_input_shape))
output_size = (None if any(x is None for x in masked_output_shape)
else np.prod(masked_output_shape))
del masked_input_shape, masked_output_shape
# Finally, infer value for -1 if needed
if -1 in output_shape:
dim = output_shape.index(-1)
if (input_size is None) or (output_size is None):
output_shape[dim] = None
output_size = None
else:
output_size *= -1
output_shape[dim] = input_size // output_size
output_size *= output_shape[dim]
# Sanity check
if (input_size is not None) and (output_size is not None) \
and (input_size != output_size):
raise ValueError("%s cannot be reshaped to specification %s. "
"The total size mismatches." %
(input_shape, self.shape))
return tuple(output_shape)
def get_output_for(self, input, **kwargs):
# Replace all `[i]` with the corresponding input dimension
output_shape = list(self.shape)
for dim, o in enumerate(output_shape):
if isinstance(o, list):
output_shape[dim] = tf.shape(input)[o[0]]
# Everything else is handled by Theano
return tf.reshape(input, tf.stack(output_shape))
reshape = ReshapeLayer # shortcut
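# --- Editor's illustrative sketch (not part of the original module; names are hypothetical). ---
# ReshapeLayer accepts plain ints, -1 (inferred once), and [i] (copy input axis i).
def _example_reshape_layer():
    l_in = InputLayer(shape=(None, 6, 4), name="reshape_example_in")
    # keep the batch axis ([0]) and merge the rest; -1 is inferred as 6 * 4 = 24
    l_merged = ReshapeLayer(l_in, shape=([0], -1), name="reshape_example")
    return l_merged.output_shape  # (None, 24)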
class SliceLayer(Layer):
def __init__(self, incoming, indices, axis=-1, **kwargs):
super(SliceLayer, self).__init__(incoming, **kwargs)
self.slice = indices
self.axis = axis
def get_output_shape_for(self, input_shape):
output_shape = list(input_shape)
if isinstance(self.slice, int):
del output_shape[self.axis]
elif input_shape[self.axis] is not None:
output_shape[self.axis] = len(
list(range(*self.slice.indices(input_shape[self.axis]))))
else:
output_shape[self.axis] = None
return tuple(output_shape)
def get_output_for(self, input, **kwargs):
axis = self.axis
ndims = input.get_shape().ndims
if axis < 0:
axis += ndims
if isinstance(self.slice, int) and self.slice < 0:
return tf.reverse(input, [self.axis + 1])[
(slice(None),) * axis + (-1 - self.slice,) + (slice(None),) * (ndims - axis - 1)
]
# import ipdb; ipdb.set_trace()
return input[(slice(None),) * axis + (self.slice,) + (slice(None),) * (ndims - axis - 1)]
class DimshuffleLayer(Layer):
def __init__(self, incoming, pattern, **kwargs):
super(DimshuffleLayer, self).__init__(incoming, **kwargs)
# Sanity check the pattern
used_dims = set()
for p in pattern:
if isinstance(p, int):
# Dimension p
if p in used_dims:
raise ValueError("pattern contains dimension {0} more "
"than once".format(p))
used_dims.add(p)
elif p == 'x':
# Broadcast
pass
else:
raise ValueError("pattern should only contain dimension"
"indices or 'x', not {0}".format(p))
self.pattern = pattern
# try computing the output shape once as a sanity check
self.get_output_shape_for(self.input_shape)
def get_output_shape_for(self, input_shape):
# Build output shape while keeping track of the dimensions that we are
# attempting to collapse, so we can ensure that they are broadcastable
output_shape = []
dims_used = [False] * len(input_shape)
for p in self.pattern:
if isinstance(p, int):
if p < 0 or p >= len(input_shape):
raise ValueError("pattern contains {0}, but input shape "
"has {1} dimensions "
"only".format(p, len(input_shape)))
# Dimension p
o = input_shape[p]
dims_used[p] = True
elif p == 'x':
# Broadcast; will be of size 1
o = 1
output_shape.append(o)
for i, (dim_size, used) in enumerate(zip(input_shape, dims_used)):
if not used and dim_size != 1 and dim_size is not None:
raise ValueError(
"pattern attempted to collapse dimension "
"{0} of size {1}; dimensions with size != 1/None are not"
"broadcastable and cannot be "
"collapsed".format(i, dim_size))
return tuple(output_shape)
def get_output_for(self, input, **kwargs):
return tf.transpose(input, self.pattern)
dimshuffle = DimshuffleLayer # shortcut
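# --- Editor's illustrative sketch (not part of the original module; names are hypothetical). ---
# DimshuffleLayer transposes axes according to `pattern`.
def _example_dimshuffle_layer():
    l_in = InputLayer(shape=(None, 10, 3), name="dimshuffle_example_in")
    l_swapped = DimshuffleLayer(l_in, pattern=(0, 2, 1), name="dimshuffle_example")
    return l_swapped.output_shape  # (None, 10, 3) -> (None, 3, 10)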
def apply_ln(layer):
def _normalize(x, prefix):
EPS = 1e-5
dim = x.get_shape()[-1].value
bias_name = prefix + "_ln/bias"
scale_name = prefix + "_ln/scale"
if bias_name not in layer.norm_params:
layer.norm_params[bias_name] = layer.add_param(
tf.zeros_initializer(), (dim,), name=bias_name, regularizable=False)
if scale_name not in layer.norm_params:
layer.norm_params[scale_name] = layer.add_param(
tf.ones_initializer(), (dim,), name=scale_name)
bias = layer.norm_params[bias_name]
scale = layer.norm_params[scale_name]
mean, var = tf.nn.moments(x, axes=[1], keep_dims=True)
x_normed = (x - mean) / tf.sqrt(var + EPS)
return x_normed * scale + bias
return _normalize
class GRULayer(Layer):
"""
A gated recurrent unit implements the following update mechanism:
Reset gate: r(t) = f_r(x(t) @ W_xr + h(t-1) @ W_hr + b_r)
Update gate: u(t) = f_u(x(t) @ W_xu + h(t-1) @ W_hu + b_u)
Cell gate: c(t) = f_c(x(t) @ W_xc + r(t) * (h(t-1) @ W_hc) + b_c)
    New hidden state: h(t) = (1 - u(t)) * h(t-1) + u(t) * c(t)
Note that the reset, update, and cell vectors must have the same dimension as the hidden state
"""
def __init__(self, incoming, num_units, hidden_nonlinearity,
gate_nonlinearity=tf.nn.sigmoid, W_x_init=XavierUniformInitializer(), W_h_init=OrthogonalInitializer(),
b_init=tf.zeros_initializer(), hidden_init=tf.zeros_initializer(), hidden_init_trainable=False,
layer_normalization=False, **kwargs):
if hidden_nonlinearity is None:
hidden_nonlinearity = tf.identity
if gate_nonlinearity is None:
gate_nonlinearity = tf.identity
super(GRULayer, self).__init__(incoming, **kwargs)
input_shape = self.input_shape[2:]
input_dim = np.prod(input_shape)
self.layer_normalization = layer_normalization
# Weights for the initial hidden state
self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
regularizable=False)
# Weights for the reset gate
self.W_xr = self.add_param(W_x_init, (input_dim, num_units), name="W_xr")
self.W_hr = self.add_param(W_h_init, (num_units, num_units), name="W_hr")
self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
# Weights for the update gate
self.W_xu = self.add_param(W_x_init, (input_dim, num_units), name="W_xu")
self.W_hu = self.add_param(W_h_init, (num_units, num_units), name="W_hu")
self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
# Weights for the cell gate
self.W_xc = self.add_param(W_x_init, (input_dim, num_units), name="W_xc")
self.W_hc = self.add_param(W_h_init, (num_units, num_units), name="W_hc")
self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
self.W_x_ruc = tf.concat(axis=1, values=[self.W_xr, self.W_xu, self.W_xc])
self.W_h_ruc = tf.concat(axis=1, values=[self.W_hr, self.W_hu, self.W_hc])
self.W_x_ru = tf.concat(axis=1, values=[self.W_xr, self.W_xu])
self.W_h_ru = tf.concat(axis=1, values=[self.W_hr, self.W_hu])
self.b_ruc = tf.concat(axis=0, values=[self.b_r, self.b_u, self.b_c])
self.gate_nonlinearity = gate_nonlinearity
self.num_units = num_units
self.nonlinearity = hidden_nonlinearity
self.norm_params = dict()
# pre-run the step method to initialize the normalization parameters
h_dummy = tf.placeholder(dtype=tf.float32, shape=(None, num_units), name="h_dummy")
x_dummy = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name="x_dummy")
self.step(h_dummy, x_dummy)
def step(self, hprev, x):
if self.layer_normalization:
ln = apply_ln(self)
x_ru = ln(tf.matmul(x, self.W_x_ru), "x_ru")
h_ru = ln(tf.matmul(hprev, self.W_h_ru), "h_ru")
x_r, x_u = tf.split(axis=1, num_or_size_splits=2, value=x_ru)
h_r, h_u = tf.split(axis=1, num_or_size_splits=2, value=h_ru)
x_c = ln(tf.matmul(x, self.W_xc), "x_c")
h_c = ln(tf.matmul(hprev, self.W_hc), "h_c")
r = self.gate_nonlinearity(x_r + h_r)
u = self.gate_nonlinearity(x_u + h_u)
c = self.nonlinearity(x_c + r * h_c)
h = (1 - u) * hprev + u * c
return h
else:
xb_ruc = tf.matmul(x, self.W_x_ruc) + tf.reshape(self.b_ruc, (1, -1))
h_ruc = tf.matmul(hprev, self.W_h_ruc)
xb_r, xb_u, xb_c = tf.split(axis=1, num_or_size_splits=3, value=xb_ruc)
h_r, h_u, h_c = tf.split(axis=1, num_or_size_splits=3, value=h_ruc)
r = self.gate_nonlinearity(xb_r + h_r)
u = self.gate_nonlinearity(xb_u + h_u)
c = self.nonlinearity(xb_c + r * h_c)
h = (1 - u) * hprev + u * c
return h
def get_step_layer(self, l_in, l_prev_hidden, name=None):
return GRUStepLayer(incomings=[l_in, l_prev_hidden], recurrent_layer=self, name=name)
def get_output_shape_for(self, input_shape):
n_batch, n_steps = input_shape[:2]
return n_batch, n_steps, self.num_units
def get_output_for(self, input, **kwargs):
input_shape = tf.shape(input)
n_batches = input_shape[0]
n_steps = input_shape[1]
input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
if 'recurrent_state' in kwargs and self in kwargs['recurrent_state']:
h0s = kwargs['recurrent_state'][self]
else:
h0s = tf.tile(
tf.reshape(self.h0, (1, self.num_units)),
(n_batches, 1)
)
# flatten extra dimensions
shuffled_input = tf.transpose(input, (1, 0, 2))
hs = tf.scan(
self.step,
elems=shuffled_input,
initializer=h0s
)
shuffled_hs = tf.transpose(hs, (1, 0, 2))
if 'recurrent_state_output' in kwargs:
kwargs['recurrent_state_output'][self] = shuffled_hs
return shuffled_hs
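# --- Editor's illustrative sketch (not part of the original module; names are hypothetical). ---
# GRULayer maps a (batch, steps, features) sequence to (batch, steps, num_units)
# using the update equations listed in its docstring.
def _example_gru_layer():
    l_seq = InputLayer(shape=(None, None, 12), name="gru_example_in")
    l_gru = GRULayer(l_seq, num_units=32, hidden_nonlinearity=tf.tanh, name="gru_example")
    return l_gru.output_shape  # (None, None, 32)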
class GRUStepLayer(MergeLayer):
def __init__(self, incomings, recurrent_layer, **kwargs):
super(GRUStepLayer, self).__init__(incomings, **kwargs)
self._gru_layer = recurrent_layer
def get_params(self, **tags):
return self._gru_layer.get_params(**tags)
def get_output_shape_for(self, input_shapes):
n_batch = input_shapes[0][0]
return n_batch, self._gru_layer.num_units
def get_output_for(self, inputs, **kwargs):
x, hprev = inputs
n_batch = tf.shape(x)[0]
x = tf.reshape(x, tf.stack([n_batch, -1]))
x.set_shape((None, self.input_shapes[0][1]))
return self._gru_layer.step(hprev, x)
class TfGRULayer(Layer):
"""
Use TensorFlow's built-in GRU implementation
"""
def __init__(self, incoming, num_units, hidden_nonlinearity, horizon=None, hidden_init_trainable=False,
**kwargs):
assert len(incoming.output_shape) == 3
input_dim = incoming.shape[2]
gru = tf.nn.rnn_cell.GRUCell(num_units=num_units, activation=hidden_nonlinearity)
self.num_units = num_units
self.horizon = horizon
self.gru = gru
self.hidden_nonlinearity = hidden_nonlinearity
Layer.__init__(self, incoming=incoming, **kwargs)
# dummy input variable
input_dummy = tf.placeholder(tf.float32, (None, input_dim), "input_dummy")
hidden_dummy = tf.placeholder(tf.float32, (None, num_units), "hidden_dummy")
with tf.variable_scope(self.name) as vs:
gru(input_dummy, hidden_dummy, scope=vs)
vs.reuse_variables()
self.scope = vs
all_vars = [v for v in tf.global_variables() if v.name.startswith(vs.name)]
trainable_vars = [v for v in tf.trainable_variables() if v.name.startswith(vs.name)]
for var in trainable_vars:
self.add_param(spec=var, shape=None, name=None, trainable=True)
for var in set(all_vars) - set(trainable_vars):
self.add_param(spec=var, shape=None, name=None, trainable=False)
self.h0 = self.add_param(tf.zeros_initializer(), (num_units,), name="h0", trainable=hidden_init_trainable,
regularizable=False)
def step(self, hprev, x):
return self.gru(x, hprev, scope=self.scope)[1]
def get_output_for(self, input, **kwargs):
input_shape = tf.shape(input)
n_batches = input_shape[0]
state = tf.tile(
tf.reshape(self.h0, (1, self.num_units)),
(n_batches, 1)
)
state.set_shape((None, self.num_units))
if self.horizon is not None:
outputs = []
for idx in range(self.horizon):
output, state = self.gru(input[:, idx, :], state, scope=self.scope) # self.name)
outputs.append(tf.expand_dims(output, 1))
outputs = tf.concat(axis=1, values=outputs)
return outputs
else:
n_steps = input_shape[1]
input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
# flatten extra dimensions
shuffled_input = tf.transpose(input, (1, 0, 2))
shuffled_input.set_shape((None, None, self.input_shape[-1]))
hs = tf.scan(
self.step,
elems=shuffled_input,
initializer=state
)
shuffled_hs = tf.transpose(hs, (1, 0, 2))
return shuffled_hs
def get_output_shape_for(self, input_shape):
n_batch, n_steps = input_shape[:2]
return n_batch, n_steps, self.num_units
def get_step_layer(self, l_in, l_prev_hidden, name=None):
return GRUStepLayer(incomings=[l_in, l_prev_hidden], recurrent_layer=self, name=name)
class PseudoLSTMLayer(Layer):
"""
A Pseudo LSTM unit implements the following update mechanism:
    Incoming gate:    i(t) = σ(W_hi @ h(t-1) + W_xi @ x(t) + b_i)
    Forget gate:      f(t) = σ(W_hf @ h(t-1) + W_xf @ x(t) + b_f)
    Out gate:         o(t) = σ(W_ho @ h(t-1) + W_xo @ x(t) + b_o)
New cell gate: c_new(t) = ϕ(W_hc @ (o(t) * h(t-1)) + W_xc @ x(t) + b_c)
Cell gate: c(t) = f(t) * c(t-1) + i(t) * c_new(t)
Hidden state: h(t) = ϕ(c(t))
Output: out = h(t)
If gate_squash_inputs is set to True, we have the following updates instead:
    Out gate:         o(t) = σ(W_ho @ h(t-1) + W_xo @ x(t) + b_o)
Incoming gate: i(t) = σ(W_hi @ (o(t) * h(t-1)) + W_xi @ x(t) + b_i)
Forget gate: f(t) = σ(W_hf @ (o(t) * h(t-1)) + W_xf @ x(t) + b_f)
New cell gate: c_new(t) = ϕ(W_hc @ (o(t) * h(t-1)) + W_xc @ x(t) + b_c)
Cell state: c(t) = f(t) * c(t-1) + i(t) * c_new(t)
Hidden state: h(t) = ϕ(c(t))
Output: out = h(t)
Note that the incoming, forget, cell, and out vectors must have the same dimension as the hidden state
The notation is slightly different from
http://r2rt.com/written-memories-understanding-deriving-and-extending-the-lstm.html: here we introduce the cell
gate and swap its role with the hidden state, so that the output is the same as the hidden state (and we can use
this as a drop-in replacement for LSTMLayer).
"""
def __init__(self, incoming, num_units, hidden_nonlinearity=tf.tanh,
gate_nonlinearity=tf.nn.sigmoid, W_x_init=XavierUniformInitializer(), W_h_init=OrthogonalInitializer(),
forget_bias=1.0, b_init=tf.zeros_initializer(), hidden_init=tf.zeros_initializer(),
hidden_init_trainable=False, cell_init=tf.zeros_initializer(), cell_init_trainable=False,
gate_squash_inputs=False, layer_normalization=False, **kwargs):
if hidden_nonlinearity is None:
hidden_nonlinearity = tf.identity
if gate_nonlinearity is None:
gate_nonlinearity = tf.identity
super(PseudoLSTMLayer, self).__init__(incoming, **kwargs)
self.layer_normalization = layer_normalization
input_shape = self.input_shape[2:]
input_dim = np.prod(input_shape)
# Weights for the initial hidden state (this is actually not used, since the initial hidden state is
# determined by the initial cell state via h0 = self.nonlinearity(c0)). It is here merely for
# interface convenience
self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
regularizable=False)
# Weights for the initial cell state
self.c0 = self.add_param(cell_init, (num_units,), name="c0", trainable=cell_init_trainable,
regularizable=False)
# Weights for the incoming gate
self.W_xi = self.add_param(W_x_init, (input_dim, num_units), name="W_xi")
self.W_hi = self.add_param(W_h_init, (num_units, num_units), name="W_hi")
self.b_i = self.add_param(b_init, (num_units,), name="b_i", regularizable=False)
# Weights for the forget gate
self.W_xf = self.add_param(W_x_init, (input_dim, num_units), name="W_xf")
self.W_hf = self.add_param(W_h_init, (num_units, num_units), name="W_hf")
self.b_f = self.add_param(b_init, (num_units,), name="b_f", regularizable=False)
# Weights for the out gate
self.W_xo = self.add_param(W_x_init, (input_dim, num_units), name="W_xo")
self.W_ho = self.add_param(W_h_init, (num_units, num_units), name="W_ho")
self.b_o = self.add_param(b_init, (num_units,), name="b_o", regularizable=False)
# Weights for the cell gate
self.W_xc = self.add_param(W_x_init, (input_dim, num_units), name="W_xc")
self.W_hc = self.add_param(W_h_init, (num_units, num_units), name="W_hc")
self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
self.gate_nonlinearity = gate_nonlinearity
self.num_units = num_units
self.nonlinearity = hidden_nonlinearity
self.forget_bias = forget_bias
self.gate_squash_inputs = gate_squash_inputs
self.W_x_ifo = tf.concat(axis=1, values=[self.W_xi, self.W_xf, self.W_xo])
self.W_h_ifo = tf.concat(axis=1, values=[self.W_hi, self.W_hf, self.W_ho])
self.W_x_if = tf.concat(axis=1, values=[self.W_xi, self.W_xf])
self.W_h_if = tf.concat(axis=1, values=[self.W_hi, self.W_hf])
self.norm_params = dict()
def step(self, hcprev, x):
hprev = hcprev[:, :self.num_units]
cprev = hcprev[:, self.num_units:]
if self.layer_normalization:
ln = apply_ln(self)
else:
ln = lambda x, *args: x
if self.gate_squash_inputs:
"""
            Out gate:         o(t) = σ(W_ho @ h(t-1) + W_xo @ x(t) + b_o)
Incoming gate: i(t) = σ(W_hi @ (o(t) * h(t-1)) + W_xi @ x(t) + b_i)
Forget gate: f(t) = σ(W_hf @ (o(t) * h(t-1)) + W_xf @ x(t) + b_f)
New cell gate: c_new(t) = ϕ(W_hc @ (o(t) * h(t-1)) + W_xc @ x(t) + b_c)
Cell state: c(t) = f(t) * c(t-1) + i(t) * c_new(t)
Hidden state: h(t) = ϕ(c(t))
Output: out = h(t)
"""
o = self.nonlinearity(
ln(tf.matmul(hprev, self.W_ho), "h_o") +
ln(tf.matmul(x, self.W_xo), "x_o") + self.b_o
)
x_if = ln(tf.matmul(x, self.W_x_if), "x_if")
h_if = ln(tf.matmul(o * hprev, self.W_h_if), "h_if")
x_i, x_f = tf.split(axis=1, num_or_size_splits=2, value=x_if)
h_i, h_f = tf.split(axis=1, num_or_size_splits=2, value=h_if)
i = self.gate_nonlinearity(x_i + h_i + self.b_i)
f = self.gate_nonlinearity(x_f + h_f + self.b_f + self.forget_bias)
c_new = self.nonlinearity(
ln(tf.matmul(o * hprev, self.W_hc), "h_c") +
ln(tf.matmul(x, self.W_xc), "x_c") +
self.b_c
)
c = f * cprev + i * c_new
h = self.nonlinearity(ln(c, "c"))
return tf.concat(axis=1, values=[h, c])
else:
"""
            Incoming gate:    i(t) = σ(W_hi @ h(t-1) + W_xi @ x(t) + b_i)
            Forget gate:      f(t) = σ(W_hf @ h(t-1) + W_xf @ x(t) + b_f)
            Out gate:         o(t) = σ(W_ho @ h(t-1) + W_xo @ x(t) + b_o)
New cell gate: c_new(t) = ϕ(W_hc @ (o(t) * h(t-1)) + W_xc @ x(t) + b_c)
Cell gate: c(t) = f(t) * c(t-1) + i(t) * c_new(t)
Hidden state: h(t) = ϕ(c(t))
Output: out = h(t)
"""
x_ifo = ln(tf.matmul(x, self.W_x_ifo), "x_ifo")
h_ifo = ln(tf.matmul(hprev, self.W_h_ifo), "h_ifo")
x_i, x_f, x_o = tf.split(axis=1, num_or_size_splits=3, value=x_ifo)
h_i, h_f, h_o = tf.split(axis=1, num_or_size_splits=3, value=h_ifo)
i = self.gate_nonlinearity(x_i + h_i + self.b_i)
f = self.gate_nonlinearity(x_f + h_f + self.b_f + self.forget_bias)
o = self.gate_nonlinearity(x_o + h_o + self.b_o)
c_new = self.nonlinearity(
ln(tf.matmul(o * hprev, self.W_hc), "h_c") +
ln(tf.matmul(x, self.W_xc), "x_c") +
self.b_c
)
c = f * cprev + i * c_new
h = self.nonlinearity(ln(c, "c"))
return tf.concat(axis=1, values=[h, c])
def get_step_layer(self, l_in, l_prev_state, name=None):
return LSTMStepLayer(incomings=[l_in, l_prev_state], recurrent_layer=self, name=name)
def get_output_shape_for(self, input_shape):
n_batch, n_steps = input_shape[:2]
return n_batch, n_steps, self.num_units
def get_output_for(self, input, **kwargs):
input_shape = tf.shape(input)
n_batches = input_shape[0]
n_steps = input_shape[1]
input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
c0s = tf.tile(
tf.reshape(self.c0, (1, self.num_units)),
(n_batches, 1)
)
h0s = self.nonlinearity(c0s)
# flatten extra dimensions
shuffled_input = tf.transpose(input, (1, 0, 2))
hcs = tf.scan(
self.step,
elems=shuffled_input,
initializer=tf.concat(axis=1, values=[h0s, c0s])
)
shuffled_hcs = tf.transpose(hcs, (1, 0, 2))
shuffled_hs = shuffled_hcs[:, :, :self.num_units]
shuffled_cs = shuffled_hcs[:, :, self.num_units:]
return shuffled_hs
class LSTMLayer(Layer):
"""
A LSTM unit implements the following update mechanism:
Incoming gate: i(t) = f_i(x(t) @ W_xi + h(t-1) @ W_hi + w_ci * c(t-1) + b_i)
Forget gate: f(t) = f_f(x(t) @ W_xf + h(t-1) @ W_hf + w_cf * c(t-1) + b_f)
Cell gate: c(t) = f(t) * c(t - 1) + i(t) * f_c(x(t) @ W_xc + h(t-1) @ W_hc + b_c)
    Out gate:                  o(t) = f_o(x(t) @ W_xo + h(t-1) @ W_ho + w_co * c(t) + b_o)
New hidden state: h(t) = o(t) * f_h(c(t))
Note that the incoming, forget, cell, and out vectors must have the same dimension as the hidden state
"""
def __init__(self, incoming, num_units, hidden_nonlinearity=tf.tanh,
gate_nonlinearity=tf.nn.sigmoid, W_x_init=XavierUniformInitializer(), W_h_init=OrthogonalInitializer(),
forget_bias=1.0, use_peepholes=False, w_init=tf.random_normal_initializer(stddev=0.1),
b_init=tf.zeros_initializer(), hidden_init=tf.zeros_initializer(), hidden_init_trainable=False,
cell_init=tf.zeros_initializer(), cell_init_trainable=False, layer_normalization=False,
**kwargs):
if hidden_nonlinearity is None:
hidden_nonlinearity = tf.identity
if gate_nonlinearity is None:
gate_nonlinearity = tf.identity
super(LSTMLayer, self).__init__(incoming, **kwargs)
self.layer_normalization = layer_normalization
input_shape = self.input_shape[2:]
input_dim = np.prod(input_shape)
# Weights for the initial hidden state
self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
regularizable=False)
# Weights for the initial cell state
self.c0 = self.add_param(cell_init, (num_units,), name="c0", trainable=cell_init_trainable,
regularizable=False)
# Weights for the incoming gate
self.W_xi = self.add_param(W_x_init, (input_dim, num_units), name="W_xi")
self.W_hi = self.add_param(W_h_init, (num_units, num_units), name="W_hi")
if use_peepholes:
self.w_ci = self.add_param(w_init, (num_units,), name="w_ci")
self.b_i = self.add_param(b_init, (num_units,), name="b_i", regularizable=False)
# Weights for the forget gate
self.W_xf = self.add_param(W_x_init, (input_dim, num_units), name="W_xf")
self.W_hf = self.add_param(W_h_init, (num_units, num_units), name="W_hf")
if use_peepholes:
self.w_cf = self.add_param(w_init, (num_units,), name="w_cf")
self.b_f = self.add_param(b_init, (num_units,), name="b_f", regularizable=False)
# Weights for the cell gate
self.W_xc = self.add_param(W_x_init, (input_dim, num_units), name="W_xc")
self.W_hc = self.add_param(W_h_init, (num_units, num_units), name="W_hc")
self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
# Weights for the reset gate
self.W_xr = self.add_param(W_x_init, (input_dim, num_units), name="W_xr")
self.W_hr = self.add_param(W_h_init, (num_units, num_units), name="W_hr")
self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
# Weights for the out gate
self.W_xo = self.add_param(W_x_init, (input_dim, num_units), name="W_xo")
self.W_ho = self.add_param(W_h_init, (num_units, num_units), name="W_ho")
if use_peepholes:
self.w_co = self.add_param(w_init, (num_units,), name="w_co")
self.b_o = self.add_param(b_init, (num_units,), name="b_o", regularizable=False)
self.gate_nonlinearity = gate_nonlinearity
self.num_units = num_units
self.nonlinearity = hidden_nonlinearity
self.forget_bias = forget_bias
self.use_peepholes = use_peepholes
self.W_x_ifco = tf.concat(axis=1, values=[self.W_xi, self.W_xf, self.W_xc, self.W_xo])
self.W_h_ifco = tf.concat(axis=1, values=[self.W_hi, self.W_hf, self.W_hc, self.W_ho])
if use_peepholes:
self.w_c_ifo = tf.concat(axis=0, values=[self.w_ci, self.w_cf, self.w_co])
self.norm_params = dict()
def step(self, hcprev, x):
"""
Incoming gate: i(t) = f_i(x(t) @ W_xi + h(t-1) @ W_hi + w_ci * c(t-1) + b_i)
Forget gate: f(t) = f_f(x(t) @ W_xf + h(t-1) @ W_hf + w_cf * c(t-1) + b_f)
Cell gate: c(t) = f(t) * c(t - 1) + i(t) * f_c(x(t) @ W_xc + h(t-1) @ W_hc + b_c)
        Out gate:                  o(t) = f_o(x(t) @ W_xo + h(t-1) @ W_ho + w_co * c(t) + b_o)
New hidden state: h(t) = o(t) * f_h(c(t))
"""
hprev = hcprev[:, :self.num_units]
cprev = hcprev[:, self.num_units:]
if self.layer_normalization:
ln = apply_ln(self)
else:
ln = lambda x, *args: x
x_ifco = ln(tf.matmul(x, self.W_x_ifco), "x_ifco")
h_ifco = ln(tf.matmul(hprev, self.W_h_ifco), "h_ifco")
x_i, x_f, x_c, x_o = tf.split(axis=1, num_or_size_splits=4, value=x_ifco)
h_i, h_f, h_c, h_o = tf.split(axis=1, num_or_size_splits=4, value=h_ifco)
if self.use_peepholes:
i = self.gate_nonlinearity(x_i + h_i + self.w_ci * cprev + self.b_i)
f = self.gate_nonlinearity(x_f + h_f + self.w_cf * cprev + self.b_f + self.forget_bias)
o = self.gate_nonlinearity(x_o + h_o + self.w_co * cprev + self.b_o)
else:
i = self.gate_nonlinearity(x_i + h_i + self.b_i)
f = self.gate_nonlinearity(x_f + h_f + self.b_f + self.forget_bias)
o = self.gate_nonlinearity(x_o + h_o + self.b_o)
c = f * cprev + i * self.nonlinearity(x_c + h_c + self.b_c)
h = o * self.nonlinearity(ln(c, "c"))
return tf.concat(axis=1, values=[h, c])
def get_step_layer(self, l_in, l_prev_state, name=None):
return LSTMStepLayer(incomings=[l_in, l_prev_state], recurrent_layer=self, name=name)
def get_output_shape_for(self, input_shape):
n_batch, n_steps = input_shape[:2]
return n_batch, n_steps, self.num_units
def get_output_for(self, input, **kwargs):
input_shape = tf.shape(input)
n_batches = input_shape[0]
n_steps = input_shape[1]
input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
h0s = tf.tile(
tf.reshape(self.h0, (1, self.num_units)),
(n_batches, 1)
)
c0s = tf.tile(
tf.reshape(self.c0, (1, self.num_units)),
(n_batches, 1)
)
# flatten extra dimensions
shuffled_input = tf.transpose(input, (1, 0, 2))
hcs = tf.scan(
self.step,
elems=shuffled_input,
initializer=tf.concat(axis=1, values=[h0s, c0s])
)
shuffled_hcs = tf.transpose(hcs, (1, 0, 2))
shuffled_hs = shuffled_hcs[:, :, :self.num_units]
shuffled_cs = shuffled_hcs[:, :, self.num_units:]
if 'recurrent_state_output' in kwargs:
kwargs['recurrent_state_output'][self] = shuffled_hcs
return shuffled_hs
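# --- Editor's illustrative sketch (not part of the original module; names are hypothetical). ---
# For one-step rollouts, get_step_layer() pairs a single-timestep input with the previous
# state; the step layer carries [h(t), c(t)] concatenated, i.e. 2 * num_units values.
def _example_lstm_step_layer():
    l_seq = InputLayer(shape=(None, None, 12), name="lstm_example_in")
    l_lstm = LSTMLayer(l_seq, num_units=32, hidden_nonlinearity=tf.tanh, name="lstm_example")
    l_step_in = InputLayer(shape=(None, 12), name="lstm_example_step_in")
    l_prev_state = InputLayer(shape=(None, 64), name="lstm_example_prev_state")
    l_step = l_lstm.get_step_layer(l_step_in, l_prev_state, name="lstm_example_step")
    return l_step.output_shape  # (None, 64)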
class LSTMStepLayer(MergeLayer):
def __init__(self, incomings, recurrent_layer, **kwargs):
super(LSTMStepLayer, self).__init__(incomings, **kwargs)
self._recurrent_layer = recurrent_layer
def get_params(self, **tags):
return self._recurrent_layer.get_params(**tags)
def get_output_shape_for(self, input_shapes):
n_batch = input_shapes[0][0]
return n_batch, 2 * self._recurrent_layer.num_units
def get_output_for(self, inputs, **kwargs):
x, hcprev = inputs
n_batch = tf.shape(x)[0]
x = tf.reshape(x, tf.stack([n_batch, -1]))
hc = self._recurrent_layer.step(hcprev, x)
return hc
class TfBasicLSTMLayer(Layer):
"""
Use TensorFlow's built-in (basic) LSTM implementation
"""
def __init__(self, incoming, num_units, hidden_nonlinearity, horizon=None, hidden_init_trainable=False,
forget_bias=1.0, use_peepholes=False, **kwargs):
assert not use_peepholes, "Basic LSTM does not support peepholes!"
assert len(incoming.output_shape) == 3
input_dim = incoming.shape[2]
lstm = tf.contrib.rnn.BasicLSTMCell(
num_units=num_units,
activation=hidden_nonlinearity,
state_is_tuple=True,
forget_bias=forget_bias
)
self.num_units = num_units
self.horizon = horizon
self.lstm = lstm
self.hidden_nonlinearity = hidden_nonlinearity
Layer.__init__(self, incoming=incoming, **kwargs)
# dummy input variable
input_dummy = tf.placeholder(tf.float32, (None, input_dim), "input_dummy")
hidden_dummy = tf.placeholder(tf.float32, (None, num_units), "hidden_dummy")
cell_dummy = tf.placeholder(tf.float32, (None, num_units), "cell_dummy")
with tf.variable_scope(self.name) as vs:
lstm(input_dummy, (cell_dummy, hidden_dummy), scope=vs)
vs.reuse_variables()
self.scope = vs
all_vars = [v for v in tf.global_variables() if v.name.startswith(vs.name)]
trainable_vars = [v for v in tf.trainable_variables() if v.name.startswith(vs.name)]
for var in trainable_vars:
self.add_param(spec=var, shape=None, name=None, trainable=True)
for var in set(all_vars) - set(trainable_vars):
self.add_param(spec=var, shape=None, name=None, trainable=False)
self.h0 = self.add_param(tf.zeros_initializer(), (num_units,), name="h0", trainable=hidden_init_trainable,
regularizable=False)
self.c0 = self.add_param(tf.zeros_initializer(), (num_units,), name="c0", trainable=hidden_init_trainable,
regularizable=False)
def step(self, hcprev, x):
hprev = hcprev[:, :self.num_units]
cprev = hcprev[:, self.num_units:]
x.set_shape((None, self.input_shape[-1]))
c, h = self.lstm(x, (cprev, hprev), scope=self.scope)[1]
return tf.concat(axis=1, values=[h, c])
def get_output_for(self, input, **kwargs):
input_shape = tf.shape(input)
n_batches = input_shape[0]
h0s = tf.tile(
tf.reshape(self.h0, (1, self.num_units)),
(n_batches, 1)
)
h0s.set_shape((None, self.num_units))
c0s = tf.tile(
tf.reshape(self.c0, (1, self.num_units)),
(n_batches, 1)
)
c0s.set_shape((None, self.num_units))
state = (c0s, h0s)
if self.horizon is not None:
outputs = []
for idx in range(self.horizon):
output, state = self.lstm(input[:, idx, :], state, scope=self.scope) # self.name)
outputs.append(tf.expand_dims(output, 1))
outputs = tf.concat(axis=1, values=outputs)
return outputs
else:
n_steps = input_shape[1]
input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
# flatten extra dimensions
shuffled_input = tf.transpose(input, (1, 0, 2))
shuffled_input.set_shape((None, None, self.input_shape[-1]))
hcs = tf.scan(
self.step,
elems=shuffled_input,
initializer=tf.concat(axis=1, values=[h0s, c0s]),
)
shuffled_hcs = tf.transpose(hcs, (1, 0, 2))
shuffled_hs = shuffled_hcs[:, :, :self.num_units]
shuffled_cs = shuffled_hcs[:, :, self.num_units:]
return shuffled_hs
def get_output_shape_for(self, input_shape):
n_batch, n_steps = input_shape[:2]
return n_batch, n_steps, self.num_units
def get_step_layer(self, l_in, l_prev_state, name=None):
return LSTMStepLayer(incomings=[l_in, l_prev_state], recurrent_layer=self, name=name)
def get_all_layers(layer, treat_as_input=None):
"""
:type layer: Layer | list[Layer]
:rtype: list[Layer]
"""
# We perform a depth-first search. We add a layer to the result list only
# after adding all its incoming layers (if any) or when detecting a cycle.
# We use a LIFO stack to avoid ever running into recursion depth limits.
try:
queue = deque(layer)
except TypeError:
queue = deque([layer])
seen = set()
done = set()
result = []
# If treat_as_input is given, we pretend we've already collected all their
# incoming layers.
if treat_as_input is not None:
seen.update(treat_as_input)
while queue:
# Peek at the leftmost node in the queue.
layer = queue[0]
if layer is None:
# Some node had an input_layer set to `None`. Just ignore it.
queue.popleft()
elif layer not in seen:
# We haven't seen this node yet: Mark it and queue all incomings
# to be processed first. If there are no incomings, the node will
# be appended to the result list in the next iteration.
seen.add(layer)
if hasattr(layer, 'input_layers'):
queue.extendleft(reversed(layer.input_layers))
elif hasattr(layer, 'input_layer'):
queue.appendleft(layer.input_layer)
else:
# We've been here before: Either we've finished all its incomings,
# or we've detected a cycle. In both cases, we remove the layer
# from the queue and append it to the result list.
queue.popleft()
if layer not in done:
result.append(layer)
done.add(layer)
return result
class NonlinearityLayer(Layer):
def __init__(self, incoming, nonlinearity=tf.nn.relu, **kwargs):
super(NonlinearityLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = (tf.identity if nonlinearity is None
else nonlinearity)
def get_output_for(self, input, **kwargs):
return self.nonlinearity(input)
def get_output_shape_for(self, input_shape):
return input_shape
class BatchNormLayer(Layer):
def __init__(self, incoming, center=True, scale=False, epsilon=0.001, decay=0.9,
beta=tf.zeros_initializer(), gamma=tf.ones_initializer(), moving_mean=tf.zeros_initializer(),
moving_variance=tf.ones_initializer(), **kwargs):
super(BatchNormLayer, self).__init__(incoming, **kwargs)
self.center = center
self.scale = scale
self.epsilon = epsilon
self.decay = decay
input_shape = incoming.output_shape
axis = list(range(len(input_shape) - 1))
params_shape = input_shape[-1:]
if center:
self.beta = self.add_param(beta, shape=params_shape, name='beta', trainable=True, regularizable=False)
else:
self.beta = None
if scale:
self.gamma = self.add_param(gamma, shape=params_shape, name='gamma', trainable=True, regularizable=True)
else:
self.gamma = None
self.moving_mean = self.add_param(moving_mean, shape=params_shape, name='moving_mean', trainable=False,
regularizable=False)
self.moving_variance = self.add_param(moving_variance, shape=params_shape, name='moving_variance',
trainable=False, regularizable=False)
self.axis = axis
def get_output_for(self, input, phase='train', **kwargs):
if phase == 'train':
# Calculate the moments based on the individual batch.
mean, variance = tf.nn.moments(input, self.axis, shift=self.moving_mean)
# Update the moving_mean and moving_variance moments.
update_moving_mean = moving_averages.assign_moving_average(
self.moving_mean, mean, self.decay)
update_moving_variance = moving_averages.assign_moving_average(
self.moving_variance, variance, self.decay)
# Make sure the updates are computed here.
with tf.control_dependencies([update_moving_mean,
update_moving_variance]):
output = tf.nn.batch_normalization(
input, mean, variance, self.beta, self.gamma, self.epsilon)
else:
output = tf.nn.batch_normalization(
input, self.moving_mean, self.moving_variance, self.beta, self.gamma, self.epsilon)
output.set_shape(self.input_shape)
return output
def get_output_shape_for(self, input_shape):
return input_shape
def batch_norm(layer, **kwargs):
nonlinearity = getattr(layer, 'nonlinearity', None)
scale = True
if nonlinearity is not None:
layer.nonlinearity = tf.identity
if nonlinearity is tf.nn.relu:
scale = False
if hasattr(layer, 'b') and layer.b is not None:
del layer.params[layer.b]
layer.b = None
bn_name = (kwargs.pop('name', None) or
(getattr(layer, 'name', None) and layer.name + '_bn'))
layer = BatchNormLayer(layer, name=bn_name, scale=scale, **kwargs)
if nonlinearity is not None:
nonlin_name = bn_name and bn_name + '_nonlin'
layer = NonlinearityLayer(layer, nonlinearity=nonlinearity, name=nonlin_name)
return layer
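# --- Editor's illustrative sketch (not part of the original module; names are hypothetical). ---
# batch_norm() wraps an existing layer: it removes the layer's bias, temporarily strips its
# nonlinearity, inserts a BatchNormLayer, and re-applies the nonlinearity on top.
def _example_batch_norm():
    l_in = InputLayer(shape=(None, 64), name="bn_example_in")
    l_hidden = batch_norm(DenseLayer(l_in, num_units=128, nonlinearity=tf.nn.relu,
                                     name="bn_example_dense"))
    return l_hidden  # a NonlinearityLayer wrapping a BatchNormLayer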
class ElemwiseSumLayer(MergeLayer):
def __init__(self, incomings, **kwargs):
super(ElemwiseSumLayer, self).__init__(incomings, **kwargs)
def get_output_for(self, inputs, **kwargs):
return functools.reduce(tf.add, inputs)
def get_output_shape_for(self, input_shapes):
assert len(set(input_shapes)) == 1
return input_shapes[0]
def get_output(layer_or_layers, inputs=None, **kwargs):
# track accepted kwargs used by get_output_for
accepted_kwargs = {'deterministic'}
# obtain topological ordering of all layers the output layer(s) depend on
treat_as_input = list(inputs.keys()) if isinstance(inputs, dict) else []
all_layers = get_all_layers(layer_or_layers, treat_as_input)
# initialize layer-to-expression mapping from all input layers
all_outputs = dict((layer, layer.input_var)
for layer in all_layers
if isinstance(layer, InputLayer) and
layer not in treat_as_input)
# update layer-to-expression mapping from given input(s), if any
if isinstance(inputs, dict):
all_outputs.update((layer, tf.convert_to_tensor(expr))
for layer, expr in list(inputs.items()))
elif inputs is not None:
if len(all_outputs) > 1:
raise ValueError("get_output() was called with a single input "
"expression on a network with multiple input "
"layers. Please call it with a dictionary of "
"input expressions instead.")
for input_layer in all_outputs:
all_outputs[input_layer] = tf.convert_to_tensor(inputs)
# update layer-to-expression mapping by propagating the inputs
for layer in all_layers:
if layer not in all_outputs:
try:
if isinstance(layer, MergeLayer):
layer_inputs = [all_outputs[input_layer]
for input_layer in layer.input_layers]
else:
layer_inputs = all_outputs[layer.input_layer]
except KeyError:
# one of the input_layer attributes must have been `None`
raise ValueError("get_output() was called without giving an "
"input expression for the free-floating "
"layer %r. Please call it with a dictionary "
"mapping this layer to an input expression."
% layer)
all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs)
try:
names, _, _, defaults = getargspec(layer.get_output_for)
except TypeError:
# If introspection is not possible, skip it
pass
else:
if defaults is not None:
accepted_kwargs |= set(names[-len(defaults):])
accepted_kwargs |= set(layer.get_output_kwargs)
unused_kwargs = set(kwargs.keys()) - accepted_kwargs
if unused_kwargs:
suggestions = []
for kwarg in unused_kwargs:
suggestion = get_close_matches(kwarg, accepted_kwargs)
if suggestion:
suggestions.append('%s (perhaps you meant %s)'
% (kwarg, suggestion[0]))
else:
suggestions.append(kwarg)
warn("get_output() was called with unused kwargs:\n\t%s"
% "\n\t".join(suggestions))
# return the output(s) of the requested layer(s) only
try:
return [all_outputs[layer] for layer in layer_or_layers]
except TypeError:
return all_outputs[layer_or_layers]
def unique(l):
"""Filters duplicates of iterable.
Create a new list from l with duplicate entries removed,
while preserving the original order.
Parameters
----------
l : iterable
Input iterable to filter of duplicates.
Returns
-------
list
A list of elements of `l` without duplicates and in the same order.
"""
new_list = []
seen = set()
for el in l:
if el not in seen:
new_list.append(el)
seen.add(el)
return new_list
def get_all_params(layer, **tags):
"""
:type layer: Layer|list[Layer]
"""
layers = get_all_layers(layer)
params = chain.from_iterable(l.get_params(**tags) for l in layers)
return unique(params)
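# --- Editor's illustrative sketch (not part of the original module; names are hypothetical). ---
# Typical end-to-end wiring: stack layers, compile the symbolic output with get_output(),
# and hand the trainable variables from get_all_params() to an optimizer.
def _example_network():
    l_in = InputLayer(shape=(None, 10), name="mlp_example_in")
    l_hid = DenseLayer(l_in, num_units=32, nonlinearity=tf.nn.relu, name="mlp_example_hid")
    l_out = DenseLayer(l_hid, num_units=2, nonlinearity=tf.nn.softmax, name="mlp_example_out")
    prediction = get_output(l_out, deterministic=True)
    params = get_all_params(l_out, trainable=True)
    return prediction, params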
| 41.536116 | 120 | 0.584831 |
4a240ce96e1fc23c8038fc83a2d841611f8a2d39 | 971 | py | Python | bot.py | Shetty073/world-of-trucks-bot | dcbd563fd7f7ba750eba015741deefbbf56368c9 | [
"MIT"
] | null | null | null | bot.py | Shetty073/world-of-trucks-bot | dcbd563fd7f7ba750eba015741deefbbf56368c9 | [
"MIT"
] | null | null | null | bot.py | Shetty073/world-of-trucks-bot | dcbd563fd7f7ba750eba015741deefbbf56368c9 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import configparser
# Get the token from configuration file
config = configparser.ConfigParser()
config.read("config/config.ini")
TOKEN = config["BOT"]["token"]
# Bot code begins
bot = commands.Bot(command_prefix='?', description="just `?ask` for help", case_insensitive=True)
# Message on terminal when the bot starts
@bot.event
async def on_ready():
print(bot.user.name)
print("Bot running..")
await bot.change_presence(activity=discord.Game("just ?ask for help"))
# Cogs list
extensions = [
"cogs.info",
"cogs.plates",
"cogs.ask"
]
# Load all cogs
if __name__ == "__main__":
for extension in extensions:
try:
bot.load_extension(extension)
print(f"{extension} loaded..")
except Exception as error:
print(f"ERROR: {extension} could not be loaded. {error}")
bot.remove_command("help") # Remove the default help command
bot.run(TOKEN)
| 24.275 | 97 | 0.682801 |
4a240cf49c830f997ca8c53661bedf77679444dc | 2,301 | py | Python | uob/apps/UDPEchoWithMeasurements/util/MySQLListener.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | 1 | 2020-02-28T20:35:09.000Z | 2020-02-28T20:35:09.000Z | uob/apps/UDPEchoWithMeasurements/util/MySQLListener.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | uob/apps/UDPEchoWithMeasurements/util/MySQLListener.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | null | null | null |
import socket
import UdpReport
import re
import sys
import MySQLdb

port = 7000

if __name__ == '__main__':
    conn = MySQLdb.connect (host = "localhost",
                            user = "b6lowpan",
                            db = "b6lowpan")
    cursor = conn.cursor()

    s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    s.bind(('', port))

    if len(sys.argv) < 2:
        print "\tListener.py <tablename>"
        sys.exit(1)

    try:
        drop = "DROP TABLE " + str(sys.argv[1])
        cursor.execute(drop)
    except:
        print "Drop failed... continuing"

    methods = []
    create_table = "CREATE TABLE " + str(sys.argv[1]) + " ("
    create_table += "ts TIMESTAMP, origin INT(4), "
    insert = "INSERT INTO " + sys.argv[1] + " (origin, "
    re = re.compile('^get_(.*)')
    for method in dir(UdpReport.UdpReport):
        result = re.search(method)
        if result != None:
            create_table += str(result.group(1)) + " INT(4), "
            insert += str(result.group(1)) + ", "
            methods.append(str(result.group(1)))

    create_table = create_table[0:len(create_table) - 2]
    insert = insert[0:len(insert) - 2]
    create_table += ")"
    insert += ") VALUES ("
    print insert
    print create_table
    cursor.execute(create_table)

    while True:
        data, addr = s.recvfrom(1024)
        if (len(data) > 0):
            print
            print str(len(data)) + ":",
            for i in data:
                print "0x%x" % ord(i),
            print
            rpt = UdpReport.UdpReport(data=data, data_length=len(data))
            addr = addr[0]
            AA = addr.split(":")
            print addr
            print rpt

            thisInsert = insert
            thisInsert += "0x" + AA[-1] + ", "
            for m in methods:
                try:
                    getter = getattr(rpt, 'get_' + m, None)
                    val = getter()
                except:
                    val = 0
                if (isinstance(val, list)):
                    val = val[0]
                thisInsert += str(val) + ", "
            thisInsert = thisInsert[0:len(thisInsert) - 2]
            thisInsert += ")"
            print thisInsert
            cursor.execute(thisInsert)

    conn.close()
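
# Illustration (added; the table and field names below are hypothetical, since
# the real ones come from sys.argv[1] and from whatever get_* accessors
# UdpReport defines): if UdpReport exposed get_seqno() and get_uptime(), the
# strings built above would come out as
#   CREATE TABLE mytable (ts TIMESTAMP, origin INT(4), seqno INT(4), uptime INT(4))
#   INSERT INTO mytable (origin, seqno, uptime) VALUES (0x12, 42, 3600)
# i.e. one column per getter, with `origin` taken from the last colon-group of
# the sender's IPv6 address.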
| 25.285714 | 71 | 0.488918 |
4a240d759703db6192c7c9894d017e4bf9788e8e | 303 | py | Python | examples/wrap_external.py | gmerz/ArgTyper | 56e1d60ce2cc8f7d889fb8890ddbe922b85ab9f3 | [
"MIT"
] | 1 | 2021-04-26T19:46:33.000Z | 2021-04-26T19:46:33.000Z | examples/wrap_external.py | gmerz/ArgTyper | 56e1d60ce2cc8f7d889fb8890ddbe922b85ab9f3 | [
"MIT"
] | null | null | null | examples/wrap_external.py | gmerz/ArgTyper | 56e1d60ce2cc8f7d889fb8890ddbe922b85ab9f3 | [
"MIT"
] | null | null | null | import re
import argtyper

argtyper.Argument("pattern", help="The pattern to search for")(re.search)
argtyper.Argument(
    "string", help="The string, in which you want to search for the pattern"
)(re.search)

at = argtyper.ArgTyper(re.search)

responses = at(return_responses=True)
print(responses[0])
| 25.25 | 76 | 0.752475 |
4a240d913486af5d4ff79bd357077939ed10b69f | 849 | py | Python | crop.py | dylan-roussin/CRAFT-pytorch | 6aa2eb9339f9245ff9614eb63a26bf9ed017568c | [
"MIT"
] | null | null | null | crop.py | dylan-roussin/CRAFT-pytorch | 6aa2eb9339f9245ff9614eb63a26bf9ed017568c | [
"MIT"
] | null | null | null | crop.py | dylan-roussin/CRAFT-pytorch | 6aa2eb9339f9245ff9614eb63a26bf9ed017568c | [
"MIT"
] | null | null | null | ### for each set of coordinates generated in the res_[filename].txt, crop and output to crop folder - dylan-roussin
# example line: '1706,20,1784,20,1784,38,1706,38'
# read as four corner points in image coordinates (y grows downward),
# i.e. clockwise from the top-left corner:
# top-left     (1706,20)
# top-right    (1784,20)
# bottom-right (1784,38)
# bottom-left  (1706,38)
import os

import cv2  # assumption: OpenCV is used to load the source image and write the crops
import numpy as np

coor_file = "result/res_csgo.txt"
img = cv2.imread("result/res_csgo.jpg")  # assumption: the rendered result image matching the txt
os.makedirs("crop", exist_ok=True)  # output folder for the cropped word images

# open craft result txt
with open(coor_file) as f:
    lines = f.read().split("\n") # new lines char are their own entry in resulting array
    bounding_coor = np.array([line for line in lines if line.strip() != ""]) # remove new line char from array

for idx, box in enumerate(bounding_coor):
    x1, y1, x2, y2, x3, y3, x4, y4 = [int(v) for v in box.split(",")]
    # crop with numpy slicing to create a pointer to crop points rather than creating new images https://stackoverflow.com/questions/15589517/how-to-crop-an-image-in-opencv-using-python
    cropped = img[min(y1, y2, y3, y4):max(y1, y2, y3, y4), min(x1, x2, x3, x4):max(x1, x2, x3, x4)]
    cv2.imwrite(os.path.join("crop", "crop_%d.png" % idx), cropped)
| 38.590909 | 181 | 0.725559 |