text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1) | license (stringclasses 15) | size (int64 6-947k) | score (float64 0-0.34)
---|---|---|---|---|---|---|
# adapted from ColorWall by jesstesstest
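# each glyph below is 8 rows of 8 characters: '#' is a lit pixel, '.' is off.
# the tuple is indexed by ASCII code (e.g. _font[ord('A')] is the 'A' bitmap);
# codes 0-31 (non-printable) all share the same placeholder glyph.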
_font = (
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
),
(
("........"),
("........"),
("........"),
("........"),
("........"),
("........"),
("........"),
("........")
),
(
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("........"),
("....#..."),
("........")
),
(
("...#.#.."),
("...#.#.."),
("........"),
("........"),
("........"),
("........"),
("........"),
("........")
),
(
("...#.#.."),
("...#.#.."),
(".#######"),
("...#.#.."),
(".#######"),
("...#.#.."),
("...#.#.."),
("........")
),
(
("....#..."),
("...####."),
("..#.#..."),
("...###.."),
("....#.#."),
("..####.."),
("....#..."),
("........")
),
(
("........"),
("..##..#."),
("..##.#.."),
("....#..."),
("...#.##."),
("..#..##."),
("........"),
("........")
),
(
("...##..."),
("..#.#..."),
("...#...."),
("..#.#..."),
(".#...##."),
(".#...#.."),
("..###.#."),
("........")
),
(
("....#..."),
("....#..."),
("........"),
("........"),
("........"),
("........"),
("........"),
("........")
),
(
(".....#.."),
("....#..."),
("...#...."),
("...#...."),
("...#...."),
("....#..."),
(".....#.."),
("........")
),
(
("...#...."),
("....#..."),
(".....#.."),
(".....#.."),
(".....#.."),
("....#..."),
("...#...."),
("........")
),
(
("....#..."),
(".#..#..#"),
("..#.#.#."),
("...###.."),
("..#.#.#."),
(".#..#..#"),
("....#..."),
("........")
),
(
("....#..."),
("....#..."),
("....#..."),
(".#######"),
("....#..."),
("....#..."),
("....#..."),
("........")
),
(
("........"),
("........"),
("........"),
("........"),
("....##.."),
("....##.."),
(".....#.."),
("....#...")
),
(
("........"),
("........"),
("........"),
(".#######"),
("........"),
("........"),
("........"),
("........")
),
(
("........"),
("........"),
("........"),
("........"),
("........"),
("....##.."),
("....##.."),
("........")
),
(
(".......#"),
("......#."),
(".....#.."),
("....#..."),
("...#...."),
("..#....."),
(".#......"),
("........")
),
(
("...###.."),
("..#...#."),
("..#...#."),
("..#.#.#."),
("..#...#."),
("..#...#."),
("...###.."),
("........")
),
(
(" "),
(" ..##..."),
(" ...#..."),
(" ...#..."),
(" ...#..."),
(" ...#..."),
(" ..###.."),
(" ")
),
(
("...###.."),
("..#...#."),
("......#."),
(".....#.."),
("....#..."),
("...#...."),
("..#####."),
("........")
),
(
("...###.."),
("..#...#."),
("......#."),
("....##.."),
("......#."),
("..#...#."),
("...###.."),
("........")
),
(
("....##.."),
("...#.#.."),
("..#..#.."),
("..#####."),
(".....#.."),
(".....#.."),
("....###."),
("........")
),
(
("..#####."),
("..#....."),
("..#....."),
("..####.."),
("......#."),
("..#...#."),
("...###.."),
("........")
),
(
("...###.."),
("..#...#."),
("..#....."),
("..####.."),
("..#...#."),
("..#...#."),
("...###.."),
("........")
),
(
("..#####."),
("......#."),
(".....#.."),
("....#..."),
("...#...."),
("...#...."),
("...#...."),
("........")
),
(
("...###.."),
("..#...#."),
("..#...#."),
("...###.."),
("..#...#."),
("..#...#."),
("...###.."),
("........")
),
(
("...###.."),
("..#...#."),
("..#...#."),
("...####."),
("......#."),
("..#...#."),
("...###.."),
("........")
),
(
("........"),
("....##.."),
("....##.."),
("........"),
("....##.."),
("....##.."),
("........"),
("........")
),
(
("........"),
("....##.."),
("....##.."),
("........"),
("....##.."),
("....##.."),
(".....#.."),
("....#...")
),
(
(".....#.."),
("....#..."),
("...#...."),
("..#....."),
("...#...."),
("....#..."),
(".....#.."),
("........")
),
(
("........"),
("........"),
(".#######"),
("........"),
(".#######"),
("........"),
("........"),
("........")
),
(
("..#....."),
("...#...."),
("....#..."),
(".....#.."),
("....#..."),
("...#...."),
("..#....."),
("........")
),
(
("...###.."),
("..#...#."),
("......#."),
(".....#.."),
("....#..."),
("........"),
("....#..."),
("........")
),
(
("...###.."),
("..#...#."),
("..#.###."),
("..#.#.#."),
("..#.###."),
("..#....."),
("...###.."),
("........")
),
(
("...###.."),
("..#...#."),
("..#...#."),
("..#####."),
("..#...#."),
("..#...#."),
("..#...#."),
("........")
),
(
("..####.."),
("..#...#."),
("..#...#."),
("..####.."),
("..#...#."),
("..#...#."),
("..####.."),
("........")
),
(
("...###.."),
("..#...#."),
("..#....."),
("..#....."),
("..#....."),
("..#...#."),
("...###.."),
("........")
),
(
("..####.."),
("..#...#."),
("..#...#."),
("..#...#."),
("..#...#."),
("..#...#."),
("..####.."),
("........")
),
(
("..#####."),
("..#....."),
("..#....."),
("..####.."),
("..#....."),
("..#....."),
("..#####."),
("........")
),
(
("..#####."),
("..#....."),
("..#....."),
("..#####."),
("..#....."),
("..#....."),
("..#....."),
("........")
),
(
("...###.."),
("..#...#."),
("..#....."),
("..#.###."),
("..#...#."),
("..#...#."),
("...###.."),
("........")
),
(
("..#...#."),
("..#...#."),
("..#...#."),
("..#####."),
("..#...#."),
("..#...#."),
("..#...#."),
("........")
),
(
("...###.."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("...###.."),
("........")
),
(
("....###."),
(".....#.."),
(".....#.."),
(".....#.."),
("..#..#.."),
("..#..#.."),
("...##..."),
("........")
),
(
("..#...#."),
("..#...#."),
("..#..#.."),
("..###..."),
("..#..#.."),
("..#...#."),
("..#...#."),
("........")
),
(
("...#...."),
("...#...."),
("...#...."),
("...#...."),
("...#...."),
("...#...."),
("...####."),
("........")
),
(
(".#.....#"),
(".##...##"),
(".#.#.#.#"),
(".#..#..#"),
(".#.....#"),
(".#.....#"),
(".#.....#"),
("........")
),
(
("..#...#."),
("..##..#."),
("..#.#.#."),
("..#.#.#."),
("..#..##."),
("..#...#."),
("..#...#."),
("........")
),
(
("...###.."),
("..#...#."),
("..#...#."),
("..#...#."),
("..#...#."),
("..#...#."),
("...###.."),
("........")
),
(
("...###.."),
("...#..#."),
("...#..#."),
("...###.."),
("...#...."),
("...#...."),
("...#...."),
("........")
),
(
("...###.."),
("..#...#."),
("..#...#."),
("..#...#."),
("..#...#."),
("..#...#."),
("...###.."),
(".....##.")
),
(
("..####.."),
("..#...#."),
("..#...#."),
("..####.."),
("..#.#..."),
("..#..#.."),
("..#...#."),
("........")
),
(
("...###.."),
("..#...#."),
("..#....."),
("...###.."),
("......#."),
("..#...#."),
("...###.."),
("........")
),
(
("..#####."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("........")
),
(
("..#...#."),
("..#...#."),
("..#...#."),
("..#...#."),
("..#...#."),
("..#...#."),
("...###.."),
("........")
),
(
("..#...#."),
("..#...#."),
("..#...#."),
("...#.#.."),
("...#.#.."),
("....#..."),
("....#..."),
("........")
),
(
(".#.....#"),
(".#.....#"),
(".#.....#"),
("..#.#.#."),
("..#.#.#."),
("...#.#.."),
("...#.#.."),
("........")
),
(
("..#...#."),
("..#...#."),
("...#.#.."),
("....#..."),
("...#.#.."),
("..#...#."),
("..#...#."),
("........")
),
(
("..#...#."),
("..#...#."),
("...#.#.."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("........")
),
(
("..#####."),
("......#."),
(".....#.."),
("....#..."),
("...#...."),
("..#....."),
("..#####."),
("........")
),
(
("...###.."),
("...#...."),
("...#...."),
("...#...."),
("...#...."),
("...#...."),
("...###.."),
("........")
),
(
(".#......"),
("..#....."),
("...#...."),
("....#..."),
(".....#.."),
("......#."),
(".......#"),
("........")
),
(
("...###.."),
(".....#.."),
(".....#.."),
(".....#.."),
(".....#.."),
(".....#.."),
("...###.."),
("........")
),
(
("....#..."),
("...#.#.."),
("..#...#."),
("........"),
("........"),
("........"),
("........"),
("........")
),
(
("........"),
("........"),
("........"),
("........"),
("........"),
("........"),
("........"),
(".#######")
),
(
("...#...."),
("....#..."),
("........"),
("........"),
("........"),
("........"),
("........"),
("........")
),
(
("........"),
("...###.."),
("......#."),
("...####."),
("..#...#."),
("..#...#."),
("...###.#"),
("........")
),
(
("...#...."),
("...#...."),
("...###.."),
("...#..#."),
("...#..#."),
("...#..#."),
("..#.##.."),
("........")
),
(
("........"),
("........"),
("...###.."),
("..#....."),
("..#....."),
("..#....."),
("...###.."),
("........")
),
(
("......#."),
("......#."),
("....###."),
("...#..#."),
("...#..#."),
("...#..#."),
("....##.#"),
("........")
),
(
("........"),
("........"),
("...###.."),
("..#...#."),
("..#####."),
("..#....."),
("...###.."),
("........")
),
(
("....##.."),
("...#..#."),
("...#...."),
("..###..."),
("...#...."),
("...#...."),
("...#...."),
("........")
),
(
("........"),
("........"),
("...###.#"),
("..#...#."),
("..#...#."),
("...####."),
("......#."),
("...###..")
),
(
("..#....."),
("..#....."),
("..#.##.."),
("..##..#."),
("..#...#."),
("..#...#."),
("..#...#."),
("........")
),
(
("........"),
("....#..."),
("........"),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("........")
),
(
("........"),
("....#..."),
("........"),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("..##....")
),
(
("..#....."),
("..#....."),
("..#..#.."),
("..#.#..."),
("..##...."),
("..#.#..."),
("..#..#.."),
("........")
),
(
("...##..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("........")
),
(
("........"),
("........"),
(" .##.##."),
(".#..#..#"),
(".#..#..#"),
(".#.....#"),
(".#.....#"),
("........")
),
(
("........"),
("........"),
("..#.##.."),
("...#..#."),
("...#..#."),
("...#..#."),
("...#..#."),
("........")
),
(
("........"),
("........"),
("...###.."),
("..#...#."),
("..#...#."),
("..#...#."),
("...###.."),
("........")
),
(
("........"),
("........"),
("..#.##.."),
("...#..#."),
("...#..#."),
("...###.."),
("...#...."),
("...#....")
),
(
("........"),
("........"),
("...##.#."),
("..#..#.."),
("..#..#.."),
("...###.."),
(".....#.."),
(".....#..")
),
(
("........"),
("........"),
("..#.##.."),
("..##...."),
("..#....."),
("..#....."),
("..#....."),
("........")
),
(
("........"),
("........"),
("...###.."),
("..#....."),
("...##..."),
(".....#.."),
("..###..."),
("........")
),
(
("........"),
("....#..."),
("...###.."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("........")
),
(
("........"),
("........"),
("..#..#.."),
("..#..#.."),
("..#..#.."),
("..#..#.."),
("...##.#."),
("........")
),
(
("........"),
("........"),
("..#...#."),
("..#...#."),
("..#...#."),
("...#.#.."),
("....#..."),
("........")
),
(
("........"),
("........"),
(".#.....#"),
(".#.....#"),
(".#..#..#"),
(".#.#.#.#"),
("..#...#."),
("........")
),
(
("........"),
("........"),
("..#...#."),
("...#.#.."),
("....#..."),
("...#.#.."),
("..#...#."),
("........")
),
(
("........"),
("........"),
("...#..#."),
("...#..#."),
("...#..#."),
("....###."),
("......#."),
("...###..")
),
(
("........"),
("........"),
("..####.."),
(".....#.."),
("....#..."),
("...#...."),
("..####.."),
("........")
),
(
("....##.."),
("...#...."),
("...#...."),
("..#....."),
("...#...."),
("...#...."),
("....##.."),
("........")
),
(
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("....#..."),
("........")
),
(
("..##...."),
("....#..."),
("....#..."),
(".....#.."),
("....#..."),
("....#..."),
("..##...."),
("........")
),
(
("........"),
("........"),
("..##...."),
(".#..#..#"),
(".....##."),
("........"),
("........"),
("........")
),
(
("########"),
("##....##"),
("#.#..#.#"),
("#..##..#"),
("#..##..#"),
("#.#..#.#"),
("##....##"),
("########")
)
) | 602p/orth | os/kernel/font/font.py | Python | lgpl-3.0 | 19,506 | 0.000103 |
"""Support to interface with the Plex API."""
from __future__ import annotations
from functools import wraps
import json
import logging
import plexapi.exceptions
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.network import is_internal_request
from .const import (
COMMON_PLAYERS,
CONF_SERVER_IDENTIFIER,
DISPATCHERS,
DOMAIN as PLEX_DOMAIN,
NAME_FORMAT,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
PLEX_URI_SCHEME,
SERVERS,
TRANSIENT_DEVICE_MODELS,
)
from .media_browser import browse_media
_LOGGER = logging.getLogger(__name__)
def needs_session(func):
"""Ensure session is available for certain attributes."""
@wraps(func)
def get_session_attribute(self, *args):
if self.session is None:
return None
return func(self, *args)
return get_session_attribute
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Plex media_player from a config entry."""
server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
registry = await async_get_registry(hass)
@callback
def async_new_media_players(new_entities):
_async_add_entities(hass, registry, async_add_entities, server_id, new_entities)
unsub = async_dispatcher_connect(
hass, PLEX_NEW_MP_SIGNAL.format(server_id), async_new_media_players
)
hass.data[PLEX_DOMAIN][DISPATCHERS][server_id].append(unsub)
_LOGGER.debug("New entity listener created")
@callback
def _async_add_entities(hass, registry, async_add_entities, server_id, new_entities):
"""Set up Plex media_player entities."""
_LOGGER.debug("New entities: %s", new_entities)
entities = []
plexserver = hass.data[PLEX_DOMAIN][SERVERS][server_id]
for entity_params in new_entities:
plex_mp = PlexMediaPlayer(plexserver, **entity_params)
entities.append(plex_mp)
# Migration to per-server unique_ids
old_entity_id = registry.async_get_entity_id(
MP_DOMAIN, PLEX_DOMAIN, plex_mp.machine_identifier
)
if old_entity_id is not None:
new_unique_id = f"{server_id}:{plex_mp.machine_identifier}"
_LOGGER.debug(
"Migrating unique_id from [%s] to [%s]",
plex_mp.machine_identifier,
new_unique_id,
)
registry.async_update_entity(old_entity_id, new_unique_id=new_unique_id)
async_add_entities(entities, True)
class PlexMediaPlayer(MediaPlayerEntity):
"""Representation of a Plex device."""
def __init__(self, plex_server, device, player_source, session=None):
"""Initialize the Plex device."""
self.plex_server = plex_server
self.device = device
self.player_source = player_source
self.device_make = None
self.device_platform = None
self.device_product = None
self.device_title = None
self.device_version = None
self.machine_identifier = device.machineIdentifier
self.session_device = None
self._device_protocol_capabilities = None
self._previous_volume_level = 1 # Used in fake muting
self._volume_level = 1 # since we can't retrieve remotely
self._volume_muted = False # since we can't retrieve remotely
self._attr_available = False
self._attr_should_poll = False
self._attr_state = STATE_IDLE
self._attr_unique_id = (
f"{self.plex_server.machine_identifier}:{self.machine_identifier}"
)
# Initializes other attributes
self.session = session
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
_LOGGER.debug("Added %s [%s]", self.entity_id, self.unique_id)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(self.unique_id),
self.async_refresh_media_player,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL.format(self.unique_id),
self.async_update_from_websocket,
)
)
@callback
def async_refresh_media_player(self, device, session, source):
"""Set instance objects and trigger an entity state update."""
_LOGGER.debug("Refreshing %s [%s / %s]", self.entity_id, device, session)
self.device = device
self.session = session
if source:
self.player_source = source
self.async_schedule_update_ha_state(True)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
@callback
def async_update_from_websocket(self, state):
"""Update the entity based on new websocket data."""
self.update_state(state)
self.async_write_ha_state()
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
def update(self):
"""Refresh key device data."""
if not self.session:
self.force_idle()
if not self.device:
self._attr_available = False
return
self._attr_available = True
try:
device_url = self.device.url("/")
except plexapi.exceptions.BadRequest:
device_url = "127.0.0.1"
if "127.0.0.1" in device_url:
self.device.proxyThroughServer()
self._device_protocol_capabilities = self.device.protocolCapabilities
for device in filter(None, [self.device, self.session_device]):
self.device_make = self.device_make or device.device
self.device_platform = self.device_platform or device.platform
self.device_product = self.device_product or device.product
self.device_title = self.device_title or device.title
self.device_version = self.device_version or device.version
name_parts = [self.device_product, self.device_title or self.device_platform]
if (self.device_product in COMMON_PLAYERS) and self.device_make:
# Add more context in name for likely duplicates
name_parts.append(self.device_make)
if self.username and self.username != self.plex_server.owner:
# Prepend username for shared/managed clients
name_parts.insert(0, self.username)
self._attr_name = NAME_FORMAT.format(" - ".join(name_parts))
def force_idle(self):
"""Force client to idle."""
self._attr_state = STATE_IDLE
if self.player_source == "session":
self.device = None
self.session_device = None
self._attr_available = False
@property
def session(self):
"""Return the active session for this player."""
return self._session
@session.setter
def session(self, session):
self._session = session
if session:
self.session_device = self.session.player
self.update_state(self.session.state)
else:
self._attr_state = STATE_IDLE
@property
@needs_session
def username(self):
"""Return the username of the client owner."""
return self.session.username
def update_state(self, state):
"""Set the state of the device, handle session termination."""
if state == "playing":
self._attr_state = STATE_PLAYING
elif state == "paused":
self._attr_state = STATE_PAUSED
elif state == "stopped":
self.session = None
self.force_idle()
else:
self._attr_state = STATE_IDLE
@property
def _is_player_active(self):
"""Report if the client is playing media."""
return self.state in (STATE_PLAYING, STATE_PAUSED)
@property
def _active_media_plexapi_type(self):
"""Get the active media type required by PlexAPI commands."""
        if self.media_content_type == MEDIA_TYPE_MUSIC:
return "music"
return "video"
@property
@needs_session
def session_key(self):
"""Return current session key."""
return self.session.sessionKey
@property
@needs_session
def media_library_title(self):
"""Return the library name of playing media."""
return self.session.media_library_title
@property
@needs_session
def media_content_id(self):
"""Return the content ID of current playing media."""
return self.session.media_content_id
@property
@needs_session
def media_content_type(self):
"""Return the content type of current playing media."""
return self.session.media_content_type
@property
@needs_session
def media_content_rating(self):
"""Return the content rating of current playing media."""
return self.session.media_content_rating
@property
@needs_session
def media_artist(self):
"""Return the artist of current playing media, music track only."""
return self.session.media_artist
@property
@needs_session
def media_album_name(self):
"""Return the album name of current playing media, music track only."""
return self.session.media_album_name
@property
@needs_session
def media_album_artist(self):
"""Return the album artist of current playing media, music only."""
return self.session.media_album_artist
@property
@needs_session
def media_track(self):
"""Return the track number of current playing media, music only."""
return self.session.media_track
@property
@needs_session
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self.session.media_duration
@property
@needs_session
def media_position(self):
"""Return the duration of current playing media in seconds."""
return self.session.media_position
@property
@needs_session
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self.session.media_position_updated_at
@property
@needs_session
def media_image_url(self):
"""Return the image URL of current playing media."""
return self.session.media_image_url
@property
@needs_session
def media_summary(self):
"""Return the summary of current playing media."""
return self.session.media_summary
@property
@needs_session
def media_title(self):
"""Return the title of current playing media."""
return self.session.media_title
@property
@needs_session
def media_season(self):
"""Return the season of current playing media (TV Show only)."""
return self.session.media_season
@property
@needs_session
def media_series_title(self):
"""Return the title of the series of current playing media."""
return self.session.media_series_title
@property
@needs_session
def media_episode(self):
"""Return the episode of current playing media (TV Show only)."""
return self.session.media_episode
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self.device and "playback" in self._device_protocol_capabilities:
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_SEEK
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_VOLUME_MUTE
| SUPPORT_BROWSE_MEDIA
)
return SUPPORT_BROWSE_MEDIA | SUPPORT_PLAY_MEDIA
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.setVolume(int(volume * 100), self._active_media_plexapi_type)
self._volume_level = volume # store since we can't retrieve
@property
def volume_level(self):
"""Return the volume level of the client (0..1)."""
if (
self._is_player_active
and self.device
and "playback" in self._device_protocol_capabilities
):
return self._volume_level
return None
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
if self._is_player_active and self.device:
return self._volume_muted
return None
def mute_volume(self, mute):
"""Mute the volume.
Since we can't actually mute, we'll:
- On mute, store volume and set volume to 0
- On unmute, set volume to previously stored volume
"""
if not (self.device and "playback" in self._device_protocol_capabilities):
return
self._volume_muted = mute
if mute:
self._previous_volume_level = self._volume_level
self.set_volume_level(0)
else:
self.set_volume_level(self._previous_volume_level)
def media_play(self):
"""Send play command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.play(self._active_media_plexapi_type)
def media_pause(self):
"""Send pause command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.pause(self._active_media_plexapi_type)
def media_stop(self):
"""Send stop command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.stop(self._active_media_plexapi_type)
def media_seek(self, position):
"""Send the seek command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.seekTo(position * 1000, self._active_media_plexapi_type)
def media_next_track(self):
"""Send next track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipNext(self._active_media_plexapi_type)
def media_previous_track(self):
"""Send previous track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipPrevious(self._active_media_plexapi_type)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if not (self.device and "playback" in self._device_protocol_capabilities):
raise HomeAssistantError(
f"Client is not currently accepting playback controls: {self.name}"
)
if not self.plex_server.has_token:
_LOGGER.warning(
"Plex integration configured without a token, playback may fail"
)
if media_id.startswith(PLEX_URI_SCHEME):
media_id = media_id[len(PLEX_URI_SCHEME) :]
if media_type == "station":
playqueue = self.plex_server.create_station_playqueue(media_id)
try:
self.device.playMedia(playqueue)
except requests.exceptions.ConnectTimeout as exc:
raise HomeAssistantError(
f"Request failed when playing on {self.name}"
) from exc
return
src = json.loads(media_id)
if isinstance(src, int):
src = {"plex_key": src}
offset = 0
if playqueue_id := src.pop("playqueue_id", None):
try:
playqueue = self.plex_server.get_playqueue(playqueue_id)
except plexapi.exceptions.NotFound as err:
raise HomeAssistantError(
f"PlayQueue '{playqueue_id}' could not be found"
) from err
else:
shuffle = src.pop("shuffle", 0)
offset = src.pop("offset", 0) * 1000
resume = src.pop("resume", False)
media = self.plex_server.lookup_media(media_type, **src)
if media is None:
raise HomeAssistantError(f"Media could not be found: {media_id}")
if resume and not offset:
offset = media.viewOffset
_LOGGER.debug("Attempting to play %s on %s", media, self.name)
playqueue = self.plex_server.create_playqueue(media, shuffle=shuffle)
try:
self.device.playMedia(playqueue, offset=offset)
except requests.exceptions.ConnectTimeout as exc:
raise HomeAssistantError(
f"Request failed when playing on {self.name}"
) from exc
@property
def extra_state_attributes(self):
"""Return the scene state attributes."""
attributes = {}
for attr in (
"media_content_rating",
"media_library_title",
"player_source",
"media_summary",
"username",
):
if value := getattr(self, attr, None):
attributes[attr] = value
return attributes
@property
def device_info(self) -> DeviceInfo:
"""Return a device description for device registry."""
if self.machine_identifier is None:
return None
if self.device_product in TRANSIENT_DEVICE_MODELS:
return DeviceInfo(
identifiers={(PLEX_DOMAIN, "plex.tv-clients")},
name="Plex Client Service",
manufacturer="Plex",
model="Plex Clients",
entry_type=DeviceEntryType.SERVICE,
)
return DeviceInfo(
identifiers={(PLEX_DOMAIN, self.machine_identifier)},
manufacturer=self.device_platform or "Plex",
model=self.device_product or self.device_make,
name=self.name,
sw_version=self.device_version,
via_device=(PLEX_DOMAIN, self.plex_server.machine_identifier),
)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
is_internal = is_internal_request(self.hass)
return await self.hass.async_add_executor_job(
browse_media,
self.plex_server,
is_internal,
media_content_type,
media_content_id,
)
| rohitranjan1991/home-assistant | homeassistant/components/plex/media_player.py | Python | mit | 19,911 | 0.000904 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Module to configure Lenovo Switches.
# Lenovo Networking
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: enos_config
version_added: "2.5"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage Lenovo ENOS configuration sections
description:
- Lenovo ENOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ENOS configuration sections in
a deterministic way.
extends_documentation_fragment: enos
notes:
- Tested against ENOS 8.4.1.2
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is
mutually exclusive with I(lines), I(parents).
required: false
default: null
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block', 'config']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
required: false
default: 'configured by enos_config'
admin:
description:
- Enters into administration configuration mode for making config
changes to the device.
required: false
default: false
choices: [ "yes", "no" ]
"""
EXAMPLES = """
- name: configure top level configuration
enos_config:
"lines: hostname {{ inventory_hostname }}"
- name: configure interface settings
enos_config:
lines:
- enable
- ip ospf enable
parents: interface ip 13
- name: load a config from disk and replace the current config
enos_config:
src: config.cfg
backup: yes
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/enos01.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.enos.enos import load_config, get_config
from ansible.module_utils.network.enos.enos import enos_argument_spec
from ansible.module_utils.network.enos.enos import check_args
from ansible.module_utils.network.common.config import NetworkConfig, dumps
DEFAULT_COMMIT_COMMENT = 'configured by enos_config'
def get_running_config(module):
contents = module.params['config']
if not contents:
contents = get_config(module)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
replace_config = replace == 'config'
path = module.params['parents']
comment = module.params['comment']
admin = module.params['admin']
check_mode = module.check_mode
candidate = get_candidate(module)
if match != 'none' and replace != 'config':
        configobj = get_running_config(module)
commands = candidate.difference(configobj, path=path, match=match,
replace=replace)
else:
commands = candidate.items
if commands:
commands = dumps(commands, 'commands').split('\n')
if any((module.params['lines'], module.params['src'])):
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
diff = load_config(module, commands)
if diff:
result['diff'] = dict(prepared=diff)
result['changed'] = True
def main():
"""main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
config=dict(),
backup=dict(type='bool', default=False),
comment=dict(default=DEFAULT_COMMIT_COMMENT),
admin=dict(type='bool', default=False)
)
argument_spec.update(enos_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['src'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = get_config(module)
run(module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| hkariti/ansible | lib/ansible/modules/network/enos/enos_config.py | Python | gpl-3.0 | 9,930 | 0.000604 |
__author__ = 'sekely'
'''
we are using variables almost everywhere in the code.
variables are used to store results, calculations and much more.
think of it as the famous "x" from high school:
x = 5, right?
the only thing is that in Python "x" can store anything
'''
# try this code:
x = 5
y = x + 3
print(y)
# what about this? will it work?
x = 'hello'
y = ' '
z = 'world!'
w = x + y + z
print(w)
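# one more thing to try: the same name can later be re-bound to a value of a
# completely different type -- that's what "x can store anything" means
x = 42
x = 'now a string'
print(x)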
| idosekely/python-lessons | lesson_1/variables.py | Python | mit | 403 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyParameters(Model):
"""Parameters that define the representation of topology.
:param target_resource_group_name: The name of the target resource group
to perform topology on.
:type target_resource_group_name: str
:param target_virtual_network: The reference of the Virtual Network
resource.
:type target_virtual_network:
~azure.mgmt.network.v2017_11_01.models.SubResource
:param target_subnet: The reference of the Subnet resource.
:type target_subnet: ~azure.mgmt.network.v2017_11_01.models.SubResource
"""
_attribute_map = {
'target_resource_group_name': {'key': 'targetResourceGroupName', 'type': 'str'},
'target_virtual_network': {'key': 'targetVirtualNetwork', 'type': 'SubResource'},
'target_subnet': {'key': 'targetSubnet', 'type': 'SubResource'},
}
def __init__(self, target_resource_group_name=None, target_virtual_network=None, target_subnet=None):
super(TopologyParameters, self).__init__()
self.target_resource_group_name = target_resource_group_name
self.target_virtual_network = target_virtual_network
self.target_subnet = target_subnet
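# Construction sketch (hypothetical values, not part of the generated file):
#
#   params = TopologyParameters(
#       target_resource_group_name='my-resource-group',
#       target_virtual_network=SubResource(
#           id='/subscriptions/<sub-id>/resourceGroups/my-resource-group'
#              '/providers/Microsoft.Network/virtualNetworks/vnet1'),
#   )
#
# SubResource comes from the same models package.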
| AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/topology_parameters.py | Python | mit | 1,697 | 0.001768 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations(object):
"""UsagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.UsagesListResult"]
"""List network usages for a subscription.
:param location: The location where resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsagesListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.UsagesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsagesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._ ]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UsagesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'} # type: ignore
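# Usage sketch (not part of the generated file; assumes an already
# authenticated NetworkManagementClient instance named `client`):
#
#   for usage in client.usages.list("westus"):
#       print(usage.name.value, usage.current_value, usage.limit)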
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_usages_operations.py | Python | mit | 5,274 | 0.004361 |
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
## last $Author$
## last $Date$
## $Revision$
"""Work in progress"""
import copy
import numpy as N
import Biskit as B
import Biskit.tools as T
import Biskit.molUtils as MU
def index2map( index, len_i ):
"""
For example
    index2map([0,3,5,10], 12) ==> [0,0,0, 1,1, 2,2,2,2,2, 3,3]
@param index: list of starting positions, e.g. [0, 3, 8]
@type index: [ int ] or N.array of int
@param len_i: length of target map, e.g. 10
@type len_i: int
@return: list mapping atom positions to residue(/chain) number,
e.g. [0,0,0, 1,1,1,1,1, 2,2] from above example
@rtype: N.array of int (and of len_i length)
"""
index = N.concatenate( (index, [len_i]) )
delta = index[1:] - index[:-1]
return N.repeat( range(len(delta)), delta)
def map2index( imap ):
"""
Identify the starting positions of each residue(/chain) from a map
giving the residue(/chain) number of each atom.
@param imap: something like [0,0,0,1,1,1,1,1,2,2,2,...]
@type imap: [ int ]
@return: list of starting positions, e.g. [0, 3, 8, ...] in above ex.
@rtype: N.array of int
"""
try:
imap = N.concatenate( (imap, [imap[-1]] ) )
delta = imap[1:] - imap[:-1]
r = N.flatnonzero( delta ) + 1
return N.concatenate( ( [0], r ) )
except IndexError:
## handle empty imap parameter
return N.zeros(0)
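## Round-trip sketch (added for illustration, not in the original module):
## on well-formed input the two helpers invert each other, e.g.
##   index2map( [0, 3, 8], 10 )           -> [0,0,0, 1,1,1,1,1, 2,2]
##   map2index( [0,0,0, 1,1,1,1,1, 2,2] ) -> [0, 3, 8]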
class Model( object ):
"""
Model is intended to become the common base class for PDBModel and
Polymer.
"""
#: [str], default profiles for atoms
ATOM_KEYS = []
#: [str], default profiles for residues
RESIDUE_KEYS = []
#: [str], default profiles for chains
CHAIN_KEYS = []
def __init__(self):
"""
Create a new empty Model instance. Don't use this constructor directly.
"""
#: starting (atom) position of each residue
self._resIndex = None
#: starting position of each chain
self._chainIndex = None
#: values associated with atoms
self.atoms = B.ProfileCollection()
#: values associated with residues
self.residues = B.ProfileCollection()
#: values associated with chains or molecules
self.chains = B.ProfileCollection()
for key in self.ATOM_KEYS:
self.atoms.set( key, [], asarray=False )
for key in self.RESIDUE_KEYS:
self.residues.set( key, [], asarray=False )
for key in self.CHAIN_KEYS:
self.chains.set( key, [], asarray=False )
#: Meta info
self.info = { 'date':T.dateSortString() }
self.__version__ = self.version()
def version( self ):
return "Model $Revision$"
def __len__( self ):
return self.lenAtoms()
def __getitem__( self, k ):
"""
Get atom profile or profile item or CrossView for one atom::
m['prof1'] <==> m.atoms.get( 'prof1' )
m['prof1','info1'] <==> m.atoms.get( 'prof1','info1' )
m[10] <==> CrossView( m.atoms, 10 )
@return: profile OR meta infos thereof OR CrossView dict
@rtype: list OR array OR any OR CrossView
"""
if type( k ) is str:
if k in self.atoms:
return self.atoms.get( k )
if k in self.residues:
return self.residues.get( k )
if k in self.chains:
return self.chains.get( k )
if k in self.info:
return self.info[ k ]
        if type( k ) is tuple:
            key, infokey = k
            if key in self.atoms:
                return self.atoms[key, infokey]
            if key in self.residues:
                return self.residues[key, infokey]
            return self.chains[key, infokey]
return self.atoms[k]
def __setitem__( self, k, v ):
"""
Set atom profile or profile item (or meta info)::
m['prof1'] = range(10) <==> m.atoms.set( 'prof1', range(10) )
OR <==> m.residues.set( 'prof1', range(10) )
m['prof1','info1]='comment'
<==> m.atoms.setInfo('prof1',info1='comment')
OR <==> m.residues.setInfo('prof1',info1='comment')
m['version'] = '1.0.0' <==> m.info['version'] = '1.0.0'
but only if 'version' already exists in m.info
@return: item
@rtype: any
"""
if type( k ) is str:
if v is not None and len( v ) == self.lenAtoms():
return self.atoms.set( k, v )
if v is not None and len( v ) == self.lenResidues():
return self.residues.set( k, v )
if v is not None and len( v ) == self.lenChains():
return self.chains.set( k, v )
if k in self.atoms:
return self.atoms.set( k, v )
if k in self.residues:
return self.residues.set( k, v )
if k in self.chains:
return self.chains.set( k, v )
            if k in self.info:
                self.info[ k ] = v
                return
            raise B.ProfileError, \
                  'Value cannot clearly be assigned to either atom or '+\
                  'residue or chain profiles'
if type( k ) is tuple:
key, infokey = k
if key in self.atoms:
self.atoms[key, infokey] = v
return
if key in self.residues:
self.residues[key, infokey] = v
return
self.chains[key, infokey] = v
return
        raise B.ProfileError, \
              'Cannot interpret %r as profile name or profile info record' % k
def __getslice__( self, *arg ):
"""
Get list of CrossViews::
m[0:100:5] <==> [ CrossView(m.atoms, i) for i in range(0,100,5) ]
"""
return self.atoms.__getslice__( *arg )
def __iter__( self ):
return self.atoms.iterCrossViews()
def _concatHook( self, resultModel, nextModel ):
pass
def concat( self, *models ):
"""
Concatenate the given models (in given order) to this model.
Note for developers: concat is called recursively on the growing model.
The typical pattern for overriding this method is hence a bit
different. See _concatHook()!
@param models: models to concatenate
@type models: Model, Model, ...
@return: resulting model
@rtype: Model or subclass thereof
"""
if len( models ) == 0:
return self
m = models[0]
r = self.__class__()
r.residues = self.residues.concat( m.residues, )
r.atoms = self.atoms.concat( m.atoms )
r._resIndex = N.concatenate(
(self._resIndex, m._resIndex + self.lenAtoms()))
r._chainIndex =N.concatenate(
(self._chainIndex, m._chainIndex +self.lenAtoms()))
r.info = copy.deepcopy( self.info )
self._concatHook( r, m )
return r.concat( *models[1:] )
def lenAtoms(self):
"""
@return: number of atoms in this model
@rtype: int
"""
return self.atoms.profLength()
def lenResidues( self ):
"""
@return: number of residues in this model
@rtype: int
"""
if self._resIndex is None:
return 0
return len( self._resIndex )
def lenChains( self ):
"""
@return: number of chains in this model
@rtype: int
"""
if self._chainIndex is None:
return 0
return len( self._chainIndex )
def resMap( self ):
"""
Get list to map from any atom to a continuous residue numbering
(starting with 0).
@return: array [00011111122223333..], residue index for each atom
@rtype: list of int
"""
return index2map( self._resIndex, self.lenAtoms() )
def chainMap( self ):
"""
Get chain index of each atom.
@return: array 1 x N_atoms of int, e.g. [000000011111111111122222...]
@rtype: list of int
"""
return index2map( self._chainIndex, self.lenAtoms() )
def take( self, i ):
"""
Extract a Model with a subset of atoms::
take( atomIndices ) -> Polymer / sub-class.
@param i: atomIndices, positions to take in the order to take
@type i: list/array of int
@return: Model / sub-class
@rtype: Model
"""
r = self.__class__()
r.atoms = self.atoms.take( i )
## more tricky: rescue residue borders and extract residue profiles
new_resmap = N.take( self.resMap(), i )
## Note: this erases ordering information and fails for repeated residues
## -- see PDBModel version for fix
r._resIndex = map2index( new_resmap )
i_res = N.take( new_resmap, r._resIndex )
r.residues = self.residues.take( i_res )
## now the same with chains
new_chainmap = N.take( self.chainMap(), i )
## Note: this erases ordering information and fails for repeated residues
## -- see PDBModel version for fix
r._chainIndex = map2index( new_chainmap )
i_chains = N.take( new_chainmap, r._chainIndex )
r.chains = self.chains.take( i_chains )
## copy non-sequential infos
r.info = copy.deepcopy( self.info )
return r
| ostrokach/biskit | Biskit/Model.py | Python | gpl-3.0 | 10,389 | 0.02108 |
from kivy.app import App
from kivy.uix.button import Button
class RobotControlApp(App):
def build(self):
return Button(text='Hello World')
if __name__ == '__main__':
RobotControlApp().run()
| develru/RobotControlCenterKivy | main.py | Python | gpl-3.0 | 210 | 0 |
#!/usr/bin/env python3
# internal modules
import numericalmodel
# external modules
import numpy as np
EMPTY_ARRAY = np.array([])
class LinearDecayEquation(numericalmodel.equations.PrognosticEquation):
"""
Class for the linear decay equation
"""
def linear_factor(self, time = None ):
# take the "a" parameter from the input, interpolate it to the given
# "time" and return the negative value
return - self.input["a"](time)
def independent_addend(self, time = None ):
# take the "F" forcing parameter from the input, interpolate it to
# the given "time" and return it
return self.input["F"](time)
def nonlinear_addend(self, *args, **kwargs):
return 0 # nonlinear addend is always zero (LINEAR decay equation)
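# Taken together, the three methods above encode the ODE this class
# represents -- by the (assumed) convention of PrognosticEquation they are
# combined as
#
#   du/dt = linear_factor * u + nonlinear_addend + independent_addend
#         = - a * u + F
#
# i.e. exponential decay with rate "a" towards the equilibrium u = F / a
# (for constant positive a and constant F).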
| nobodyinperson/python3-numericalmodel | tests/test_data.py | Python | gpl-3.0 | 794 | 0.011335 |
import datetime
import hashlib
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.auth import get_user_model
from django.db import models, IntegrityError
from django.utils.translation import ugettext_lazy as _
from django.db.models import signals
from taggit.managers import TaggableManager
from guardian.shortcuts import get_objects_for_group
class GroupProfile(models.Model):
GROUP_CHOICES = [
("public", _("Public")),
("public-invite", _("Public (invite-only)")),
("private", _("Private")),
]
access_help_text = _('Public: Any registered user can view and join a public group.<br>'
                         'Public (invite-only): Any registered user can view the group. '
'Only invited users can join.<br>'
'Private: Registered users cannot see any details about the group, including membership. '
'Only invited users can join.')
email_help_text = _('Email used to contact one or all group members, '
'such as a mailing list, shared email, or exchange group.')
group = models.OneToOneField(Group)
title = models.CharField(max_length=50)
slug = models.SlugField(unique=True)
logo = models.ImageField(upload_to="people_group", blank=True)
description = models.TextField()
email = models.EmailField(
_('email'),
null=True,
blank=True,
help_text=email_help_text)
keywords = TaggableManager(
_('keywords'),
help_text=_("A space or comma-separated list of keywords"),
blank=True)
access = models.CharField(
max_length=15,
default="public'",
choices=GROUP_CHOICES,
help_text=access_help_text)
last_modified = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
group, created = Group.objects.get_or_create(name=self.slug)
self.group = group
super(GroupProfile, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
Group.objects.filter(name=self.slug).delete()
super(GroupProfile, self).delete(*args, **kwargs)
@classmethod
def groups_for_user(cls, user):
"""
Returns the groups that user is a member of. If the user is a superuser, all groups are returned.
"""
if user.is_authenticated():
if user.is_superuser:
return cls.objects.all()
return cls.objects.filter(groupmember__user=user)
return []
def __unicode__(self):
return self.title
def keyword_list(self):
"""
Returns a list of the Group's keywords.
"""
return [kw.name for kw in self.keywords.all()]
def resources(self, resource_type=None):
"""
Returns a generator of objects that this group has permissions on.
:param resource_type: Filters the queryset to objects with the same type.
"""
queryset = get_objects_for_group(
self.group, [
'base.view_resourcebase', 'base.change_resourcebase'], any_perm=True)
if resource_type:
queryset = [
item for item in queryset if hasattr(
item,
resource_type)]
for resource in queryset:
yield resource
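# Usage sketch (illustrative; the 'layer' attribute name is an assumption):
# for resource in group_profile.resources(resource_type='layer'):
#     print(resource)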
def member_queryset(self):
return self.groupmember_set.all()
def get_managers(self):
"""
Returns a queryset of the group's managers.
"""
return get_user_model().objects.filter(
id__in=self.member_queryset().filter(
role='manager').values_list(
"user",
flat=True))
def user_is_member(self, user):
if not user.is_authenticated():
return False
return user.id in self.member_queryset().values_list("user", flat=True)
def user_is_role(self, user, role):
if not user.is_authenticated():
return False
return self.member_queryset().filter(user=user, role=role).exists()
def can_view(self, user):
if self.access == "private":
return user.is_authenticated() and self.user_is_member(user)
else:
return True
def can_invite(self, user):
if not user.is_authenticated():
return False
return self.user_is_role(user, "manager")
def join(self, user, **kwargs):
if user == user.get_anonymous():
raise ValueError("The invited user cannot be anonymous")
member, created = GroupMember.objects.get_or_create(group=self, user=user, defaults=kwargs)
if created:
user.groups.add(self.group)
else:
raise ValueError("The invited user \"{0}\" is already a member".format(user.username))
def invite(self, user, from_user, role="member", send=True):
params = dict(role=role, from_user=from_user)
if isinstance(user, get_user_model()):
params["user"] = user
params["email"] = user.email
else:
params["email"] = user
bits = [
settings.SECRET_KEY,
params["email"],
str(datetime.datetime.now()),
settings.SECRET_KEY
]
params["token"] = hashlib.sha1("".join(bits)).hexdigest()
# If an invitation already exists, re-use it.
try:
invitation = self.invitations.create(**params)
except IntegrityError:
invitation = self.invitations.get(
group=self,
email=params["email"])
if send:
invitation.send(from_user)
return invitation
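# Usage sketch (illustrative, not from the original source):
# invitation = profile.invite('new.member@example.com', request.user,
#                             role='member')
# creates (or re-uses) a GroupInvitation and, by default, sends it.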
@models.permalink
def get_absolute_url(self):
return ('group_detail', (), {'slug': self.slug})
@property
def class_name(self):
return self.__class__.__name__
class GroupMember(models.Model):
group = models.ForeignKey(GroupProfile)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
role = models.CharField(max_length=10, choices=[
("manager", _("Manager")),
("member", _("Member")),
])
joined = models.DateTimeField(default=datetime.datetime.now)
class GroupInvitation(models.Model):
group = models.ForeignKey(GroupProfile, related_name="invitations")
token = models.CharField(max_length=40)
email = models.EmailField()
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
related_name="pg_invitations_received")
from_user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="pg_invitations_sent")
role = models.CharField(max_length=10, choices=[
("manager", _("Manager")),
("member", _("Member")),
])
state = models.CharField(
max_length=10,
choices=(
("sent", _("Sent")),
("accepted", _("Accepted")),
("declined", _("Declined")),
),
default="sent",
)
created = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return "%s to %s" % (self.email, self.group.title)
class Meta:
unique_together = [("group", "email")]
# def send(self, from_user):
# current_site = Site.objects.get_current()
# domain = unicode(current_site.domain)
# ctx = {
# "invite": self,
# "group": self.group,
# "from_user": from_user,
# "domain": domain,
# }
# subject = render_to_string("groups/email/invite_user_subject.txt", ctx)
# message = render_to_string("groups/email/invite_user.txt", ctx)
# TODO Send a notification rather than a mail
# send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [self.email])
def accept(self, user):
if not user.is_authenticated() or user == user.get_anonymous():
raise ValueError("You must log in to accept invitations")
if not user.email == self.email:
raise ValueError(
"You can't accept an invitation that wasn't for you")
self.group.join(user, role=self.role)
self.state = "accepted"
self.user = user
self.save()
def decline(self, user):
if not user.is_authenticated() or user == user.get_anonymous():
raise ValueError("You must log in to decline invitations")
if not user.email == self.email:
raise ValueError(
"You can't decline an invitation that wasn't for you")
self.state = "declined"
self.save()
def group_pre_delete(instance, sender, **kwargs):
"""Make sure that the anonymous group is not deleted"""
if instance.name == 'anonymous':
raise Exception('Deletion of the anonymous group is\
not permitted as will break the geonode permissions system')
signals.pre_delete.connect(group_pre_delete, sender=Group)
| USStateDept/geonode | geonode/groups/models.py | Python | gpl-3.0 | 9,016 | 0.001331 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import versionutils
import six.moves.urllib.parse as urlparse
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql import false
from sqlalchemy.sql import true
import nova.conf
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
from nova.objects import base
from nova.objects import fields
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def _parse_netloc(netloc):
"""Parse a user:pass@host:port and return a dict suitable for formatting
a cell mapping template.
"""
these = {
'username': None,
'password': None,
'hostname': None,
'port': None,
}
if '@' in netloc:
userpass, hostport = netloc.split('@', 1)
else:
hostport = netloc
userpass = ''
if hostport.startswith('['):
host_end = hostport.find(']')
if host_end < 0:
raise ValueError('Invalid IPv6 URL')
these['hostname'] = hostport[1:host_end]
these['port'] = hostport[host_end + 1:]
elif ':' in hostport:
these['hostname'], these['port'] = hostport.split(':', 1)
else:
these['hostname'] = hostport
if ':' in userpass:
these['username'], these['password'] = userpass.split(':', 1)
else:
these['username'] = userpass
return these
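# Illustrative example (derived from the code above, not from the original
# source): _parse_netloc('bob:s3cret@dbhost:3306') returns
# {'username': 'bob', 'password': 's3cret',
#  'hostname': 'dbhost', 'port': '3306'}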
@base.NovaObjectRegistry.register
class CellMapping(base.NovaTimestampObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added disabled field
VERSION = '1.1'
CELL0_UUID = '00000000-0000-0000-0000-000000000000'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(),
'name': fields.StringField(nullable=True),
'transport_url': fields.StringField(),
'database_connection': fields.StringField(),
'disabled': fields.BooleanField(default=False),
}
def obj_make_compatible(self, primitive, target_version):
super(CellMapping, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 1):
if 'disabled' in primitive:
del primitive['disabled']
@property
def identity(self):
if 'name' in self and self.name:
return '%s(%s)' % (self.uuid, self.name)
else:
return self.uuid
@staticmethod
def _format_url(url, default):
default_url = urlparse.urlparse(default)
subs = {
'username': default_url.username,
'password': default_url.password,
'hostname': default_url.hostname,
'port': default_url.port,
'scheme': default_url.scheme,
'query': default_url.query,
'fragment': default_url.fragment,
'path': default_url.path.lstrip('/'),
}
# NOTE(danms): oslo.messaging has an extended format for the URL
# which we need to support:
# scheme://user:pass@host:port[,user1:pass@host1:port, ...]/path
# Encode these values, if they exist, as indexed keys like
# username1, password1, hostname1, port1.
if ',' in default_url.netloc:
netlocs = default_url.netloc.split(',')
index = 0
for netloc in netlocs:
index += 1
these = _parse_netloc(netloc)
for key in these:
subs['%s%i' % (key, index)] = these[key]
return url.format(**subs)
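# Illustrative example (assumed values, not from the original source): with
# [database]/connection = 'mysql://nova:pass@dbhost/nova', a cell template
# such as 'mysql://{username}:{password}@{hostname}/nova_cell1' is filled
# in as 'mysql://nova:pass@dbhost/nova_cell1'.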
@staticmethod
def _format_db_url(url):
if CONF.database.connection is None:
if '{' in url:
LOG.error('Cell mapping database_connection is a template, '
'but [database]/connection is not set')
return url
try:
return CellMapping._format_url(url, CONF.database.connection)
except Exception:
LOG.exception('Failed to parse [database]/connection to '
'format cell mapping')
return url
@staticmethod
def _format_mq_url(url):
if CONF.transport_url is None:
if '{' in url:
LOG.error('Cell mapping transport_url is a template, but '
'[DEFAULT]/transport_url is not set')
return url
try:
return CellMapping._format_url(url, CONF.transport_url)
except Exception:
LOG.exception('Failed to parse [DEFAULT]/transport_url to '
'format cell mapping')
return url
@staticmethod
def _from_db_object(context, cell_mapping, db_cell_mapping):
for key in cell_mapping.fields:
val = db_cell_mapping[key]
if key == 'database_connection':
val = cell_mapping._format_db_url(val)
elif key == 'transport_url':
val = cell_mapping._format_mq_url(val)
setattr(cell_mapping, key, val)
cell_mapping.obj_reset_changes()
cell_mapping._context = context
return cell_mapping
@staticmethod
@db_api.api_context_manager.reader
def _get_by_uuid_from_db(context, uuid):
db_mapping = context.session.query(api_models.CellMapping).filter_by(
uuid=uuid).first()
if not db_mapping:
raise exception.CellMappingNotFound(uuid=uuid)
return db_mapping
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
db_mapping = cls._get_by_uuid_from_db(context, uuid)
return cls._from_db_object(context, cls(), db_mapping)
@staticmethod
@db_api.api_context_manager.writer
def _create_in_db(context, updates):
db_mapping = api_models.CellMapping()
db_mapping.update(updates)
db_mapping.save(context.session)
return db_mapping
@base.remotable
def create(self):
db_mapping = self._create_in_db(self._context, self.obj_get_changes())
self._from_db_object(self._context, self, db_mapping)
@staticmethod
@db_api.api_context_manager.writer
def _save_in_db(context, uuid, updates):
db_mapping = context.session.query(
api_models.CellMapping).filter_by(uuid=uuid).first()
if not db_mapping:
raise exception.CellMappingNotFound(uuid=uuid)
db_mapping.update(updates)
context.session.add(db_mapping)
return db_mapping
@base.remotable
def save(self):
changes = self.obj_get_changes()
db_mapping = self._save_in_db(self._context, self.uuid, changes)
self._from_db_object(self._context, self, db_mapping)
self.obj_reset_changes()
@staticmethod
@db_api.api_context_manager.writer
def _destroy_in_db(context, uuid):
result = context.session.query(api_models.CellMapping).filter_by(
uuid=uuid).delete()
if not result:
raise exception.CellMappingNotFound(uuid=uuid)
@base.remotable
def destroy(self):
self._destroy_in_db(self._context, self.uuid)
def is_cell0(self):
return self.obj_attr_is_set('uuid') and self.uuid == self.CELL0_UUID
@base.NovaObjectRegistry.register
class CellMappingList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Add get_by_disabled()
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('CellMapping'),
}
@staticmethod
@db_api.api_context_manager.reader
def _get_all_from_db(context):
return context.session.query(api_models.CellMapping).order_by(
asc(api_models.CellMapping.id)).all()
@base.remotable_classmethod
def get_all(cls, context):
db_mappings = cls._get_all_from_db(context)
return base.obj_make_list(context, cls(), CellMapping, db_mappings)
@staticmethod
@db_api.api_context_manager.reader
def _get_by_disabled_from_db(context, disabled):
if disabled:
return context.session.query(api_models.CellMapping).filter_by(
disabled=true()).order_by(asc(api_models.CellMapping.id)).all()
else:
return context.session.query(api_models.CellMapping).filter_by(
disabled=false()).order_by(asc(
api_models.CellMapping.id)).all()
@base.remotable_classmethod
def get_by_disabled(cls, context, disabled):
db_mappings = cls._get_by_disabled_from_db(context, disabled)
return base.obj_make_list(context, cls(), CellMapping, db_mappings)
@staticmethod
@db_api.api_context_manager.reader
def _get_by_project_id_from_db(context, project_id):
mappings = context.session.query(
api_models.InstanceMapping).\
filter_by(project_id=project_id).\
group_by(api_models.InstanceMapping.cell_id).\
options(joinedload('cell_mapping', innerjoin=True)).\
all()
return (mapping.cell_mapping for mapping in mappings)
@classmethod
def get_by_project_id(cls, context, project_id):
"""Return a list of CellMapping objects which correspond to cells in
which project_id has InstanceMappings.
"""
db_mappings = cls._get_by_project_id_from_db(context, project_id)
return base.obj_make_list(context, cls(), CellMapping, db_mappings)
| gooddata/openstack-nova | nova/objects/cell_mapping.py | Python | apache-2.0 | 10,122 | 0 |
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import nsiqcppstyle_checker
import unittest
import nsiqcppstyle_rulemanager
import nsiqcppstyle_reporter
import nsiqcppstyle_state
errors = []
def AddError(err):
errors.append(err)
def CheckErrorContent(msg):
for err in errors:
if err[1] == msg:
return True
return False
def MockError(token, category, message):
AddError((token, category, message))
print token, category, message
class nct(unittest.TestCase):
def setUp(self):
nsiqcppstyle_rulemanager.ruleManager.ResetRules()
nsiqcppstyle_rulemanager.ruleManager.ResetRegisteredRules()
nsiqcppstyle_state._nsiqcppstyle_state.verbose = True
nsiqcppstyle_reporter.Error = MockError
self.setUpRule()
global errors
errors = []
def Analyze(self, filename, data):
nsiqcppstyle_checker.ProcessFile(nsiqcppstyle_rulemanager.ruleManager, filename, data)
| DLR-SC/tigl | thirdparty/nsiqcppstyle/nsiqunittest/nsiqcppstyle_unittestbase.py | Python | apache-2.0 | 2,439 | 0.00451 |
# Intentionally left blank. A .pyc file of the same name should be created alongside it.
| DanielleWingler/UnderstandingDjango | TestSite/blog/__init__.py | Python | mit | 85 | 0.023529 |
import random, sys
if len(sys.argv) != 2:
print "Usage: python generate.py <how many instructions you want>"
sys.exit()
choices = ("(", ")")
output = ""
for x in range(int(sys.argv[1])):
output += random.choice(choices)
f = open("randout", "w")
f.write(output)
f.close()
print "Created an instruction set that is " + sys.argv[1] + " characters long"
| b4ux1t3/adventofcode2015 | day1/generate.py | Python | mit | 366 | 0.005464 |
#!/usr/bin/python
# UDPPingerServer.py
# We will need the following module to generate randomized lost packets
import random
from socket import *
# Create a UDP socket
# Notice the use of SOCK_DGRAM for UDP packets
serverSocket = socket(AF_INET, SOCK_DGRAM)
# Assign IP address and port number to socket
serverSocket.bind(('', 12026))
print("The Server is ready to receive!")
while True:
# Generate random number in the range of 0 to 10
rand = random.randint(0, 10)
# Receive the client packet along with the address it is coming from
message, address = serverSocket.recvfrom(2048)
# Capitalize the message from the client
message = message.upper()
# If rand is less than 4, we consider the packet lost and do not respond
if rand < 4:
continue
# Otherwise, the server responds
serverSocket.sendto(message, address)
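# Companion client sketch (an assumption, not part of the original file):
# sends ten pings to the server above and reports each reply or timeout.
# The hostname, port, and message format are illustrative choices.
#
# from socket import *
# clientSocket = socket(AF_INET, SOCK_DGRAM)
# clientSocket.settimeout(1)  # wait at most one second per reply
# for seq in range(1, 11):
#     clientSocket.sendto(('Ping %d' % seq).encode(), ('localhost', 12026))
#     try:
#         reply, serverAddress = clientSocket.recvfrom(2048)
#         print(reply)
#     except timeout:
#         print('Request %d timed out' % seq)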
| jameslivulpi/socketprogramming | udpServer.py | Python | gpl-3.0 | 872 | 0.001147 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from types import GeneratorType
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import ET
from libcloud.common.types import InvalidCredsError
# InvalidRequestError is raised in tests below but was never imported in the
# original file; assuming it lives in libcloud.common.exceptions.
from libcloud.common.exceptions import InvalidRequestError
from libcloud.common.dimensiondata import (
DimensionDataAPIException,
NetworkDomainServicePlan,
)
from libcloud.common.dimensiondata import (
DimensionDataServerCpuSpecification,
DimensionDataServerDisk,
DimensionDataServerVMWareTools,
)
from libcloud.common.dimensiondata import DimensionDataTag, DimensionDataTagKey
from libcloud.common.dimensiondata import (
DimensionDataIpAddress,
DimensionDataIpAddressList,
DimensionDataChildIpAddressList,
DimensionDataPortList,
DimensionDataPort,
DimensionDataChildPortList,
)
from libcloud.common.dimensiondata import TYPES_URN
from libcloud.compute.drivers.dimensiondata import (
DimensionDataNodeDriver as DimensionData,
)
from libcloud.compute.drivers.dimensiondata import DimensionDataNic
from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
from libcloud.utils.xml import fixxpath, findtext, findall
class DimensionData_v2_4_Tests(unittest.TestCase):
def setUp(self):
DimensionData.connectionCls.active_api_version = "2.4"
DimensionData.connectionCls.conn_class = DimensionDataMockHttp
DimensionDataMockHttp.type = None
self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
def test_invalid_region(self):
with self.assertRaises(ValueError):
DimensionData(*DIMENSIONDATA_PARAMS, region="blah")
def test_invalid_creds(self):
DimensionDataMockHttp.type = "UNAUTHORIZED"
with self.assertRaises(InvalidCredsError):
self.driver.list_nodes()
def test_get_account_details(self):
DimensionDataMockHttp.type = None
ret = self.driver.connection.get_account_details()
self.assertEqual(ret.full_name, "Test User")
self.assertEqual(ret.first_name, "Test")
self.assertEqual(ret.email, "[email protected]")
def test_list_locations_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
self.assertEqual(len(ret), 5)
first_loc = ret[0]
self.assertEqual(first_loc.id, "NA3")
self.assertEqual(first_loc.name, "US - West")
self.assertEqual(first_loc.country, "US")
def test_list_nodes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 7)
def test_node_extras(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertTrue(
isinstance(ret[0].extra["vmWareTools"], DimensionDataServerVMWareTools)
)
self.assertTrue(
isinstance(ret[0].extra["cpu"], DimensionDataServerCpuSpecification)
)
self.assertTrue(isinstance(ret[0].extra["disks"], list))
self.assertTrue(isinstance(ret[0].extra["disks"][0], DimensionDataServerDisk))
self.assertEqual(ret[0].extra["disks"][0].size_gb, 10)
self.assertTrue(isinstance(ret[1].extra["disks"], list))
self.assertTrue(isinstance(ret[1].extra["disks"][0], DimensionDataServerDisk))
self.assertEqual(ret[1].extra["disks"][0].size_gb, 10)
def test_server_states(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertTrue(ret[0].state == "running")
self.assertTrue(ret[1].state == "starting")
self.assertTrue(ret[2].state == "stopping")
self.assertTrue(ret[3].state == "reconfiguring")
self.assertTrue(ret[4].state == "running")
self.assertTrue(ret[5].state == "terminated")
self.assertTrue(ret[6].state == "stopped")
self.assertEqual(len(ret), 7)
def test_list_nodes_response_PAGINATED(self):
DimensionDataMockHttp.type = "PAGINATED"
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 9)
def test_paginated_mcp2_call_EMPTY(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "EMPTY"
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2(
"server/server"
)
empty_node_list = []
for node_list in node_list_generator:
empty_node_list.extend(node_list)
self.assertTrue(len(empty_node_list) == 0)
def test_paginated_mcp2_call_PAGED_THEN_EMPTY(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "PAGED_THEN_EMPTY"
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2(
"server/server"
)
final_node_list = []
for node_list in node_list_generator:
final_node_list.extend(node_list)
self.assertTrue(len(final_node_list) == 2)
def test_paginated_mcp2_call_with_page_size(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "PAGESIZE50"
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2(
"server/server", page_size=50
)
self.assertTrue(isinstance(node_list_generator, GeneratorType))
# We're making sure here the filters make it to the URL
# See _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS for asserts
def test_list_nodes_response_strings_ALLFILTERS(self):
DimensionDataMockHttp.type = "ALLFILTERS"
ret = self.driver.list_nodes(
ex_location="fake_loc",
ex_name="fake_name",
ex_ipv6="fake_ipv6",
ex_ipv4="fake_ipv4",
ex_vlan="fake_vlan",
ex_image="fake_image",
ex_deployed=True,
ex_started=True,
ex_state="fake_state",
ex_network="fake_network",
ex_network_domain="fake_network_domain",
)
self.assertTrue(isinstance(ret, list))
self.assertEqual(len(ret), 7)
node = ret[3]
self.assertTrue(isinstance(node.extra["disks"], list))
self.assertTrue(isinstance(node.extra["disks"][0], DimensionDataServerDisk))
self.assertEqual(node.size.id, "1")
self.assertEqual(node.image.id, "3ebf3c0f-90fe-4a8b-8585-6e65b316592c")
self.assertEqual(node.image.name, "WIN2008S/32")
disk = node.extra["disks"][0]
self.assertEqual(disk.id, "c2e1f199-116e-4dbc-9960-68720b832b0a")
self.assertEqual(disk.scsi_id, 0)
self.assertEqual(disk.size_gb, 50)
self.assertEqual(disk.speed, "STANDARD")
self.assertEqual(disk.state, "NORMAL")
def test_list_nodes_response_LOCATION(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
first_loc = ret[0]
ret = self.driver.list_nodes(ex_location=first_loc)
for node in ret:
self.assertEqual(node.extra["datacenterId"], "NA3")
def test_list_nodes_response_LOCATION_STR(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes(ex_location="NA3")
for node in ret:
self.assertEqual(node.extra["datacenterId"], "NA3")
def test_list_sizes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_sizes()
self.assertEqual(len(ret), 1)
size = ret[0]
self.assertEqual(size.name, "default")
def test_reboot_node_response(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = node.reboot()
self.assertTrue(ret is True)
def test_reboot_node_response_INPROGRESS(self):
DimensionDataMockHttp.type = "INPROGRESS"
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
with self.assertRaises(DimensionDataAPIException):
node.reboot()
def test_destroy_node_response(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = node.destroy()
self.assertTrue(ret is True)
def test_destroy_node_response_RESOURCE_BUSY(self):
DimensionDataMockHttp.type = "INPROGRESS"
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
with self.assertRaises(DimensionDataAPIException):
node.destroy()
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 3)
self.assertEqual(images[0].name, "RedHat 6 64-bit 2 CPU")
self.assertEqual(images[0].id, "c14b1a46-2428-44c1-9c1a-b20e6418d08c")
self.assertEqual(images[0].extra["location"].id, "NA9")
self.assertEqual(images[0].extra["cpu"].cpu_count, 2)
self.assertEqual(images[0].extra["OS_displayName"], "REDHAT6/64")
def test_clean_failed_deployment_response_with_node(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_clean_failed_deployment(node)
self.assertTrue(ret is True)
def test_clean_failed_deployment_response_with_node_id(self):
node = "e75ead52-692f-4314-8725-c8a4f4d13a87"
ret = self.driver.ex_clean_failed_deployment(node)
self.assertTrue(ret is True)
def test_ex_list_customer_images(self):
images = self.driver.ex_list_customer_images()
self.assertEqual(len(images), 3)
self.assertEqual(images[0].name, "ImportedCustomerImage")
self.assertEqual(images[0].id, "5234e5c7-01de-4411-8b6e-baeb8d91cf5d")
self.assertEqual(images[0].extra["location"].id, "NA9")
self.assertEqual(images[0].extra["cpu"].cpu_count, 4)
self.assertEqual(images[0].extra["OS_displayName"], "REDHAT6/64")
def test_create_mcp1_node_optional_param(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
network = self.driver.ex_list_networks()[0]
cpu_spec = DimensionDataServerCpuSpecification(
cpu_count="4", cores_per_socket="2", performance="STANDARD"
)
disks = [DimensionDataServerDisk(scsi_id="0", speed="HIGHPERFORMANCE")]
node = self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
ex_memory_gb=8,
ex_disks=disks,
ex_cpu_specification=cpu_spec,
ex_primary_dns="10.0.0.5",
ex_secondary_dns="10.0.0.6",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_mcp1_node_response_no_pass_random_gen(self):
image = self.driver.list_images()[0]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" in node.extra)
def test_create_mcp1_node_response_no_pass_customer_windows(self):
image = self.driver.ex_list_customer_images()[1]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" in node.extra)
def test_create_mcp1_node_response_no_pass_customer_windows_STR(self):
image = self.driver.ex_list_customer_images()[1].id
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" in node.extra)
def test_create_mcp1_node_response_no_pass_customer_linux(self):
image = self.driver.ex_list_customer_images()[0]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" not in node.extra)
def test_create_mcp1_node_response_no_pass_customer_linux_STR(self):
image = self.driver.ex_list_customer_images()[0].id
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" not in node.extra)
def test_create_mcp1_node_response_STR(self):
rootPw = "pass123"
image = self.driver.list_images()[0].id
network = self.driver.ex_list_networks()[0].id
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_response_network_domain(self):
rootPw = NodeAuthPassword("pass123")
location = self.driver.ex_get_location_by_id("NA9")
image = self.driver.list_images(location=location)[0]
network_domain = self.driver.ex_list_network_domains(location=location)[0]
vlan = self.driver.ex_list_vlans(location=location)[0]
cpu = DimensionDataServerCpuSpecification(
cpu_count=4, cores_per_socket=1, performance="HIGHPERFORMANCE"
)
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain=network_domain,
ex_vlan=vlan,
ex_is_started=False,
ex_cpu_specification=cpu,
ex_memory_gb=4,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_response_network_domain_STR(self):
rootPw = NodeAuthPassword("pass123")
location = self.driver.ex_get_location_by_id("NA9")
image = self.driver.list_images(location=location)[0]
network_domain = self.driver.ex_list_network_domains(location=location)[0].id
vlan = self.driver.ex_list_vlans(location=location)[0].id
cpu = DimensionDataServerCpuSpecification(
cpu_count=4, cores_per_socket=1, performance="HIGHPERFORMANCE"
)
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain=network_domain,
ex_vlan=vlan,
ex_is_started=False,
ex_cpu_specification=cpu,
ex_memory_gb=4,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_mcp1_node_no_network(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(InvalidRequestError):
self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network=None,
ex_is_started=False,
)
def test_create_node_mcp1_ipv4(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network="fakenetwork",
ex_primary_ipv4="10.0.0.1",
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_mcp1_network(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network="fakenetwork",
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_mcp2_vlan(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_vlan="fakevlan",
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_mcp2_ipv4(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_ipv4="10.0.0.1",
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_network_domain_no_vlan_or_ipv4(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fake_network_domain",
ex_is_started=False,
)
def test_create_node_response(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_vlan="fakevlan",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_ms_time_zone(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_vlan="fakevlan",
ex_microsoft_time_zone="040",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_ambigious_mcps_fail(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_network="fakenetwork",
ex_primary_nic_vlan="fakevlan",
)
def test_create_node_no_network_domain_fail(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test3", image=image, auth=rootPw, ex_primary_nic_vlan="fakevlan"
)
def test_create_node_no_primary_nic_fail(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
)
def test_create_node_primary_vlan_nic(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_vlan="fakevlan",
ex_primary_nic_network_adapter="v1000",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_primary_ipv4(self):
rootPw = "pass123"
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_both_primary_nic_and_vlan_fail(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_primary_nic_vlan="fakevlan",
)
def test_create_node_cpu_specification(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
cpu_spec = DimensionDataServerCpuSpecification(
cpu_count="4", cores_per_socket="2", performance="STANDARD"
)
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_cpu_specification=cpu_spec,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_memory(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_memory_gb=8,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_disks(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
disks = [DimensionDataServerDisk(scsi_id="0", speed="HIGHPERFORMANCE")]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_disks=disks,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_disks_fail(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
disks = "blah"
with self.assertRaises(TypeError):
self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_disks=disks,
)
def test_create_node_ipv4_gateway(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_ipv4_gateway="10.2.2.2",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_network_domain_no_vlan_no_ipv4_fail(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fake_network_domain",
ex_is_started=False,
)
def test_create_node_mcp2_additional_nics_legacy(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
additional_vlans = ["fakevlan1", "fakevlan2"]
additional_ipv4 = ["10.0.0.2", "10.0.0.3"]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_ipv4="10.0.0.1",
ex_additional_nics_vlan=additional_vlans,
ex_additional_nics_ipv4=additional_ipv4,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_bad_additional_nics_ipv4(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(TypeError):
self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fake_network_domain",
ex_vlan="fake_vlan",
ex_additional_nics_ipv4="badstring",
ex_is_started=False,
)
def test_create_node_additional_nics(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
nic1 = DimensionDataNic(vlan="fake_vlan", network_adapter_name="v1000")
nic2 = DimensionDataNic(private_ip_v4="10.1.1.2", network_adapter_name="v1000")
additional_nics = [nic1, nic2]
node = self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_additional_nics=additional_nics,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_additional_nics_vlan_ipv4_coexist_fail(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
nic1 = DimensionDataNic(
private_ip_v4="10.1.1.1", vlan="fake_vlan", network_adapter_name="v1000"
)
nic2 = DimensionDataNic(
private_ip_v4="10.1.1.2", vlan="fake_vlan2", network_adapter_name="v1000"
)
additional_nics = [nic1, nic2]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_additional_nics=additional_nics,
ex_is_started=False,
)
def test_create_node_additional_nics_invalid_input_fail(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
additional_nics = "blah"
with self.assertRaises(TypeError):
self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_additional_nics=additional_nics,
ex_is_started=False,
)
def test_create_node_additional_nics_vlan_ipv4_not_exist_fail(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
nic1 = DimensionDataNic(network_adapter_name="v1000")
nic2 = DimensionDataNic(network_adapter_name="v1000")
additional_nics = [nic1, nic2]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_additional_nics=additional_nics,
ex_is_started=False,
)
def test_create_node_bad_additional_nics_vlan(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(TypeError):
self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fake_network_domain",
ex_vlan="fake_vlan",
ex_additional_nics_vlan="badstring",
ex_is_started=False,
)
def test_create_node_mcp2_indicate_dns(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test node dns",
ex_network_domain="fakenetworkdomain",
ex_primary_ipv4="10.0.0.1",
ex_primary_dns="8.8.8.8",
ex_secondary_dns="8.8.4.4",
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_ex_shutdown_graceful(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_shutdown_graceful(node)
self.assertTrue(ret is True)
def test_ex_shutdown_graceful_INPROGRESS(self):
DimensionDataMockHttp.type = "INPROGRESS"
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_shutdown_graceful(node)
def test_ex_start_node(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_start_node(node)
self.assertTrue(ret is True)
def test_ex_start_node_INPROGRESS(self):
DimensionDataMockHttp.type = "INPROGRESS"
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_start_node(node)
def test_ex_power_off(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_power_off(node)
self.assertTrue(ret is True)
def test_ex_update_vm_tools(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_update_vm_tools(node)
self.assertTrue(ret is True)
def test_ex_power_off_INPROGRESS(self):
DimensionDataMockHttp.type = "INPROGRESS"
node = Node(
id="11",
name=None,
state="STOPPING",
public_ips=None,
private_ips=None,
driver=self.driver,
)
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_power_off(node)
def test_ex_reset(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_reset(node)
self.assertTrue(ret is True)
def test_ex_attach_node_to_vlan(self):
node = self.driver.ex_get_node_by_id("e75ead52-692f-4314-8725-c8a4f4d13a87")
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
ret = self.driver.ex_attach_node_to_vlan(node, vlan)
self.assertTrue(ret is True)
def test_ex_destroy_nic(self):
node = self.driver.ex_destroy_nic("a202e51b-41c0-4cfc-add0-b1c62fc0ecf6")
self.assertTrue(node)
def test_list_networks(self):
nets = self.driver.list_networks()
self.assertEqual(nets[0].name, "test-net1")
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_create_network(self):
location = self.driver.ex_get_location_by_id("NA9")
net = self.driver.ex_create_network(location, "Test Network", "test")
self.assertEqual(net.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e")
self.assertEqual(net.name, "Test Network")
def test_ex_create_network_NO_DESCRIPTION(self):
location = self.driver.ex_get_location_by_id("NA9")
net = self.driver.ex_create_network(location, "Test Network")
self.assertEqual(net.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e")
self.assertEqual(net.name, "Test Network")
def test_ex_delete_network(self):
net = self.driver.ex_list_networks()[0]
result = self.driver.ex_delete_network(net)
self.assertTrue(result)
def test_ex_rename_network(self):
net = self.driver.ex_list_networks()[0]
result = self.driver.ex_rename_network(net, "barry")
self.assertTrue(result)
def test_ex_create_network_domain(self):
location = self.driver.ex_get_location_by_id("NA9")
plan = NetworkDomainServicePlan.ADVANCED
net = self.driver.ex_create_network_domain(
location=location, name="test", description="test", service_plan=plan
)
self.assertEqual(net.name, "test")
self.assertTrue(net.id, "f14a871f-9a25-470c-aef8-51e13202e1aa")
def test_ex_create_network_domain_NO_DESCRIPTION(self):
location = self.driver.ex_get_location_by_id("NA9")
plan = NetworkDomainServicePlan.ADVANCED
net = self.driver.ex_create_network_domain(
location=location, name="test", service_plan=plan
)
self.assertEqual(net.name, "test")
self.assertTrue(net.id, "f14a871f-9a25-470c-aef8-51e13202e1aa")
def test_ex_get_network_domain(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
self.assertEqual(net.id, "8cdfd607-f429-4df6-9352-162cfc0891be")
self.assertEqual(net.description, "test2")
self.assertEqual(net.name, "test")
def test_ex_update_network_domain(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
net.name = "new name"
net2 = self.driver.ex_update_network_domain(net)
self.assertEqual(net2.name, "new name")
def test_ex_delete_network_domain(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
result = self.driver.ex_delete_network_domain(net)
self.assertTrue(result)
def test_ex_list_networks(self):
nets = self.driver.ex_list_networks()
self.assertEqual(nets[0].name, "test-net1")
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_network_domains(self):
nets = self.driver.ex_list_network_domains()
self.assertEqual(nets[0].name, "Aurora")
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_network_domains_ALLFILTERS(self):
DimensionDataMockHttp.type = "ALLFILTERS"
nets = self.driver.ex_list_network_domains(
location="fake_location",
name="fake_name",
service_plan="fake_plan",
state="fake_state",
)
self.assertEqual(nets[0].name, "Aurora")
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_vlans(self):
vlans = self.driver.ex_list_vlans()
self.assertEqual(vlans[0].name, "Primary")
def test_ex_list_vlans_ALLFILTERS(self):
DimensionDataMockHttp.type = "ALLFILTERS"
vlans = self.driver.ex_list_vlans(
location="fake_location",
network_domain="fake_network_domain",
name="fake_name",
ipv4_address="fake_ipv4",
ipv6_address="fake_ipv6",
state="fake_state",
)
self.assertEqual(vlans[0].name, "Primary")
def test_ex_create_vlan(
self,
):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
vlan = self.driver.ex_create_vlan(
network_domain=net,
name="test",
private_ipv4_base_address="10.3.4.0",
private_ipv4_prefix_size="24",
description="test vlan",
)
self.assertEqual(vlan.id, "0e56433f-d808-4669-821d-812769517ff8")
def test_ex_create_vlan_NO_DESCRIPTION(
self,
):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
vlan = self.driver.ex_create_vlan(
network_domain=net,
name="test",
private_ipv4_base_address="10.3.4.0",
private_ipv4_prefix_size="24",
)
self.assertEqual(vlan.id, "0e56433f-d808-4669-821d-812769517ff8")
def test_ex_get_vlan(self):
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
self.assertEqual(vlan.id, "0e56433f-d808-4669-821d-812769517ff8")
self.assertEqual(vlan.description, "test2")
self.assertEqual(vlan.status, "NORMAL")
self.assertEqual(vlan.name, "Production VLAN")
self.assertEqual(vlan.private_ipv4_range_address, "10.0.3.0")
self.assertEqual(vlan.private_ipv4_range_size, 24)
self.assertEqual(vlan.ipv6_range_size, 64)
self.assertEqual(vlan.ipv6_range_address, "2607:f480:1111:1153:0:0:0:0")
self.assertEqual(vlan.ipv4_gateway, "10.0.3.1")
self.assertEqual(vlan.ipv6_gateway, "2607:f480:1111:1153:0:0:0:1")
def test_ex_wait_for_state(self):
self.driver.ex_wait_for_state(
"NORMAL",
self.driver.ex_get_vlan,
vlan_id="0e56433f-d808-4669-821d-812769517ff8",
poll_interval=0.1,
)
def test_ex_wait_for_state_NODE(self):
self.driver.ex_wait_for_state(
"running",
self.driver.ex_get_node_by_id,
id="e75ead52-692f-4314-8725-c8a4f4d13a87",
poll_interval=0.1,
)
def test_ex_wait_for_state_FAIL(self):
with self.assertRaises(DimensionDataAPIException) as context:
self.driver.ex_wait_for_state(
"starting",
self.driver.ex_get_node_by_id,
id="e75ead52-692f-4314-8725-c8a4f4d13a87",
poll_interval=0.1,
timeout=0.1,
)
self.assertEqual(context.exception.code, "running")
self.assertTrue("timed out" in context.exception.msg)
def test_ex_update_vlan(self):
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
vlan.name = "new name"
vlan2 = self.driver.ex_update_vlan(vlan)
self.assertEqual(vlan2.name, "new name")
def test_ex_delete_vlan(self):
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
result = self.driver.ex_delete_vlan(vlan)
self.assertTrue(result)
def test_ex_expand_vlan(self):
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
vlan.private_ipv4_range_size = "23"
vlan = self.driver.ex_expand_vlan(vlan)
self.assertEqual(vlan.private_ipv4_range_size, "23")
def test_ex_add_public_ip_block_to_network_domain(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
block = self.driver.ex_add_public_ip_block_to_network_domain(net)
self.assertEqual(block.id, "9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8")
def test_ex_list_public_ip_blocks(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
blocks = self.driver.ex_list_public_ip_blocks(net)
self.assertEqual(blocks[0].base_ip, "168.128.4.18")
self.assertEqual(blocks[0].size, "2")
self.assertEqual(blocks[0].id, "9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8")
self.assertEqual(blocks[0].location.id, "NA9")
self.assertEqual(blocks[0].network_domain.id, net.id)
def test_ex_get_public_ip_block(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
block = self.driver.ex_get_public_ip_block(
"9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8"
)
self.assertEqual(block.base_ip, "168.128.4.18")
self.assertEqual(block.size, "2")
self.assertEqual(block.id, "9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8")
self.assertEqual(block.location.id, "NA9")
self.assertEqual(block.network_domain.id, net.id)
def test_ex_delete_public_ip_block(self):
block = self.driver.ex_get_public_ip_block(
"9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8"
)
result = self.driver.ex_delete_public_ip_block(block)
self.assertTrue(result)
def test_ex_list_firewall_rules(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
self.assertEqual(rules[0].id, "756cba02-b0bc-48f4-aea5-9445870b6148")
self.assertEqual(
rules[0].network_domain.id, "8cdfd607-f429-4df6-9352-162cfc0891be"
)
self.assertEqual(rules[0].name, "CCDEFAULT.BlockOutboundMailIPv4")
self.assertEqual(rules[0].action, "DROP")
self.assertEqual(rules[0].ip_version, "IPV4")
self.assertEqual(rules[0].protocol, "TCP")
self.assertEqual(rules[0].source.ip_address, "ANY")
self.assertTrue(rules[0].source.any_ip)
self.assertTrue(rules[0].destination.any_ip)
def test_ex_create_firewall_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
rule = self.driver.ex_create_firewall_rule(net, rules[0], "FIRST")
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_with_specific_source_ip(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
specific_source_ip_rule = list(
filter(lambda x: x.name == "SpecificSourceIP", rules)
)[0]
rule = self.driver.ex_create_firewall_rule(
net, specific_source_ip_rule, "FIRST"
)
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_with_source_ip(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
specific_source_ip_rule = list(
filter(lambda x: x.name == "SpecificSourceIP", rules)
)[0]
specific_source_ip_rule.source.any_ip = False
specific_source_ip_rule.source.ip_address = "10.0.0.1"
specific_source_ip_rule.source.ip_prefix_size = "15"
rule = self.driver.ex_create_firewall_rule(
net, specific_source_ip_rule, "FIRST"
)
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_with_any_ip(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
specific_source_ip_rule = list(
filter(lambda x: x.name == "SpecificSourceIP", rules)
)[0]
specific_source_ip_rule.source.any_ip = True
rule = self.driver.ex_create_firewall_rule(
net, specific_source_ip_rule, "FIRST"
)
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_ip_prefix_size(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_list_firewall_rules(net)[0]
rule.source.address_list_id = None
rule.source.any_ip = False
rule.source.ip_address = "10.2.1.1"
rule.source.ip_prefix_size = "10"
rule.destination.address_list_id = None
rule.destination.any_ip = False
rule.destination.ip_address = "10.0.0.1"
rule.destination.ip_prefix_size = "20"
self.driver.ex_create_firewall_rule(net, rule, "LAST")
def test_ex_create_firewall_rule_address_list(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_list_firewall_rules(net)[0]
rule.source.address_list_id = "12345"
rule.destination.address_list_id = "12345"
self.driver.ex_create_firewall_rule(net, rule, "LAST")
def test_ex_create_firewall_rule_port_list(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_list_firewall_rules(net)[0]
rule.source.port_list_id = "12345"
rule.destination.port_list_id = "12345"
self.driver.ex_create_firewall_rule(net, rule, "LAST")
def test_ex_create_firewall_rule_port(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_list_firewall_rules(net)[0]
rule.source.port_list_id = None
rule.source.port_begin = "8000"
rule.source.port_end = "8005"
rule.destination.port_list_id = None
rule.destination.port_begin = "7000"
rule.destination.port_end = "7005"
self.driver.ex_create_firewall_rule(net, rule, "LAST")
def test_ex_create_firewall_rule_ALL_VALUES(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
for rule in rules:
self.driver.ex_create_firewall_rule(net, rule, "LAST")
def test_ex_create_firewall_rule_WITH_POSITION_RULE(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
rule = self.driver.ex_create_firewall_rule(net, rules[-2], "BEFORE", rules[-1])
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_WITH_POSITION_RULE_STR(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
rule = self.driver.ex_create_firewall_rule(
net, rules[-2], "BEFORE", "RULE_WITH_SOURCE_AND_DEST"
)
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_FAIL_POSITION(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
with self.assertRaises(ValueError):
self.driver.ex_create_firewall_rule(net, rules[0], "BEFORE")
def test_ex_create_firewall_rule_FAIL_POSITION_WITH_RULE(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
with self.assertRaises(ValueError):
self.driver.ex_create_firewall_rule(
net, rules[0], "LAST", "RULE_WITH_SOURCE_AND_DEST"
)
def test_ex_get_firewall_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_set_firewall_rule_state(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
result = self.driver.ex_set_firewall_rule_state(rule, False)
self.assertTrue(result)
def test_ex_delete_firewall_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
result = self.driver.ex_delete_firewall_rule(rule)
self.assertTrue(result)
def test_ex_edit_firewall_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
rule.source.any_ip = True
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_source_ipaddresslist(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
rule.source.address_list_id = "802abc9f-45a7-4efb-9d5a-810082368222"
rule.source.any_ip = False
rule.source.ip_address = "10.0.0.1"
rule.source.ip_prefix_size = 10
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_destination_ipaddresslist(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
rule.destination.address_list_id = "802abc9f-45a7-4efb-9d5a-810082368222"
rule.destination.any_ip = False
rule.destination.ip_address = "10.0.0.1"
rule.destination.ip_prefix_size = 10
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_destination_ipaddress(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
rule.source.address_list_id = None
rule.source.any_ip = False
rule.source.ip_address = "10.0.0.1"
rule.source.ip_prefix_size = "10"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_source_ipaddress(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
rule.destination.address_list_id = None
rule.destination.any_ip = False
rule.destination.ip_address = "10.0.0.1"
rule.destination.ip_prefix_size = "10"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_with_relative_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
placement_rule = self.driver.ex_list_firewall_rules(network_domain=net)[-1]
result = self.driver.ex_edit_firewall_rule(
rule=rule, position="BEFORE", relative_rule_for_position=placement_rule
)
self.assertTrue(result)
def test_ex_edit_firewall_rule_with_relative_rule_by_name(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
placement_rule = self.driver.ex_list_firewall_rules(network_domain=net)[-1]
result = self.driver.ex_edit_firewall_rule(
rule=rule, position="BEFORE", relative_rule_for_position=placement_rule.name
)
self.assertTrue(result)
def test_ex_edit_firewall_rule_source_portlist(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
rule.source.port_list_id = "802abc9f-45a7-4efb-9d5a-810082368222"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_source_port(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
rule.source.port_list_id = None
rule.source.port_begin = "3"
rule.source.port_end = "10"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_destination_portlist(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
rule.destination.port_list_id = "802abc9f-45a7-4efb-9d5a-810082368222"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_destination_port(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
rule.destination.port_list_id = None
rule.destination.port_begin = "3"
rule.destination.port_end = "10"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_invalid_position_fail(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
with self.assertRaises(ValueError):
self.driver.ex_edit_firewall_rule(rule=rule, position="BEFORE")
def test_ex_edit_firewall_rule_invalid_position_relative_rule_fail(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(
net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c"
)
relative_rule = self.driver.ex_list_firewall_rules(network_domain=net)[-1]
with self.assertRaises(ValueError):
self.driver.ex_edit_firewall_rule(
rule=rule, position="FIRST", relative_rule_for_position=relative_rule
)
def test_ex_create_nat_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_create_nat_rule(net, "1.2.3.4", "4.3.2.1")
self.assertEqual(rule.id, "d31c2db0-be6b-4d50-8744-9a7a534b5fba")
def test_ex_list_nat_rules(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_nat_rules(net)
self.assertEqual(rules[0].id, "2187a636-7ebb-49a1-a2ff-5d617f496dce")
self.assertEqual(rules[0].internal_ip, "10.0.0.15")
self.assertEqual(rules[0].external_ip, "165.180.12.18")
def test_ex_get_nat_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_nat_rule(net, "2187a636-7ebb-49a1-a2ff-5d617f496dce")
self.assertEqual(rule.id, "2187a636-7ebb-49a1-a2ff-5d617f496dce")
self.assertEqual(rule.internal_ip, "10.0.0.16")
self.assertEqual(rule.external_ip, "165.180.12.19")
def test_ex_delete_nat_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_nat_rule(net, "2187a636-7ebb-49a1-a2ff-5d617f496dce")
result = self.driver.ex_delete_nat_rule(rule)
self.assertTrue(result)
def test_ex_enable_monitoring(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_enable_monitoring(node, "ADVANCED")
self.assertTrue(result)
def test_ex_disable_monitoring(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_disable_monitoring(node)
self.assertTrue(result)
def test_ex_change_monitoring_plan(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_update_monitoring_plan(node, "ESSENTIALS")
self.assertTrue(result)
def test_ex_add_storage_to_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_add_storage_to_node(node, 30, "PERFORMANCE")
self.assertTrue(result)
def test_ex_remove_storage_from_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_remove_storage_from_node(node, 0)
self.assertTrue(result)
def test_ex_change_storage_speed(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_change_storage_speed(node, 1, "PERFORMANCE")
self.assertTrue(result)
def test_ex_change_storage_size(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_change_storage_size(node, 1, 100)
self.assertTrue(result)
def test_ex_clone_node_to_image(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_clone_node_to_image(node, "my image", "a description")
self.assertTrue(result)
def test_ex_update_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_update_node(
node, "my new name", "a description", 2, 4048
)
self.assertTrue(result)
def test_ex_reconfigure_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_reconfigure_node(node, 4, 4, 1, "HIGHPERFORMANCE")
self.assertTrue(result)
def test_ex_get_location_by_id(self):
location = self.driver.ex_get_location_by_id("NA9")
        self.assertEqual(location.id, "NA9")
def test_ex_get_location_by_id_NO_LOCATION(self):
location = self.driver.ex_get_location_by_id(None)
self.assertIsNone(location)
def test_ex_get_base_image_by_id(self):
image_id = self.driver.list_images()[0].id
image = self.driver.ex_get_base_image_by_id(image_id)
self.assertEqual(image.extra["OS_type"], "UNIX")
def test_ex_get_customer_image_by_id(self):
image_id = self.driver.ex_list_customer_images()[1].id
image = self.driver.ex_get_customer_image_by_id(image_id)
self.assertEqual(image.extra["OS_type"], "WINDOWS")
def test_ex_get_image_by_id_base_img(self):
image_id = self.driver.list_images()[1].id
image = self.driver.ex_get_base_image_by_id(image_id)
self.assertEqual(image.extra["OS_type"], "WINDOWS")
def test_ex_get_image_by_id_customer_img(self):
image_id = self.driver.ex_list_customer_images()[0].id
image = self.driver.ex_get_customer_image_by_id(image_id)
self.assertEqual(image.extra["OS_type"], "UNIX")
def test_ex_get_image_by_id_customer_FAIL(self):
image_id = "FAKE_IMAGE_ID"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_get_base_image_by_id(image_id)
def test_ex_create_anti_affinity_rule(self):
node_list = self.driver.list_nodes()
success = self.driver.ex_create_anti_affinity_rule([node_list[0], node_list[1]])
self.assertTrue(success)
def test_ex_create_anti_affinity_rule_TUPLE(self):
node_list = self.driver.list_nodes()
success = self.driver.ex_create_anti_affinity_rule((node_list[0], node_list[1]))
self.assertTrue(success)
def test_ex_create_anti_affinity_rule_TUPLE_STR(self):
node_list = self.driver.list_nodes()
success = self.driver.ex_create_anti_affinity_rule(
(node_list[0].id, node_list[1].id)
)
self.assertTrue(success)
def test_ex_create_anti_affinity_rule_FAIL_STR(self):
node_list = "string"
with self.assertRaises(TypeError):
self.driver.ex_create_anti_affinity_rule(node_list)
def test_ex_create_anti_affinity_rule_FAIL_EXISTING(self):
node_list = self.driver.list_nodes()
DimensionDataMockHttp.type = "FAIL_EXISTING"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_create_anti_affinity_rule((node_list[0], node_list[1]))
def test_ex_delete_anti_affinity_rule(self):
net_domain = self.driver.ex_list_network_domains()[0]
rule = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)[0]
success = self.driver.ex_delete_anti_affinity_rule(rule)
self.assertTrue(success)
def test_ex_delete_anti_affinity_rule_STR(self):
net_domain = self.driver.ex_list_network_domains()[0]
rule = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)[0]
success = self.driver.ex_delete_anti_affinity_rule(rule.id)
self.assertTrue(success)
def test_ex_delete_anti_affinity_rule_FAIL(self):
net_domain = self.driver.ex_list_network_domains()[0]
rule = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)[0]
DimensionDataMockHttp.type = "FAIL"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_delete_anti_affinity_rule(rule)
def test_ex_list_anti_affinity_rules_NETWORK_DOMAIN(self):
net_domain = self.driver.ex_list_network_domains()[0]
rules = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 2)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_NETWORK(self):
network = self.driver.list_networks()[0]
rules = self.driver.ex_list_anti_affinity_rules(network=network)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 2)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_NODE(self):
node = self.driver.list_nodes()[0]
rules = self.driver.ex_list_anti_affinity_rules(node=node)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 2)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_PAGINATED(self):
net_domain = self.driver.ex_list_network_domains()[0]
DimensionDataMockHttp.type = "PAGINATED"
rules = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 4)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_ALLFILTERS(self):
net_domain = self.driver.ex_list_network_domains()[0]
DimensionDataMockHttp.type = "ALLFILTERS"
rules = self.driver.ex_list_anti_affinity_rules(
network_domain=net_domain, filter_id="FAKE_ID", filter_state="FAKE_STATE"
)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 2)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_BAD_ARGS(self):
with self.assertRaises(ValueError):
self.driver.ex_list_anti_affinity_rules(
network="fake_network", network_domain="fake_network_domain"
)
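    # The tag tests select fixture variants by setting DimensionDataMockHttp.type
    # (e.g. "ALLPARAMS", "BADREQUEST"). connection._get_orgId() is invoked first
    # so that the org-id lookup is resolved before the mock type is switched for
    # the request under test.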
def test_ex_create_tag_key(self):
success = self.driver.ex_create_tag_key("MyTestKey")
self.assertTrue(success)
def test_ex_create_tag_key_ALLPARAMS(self):
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "ALLPARAMS"
success = self.driver.ex_create_tag_key(
"MyTestKey",
description="Test Key Desc.",
value_required=False,
display_on_report=False,
)
self.assertTrue(success)
def test_ex_create_tag_key_BADREQUEST(self):
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "BADREQUEST"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_create_tag_key("MyTestKey")
def test_ex_list_tag_keys(self):
tag_keys = self.driver.ex_list_tag_keys()
self.assertTrue(isinstance(tag_keys, list))
self.assertTrue(isinstance(tag_keys[0], DimensionDataTagKey))
self.assertTrue(isinstance(tag_keys[0].id, str))
def test_ex_list_tag_keys_ALLFILTERS(self):
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "ALLFILTERS"
self.driver.ex_list_tag_keys(
id="fake_id",
name="fake_name",
value_required=False,
display_on_report=False,
)
def test_ex_get_tag_by_id(self):
tag = self.driver.ex_get_tag_key_by_id("d047c609-93d7-4bc5-8fc9-732c85840075")
self.assertTrue(isinstance(tag, DimensionDataTagKey))
def test_ex_get_tag_by_id_NOEXIST(self):
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "NOEXIST"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_get_tag_key_by_id("d047c609-93d7-4bc5-8fc9-732c85840075")
def test_ex_get_tag_by_name(self):
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "SINGLE"
tag = self.driver.ex_get_tag_key_by_name("LibcloudTest")
self.assertTrue(isinstance(tag, DimensionDataTagKey))
def test_ex_get_tag_by_name_NOEXIST(self):
with self.assertRaises(ValueError):
self.driver.ex_get_tag_key_by_name("LibcloudTest")
def test_ex_modify_tag_key_NAME(self):
tag_key = self.driver.ex_list_tag_keys()[0]
DimensionDataMockHttp.type = "NAME"
success = self.driver.ex_modify_tag_key(tag_key, name="NewName")
self.assertTrue(success)
def test_ex_modify_tag_key_NOTNAME(self):
tag_key = self.driver.ex_list_tag_keys()[0]
DimensionDataMockHttp.type = "NOTNAME"
success = self.driver.ex_modify_tag_key(
tag_key, description="NewDesc", value_required=False, display_on_report=True
)
self.assertTrue(success)
def test_ex_modify_tag_key_NOCHANGE(self):
tag_key = self.driver.ex_list_tag_keys()[0]
DimensionDataMockHttp.type = "NOCHANGE"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_modify_tag_key(tag_key)
def test_ex_remove_tag_key(self):
tag_key = self.driver.ex_list_tag_keys()[0]
success = self.driver.ex_remove_tag_key(tag_key)
self.assertTrue(success)
def test_ex_remove_tag_key_NOEXIST(self):
tag_key = self.driver.ex_list_tag_keys()[0]
DimensionDataMockHttp.type = "NOEXIST"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_remove_tag_key(tag_key)
def test_ex_apply_tag_to_asset(self):
node = self.driver.list_nodes()[0]
success = self.driver.ex_apply_tag_to_asset(node, "TagKeyName", "FakeValue")
self.assertTrue(success)
def test_ex_apply_tag_to_asset_NOVALUE(self):
node = self.driver.list_nodes()[0]
DimensionDataMockHttp.type = "NOVALUE"
success = self.driver.ex_apply_tag_to_asset(node, "TagKeyName")
self.assertTrue(success)
def test_ex_apply_tag_to_asset_NOTAGKEY(self):
node = self.driver.list_nodes()[0]
DimensionDataMockHttp.type = "NOTAGKEY"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_apply_tag_to_asset(node, "TagKeyNam")
def test_ex_apply_tag_to_asset_BADASSETTYPE(self):
network = self.driver.list_networks()[0]
DimensionDataMockHttp.type = "NOTAGKEY"
with self.assertRaises(TypeError):
self.driver.ex_apply_tag_to_asset(network, "TagKeyNam")
def test_ex_remove_tag_from_asset(self):
node = self.driver.list_nodes()[0]
success = self.driver.ex_remove_tag_from_asset(node, "TagKeyName")
self.assertTrue(success)
def test_ex_remove_tag_from_asset_NOTAG(self):
node = self.driver.list_nodes()[0]
DimensionDataMockHttp.type = "NOTAG"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_remove_tag_from_asset(node, "TagKeyNam")
def test_ex_list_tags(self):
tags = self.driver.ex_list_tags()
self.assertTrue(isinstance(tags, list))
self.assertTrue(isinstance(tags[0], DimensionDataTag))
        self.assertEqual(len(tags), 3)
def test_ex_list_tags_ALLPARAMS(self):
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "ALLPARAMS"
tags = self.driver.ex_list_tags(
asset_id="fake_asset_id",
asset_type="fake_asset_type",
location="fake_location",
tag_key_name="fake_tag_key_name",
tag_key_id="fake_tag_key_id",
value="fake_value",
value_required=False,
display_on_report=False,
)
self.assertTrue(isinstance(tags, list))
self.assertTrue(isinstance(tags[0], DimensionDataTag))
        self.assertEqual(len(tags), 3)
def test_priv_location_to_location_id(self):
location = self.driver.ex_get_location_by_id("NA9")
self.assertEqual(self.driver._location_to_location_id(location), "NA9")
def test_priv_location_to_location_id_STR(self):
self.assertEqual(self.driver._location_to_location_id("NA9"), "NA9")
def test_priv_location_to_location_id_TYPEERROR(self):
with self.assertRaises(TypeError):
self.driver._location_to_location_id([1, 2, 3])
def test_priv_image_needs_auth_os_img(self):
image = self.driver.list_images()[1]
self.assertTrue(self.driver._image_needs_auth(image))
def test_priv_image_needs_auth_os_img_STR(self):
image = self.driver.list_images()[1].id
self.assertTrue(self.driver._image_needs_auth(image))
def test_priv_image_needs_auth_cust_img_windows(self):
image = self.driver.ex_list_customer_images()[1]
self.assertTrue(self.driver._image_needs_auth(image))
def test_priv_image_needs_auth_cust_img_windows_STR(self):
image = self.driver.ex_list_customer_images()[1].id
self.assertTrue(self.driver._image_needs_auth(image))
def test_priv_image_needs_auth_cust_img_linux(self):
image = self.driver.ex_list_customer_images()[0]
self.assertTrue(not self.driver._image_needs_auth(image))
def test_priv_image_needs_auth_cust_img_linux_STR(self):
image = self.driver.ex_list_customer_images()[0].id
self.assertTrue(not self.driver._image_needs_auth(image))
def test_summary_usage_report(self):
report = self.driver.ex_summary_usage_report("2016-06-01", "2016-06-30")
        self.assertEqual(len(report), 13)
        self.assertEqual(len(report[0]), 6)
def test_detailed_usage_report(self):
report = self.driver.ex_detailed_usage_report("2016-06-01", "2016-06-30")
        self.assertEqual(len(report), 42)
        self.assertEqual(len(report[0]), 4)
def test_audit_log_report(self):
report = self.driver.ex_audit_log_report("2016-06-01", "2016-06-30")
        self.assertEqual(len(report), 25)
        self.assertEqual(report[2][2], "OEC_SYSTEM")
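    # The IP-address-list tests build DimensionDataIpAddress /
    # DimensionDataIpAddressList value objects and push them through the driver;
    # the *_STR variants pass plain id strings instead of objects to cover both
    # accepted argument forms.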
def test_ex_list_ip_address_list(self):
net_domain = self.driver.ex_list_network_domains()[0]
ip_list = self.driver.ex_list_ip_address_list(ex_network_domain=net_domain)
self.assertTrue(isinstance(ip_list, list))
self.assertEqual(len(ip_list), 4)
self.assertTrue(isinstance(ip_list[0].name, str))
self.assertTrue(isinstance(ip_list[0].description, str))
self.assertTrue(isinstance(ip_list[0].ip_version, str))
self.assertTrue(isinstance(ip_list[0].state, str))
self.assertTrue(isinstance(ip_list[0].create_time, str))
self.assertTrue(isinstance(ip_list[0].child_ip_address_lists, list))
self.assertEqual(len(ip_list[1].child_ip_address_lists), 1)
self.assertTrue(isinstance(ip_list[1].child_ip_address_lists[0].name, str))
def test_ex_get_ip_address_list(self):
net_domain = self.driver.ex_list_network_domains()[0]
DimensionDataMockHttp.type = "FILTERBYNAME"
ip_list = self.driver.ex_get_ip_address_list(
ex_network_domain=net_domain.id,
ex_ip_address_list_name="Test_IP_Address_List_3",
)
self.assertTrue(isinstance(ip_list, list))
self.assertEqual(len(ip_list), 1)
self.assertTrue(isinstance(ip_list[0].name, str))
self.assertTrue(isinstance(ip_list[0].description, str))
self.assertTrue(isinstance(ip_list[0].ip_version, str))
self.assertTrue(isinstance(ip_list[0].state, str))
self.assertTrue(isinstance(ip_list[0].create_time, str))
ips = ip_list[0].ip_address_collection
self.assertEqual(len(ips), 3)
self.assertTrue(isinstance(ips[0].begin, str))
self.assertTrue(isinstance(ips[0].prefix_size, str))
self.assertTrue(isinstance(ips[2].end, str))
def test_ex_create_ip_address_list_FAIL(self):
net_domain = self.driver.ex_list_network_domains()[0]
with self.assertRaises(TypeError):
self.driver.ex_create_ip_address_list(ex_network_domain=net_domain.id)
def test_ex_create_ip_address_list(self):
name = "Test_IP_Address_List_3"
description = "Test Description"
ip_version = "IPV4"
child_ip_address_list_id = "0291ef78-4059-4bc1-b433-3f6ad698dc41"
child_ip_address_list = DimensionDataChildIpAddressList(
id=child_ip_address_list_id, name="test_child_ip_addr_list"
)
net_domain = self.driver.ex_list_network_domains()[0]
ip_address_1 = DimensionDataIpAddress(begin="190.2.2.100")
ip_address_2 = DimensionDataIpAddress(begin="190.2.2.106", end="190.2.2.108")
ip_address_3 = DimensionDataIpAddress(begin="190.2.2.0", prefix_size="24")
ip_address_collection = [ip_address_1, ip_address_2, ip_address_3]
# Create IP Address List
success = self.driver.ex_create_ip_address_list(
ex_network_domain=net_domain,
name=name,
ip_version=ip_version,
description=description,
ip_address_collection=ip_address_collection,
child_ip_address_list=child_ip_address_list,
)
self.assertTrue(success)
def test_ex_create_ip_address_list_STR(self):
name = "Test_IP_Address_List_3"
description = "Test Description"
ip_version = "IPV4"
child_ip_address_list_id = "0291ef78-4059-4bc1-b433-3f6ad698dc41"
net_domain = self.driver.ex_list_network_domains()[0]
ip_address_1 = DimensionDataIpAddress(begin="190.2.2.100")
ip_address_2 = DimensionDataIpAddress(begin="190.2.2.106", end="190.2.2.108")
ip_address_3 = DimensionDataIpAddress(begin="190.2.2.0", prefix_size="24")
ip_address_collection = [ip_address_1, ip_address_2, ip_address_3]
# Create IP Address List
success = self.driver.ex_create_ip_address_list(
ex_network_domain=net_domain.id,
name=name,
ip_version=ip_version,
description=description,
ip_address_collection=ip_address_collection,
child_ip_address_list=child_ip_address_list_id,
)
self.assertTrue(success)
def test_ex_edit_ip_address_list(self):
ip_address_1 = DimensionDataIpAddress(begin="190.2.2.111")
ip_address_collection = [ip_address_1]
child_ip_address_list = DimensionDataChildIpAddressList(
id="2221ef78-4059-4bc1-b433-3f6ad698dc41",
name="test_child_ip_address_list edited",
)
ip_address_list = DimensionDataIpAddressList(
id="1111ef78-4059-4bc1-b433-3f6ad698d111",
name="test ip address list edited",
ip_version="IPv4",
description="test",
ip_address_collection=ip_address_collection,
child_ip_address_lists=child_ip_address_list,
state="NORMAL",
create_time="2015-09-29T02:49:45",
)
success = self.driver.ex_edit_ip_address_list(
ex_ip_address_list=ip_address_list,
description="test ip address list",
ip_address_collection=ip_address_collection,
child_ip_address_lists=child_ip_address_list,
)
self.assertTrue(success)
def test_ex_edit_ip_address_list_STR(self):
ip_address_1 = DimensionDataIpAddress(begin="190.2.2.111")
ip_address_collection = [ip_address_1]
child_ip_address_list = DimensionDataChildIpAddressList(
id="2221ef78-4059-4bc1-b433-3f6ad698dc41",
name="test_child_ip_address_list edited",
)
success = self.driver.ex_edit_ip_address_list(
ex_ip_address_list="84e34850-595d- 436e-a885-7cd37edb24a4",
description="test ip address list",
ip_address_collection=ip_address_collection,
child_ip_address_lists=child_ip_address_list,
)
self.assertTrue(success)
def test_ex_delete_ip_address_list(self):
child_ip_address_list = DimensionDataChildIpAddressList(
id="2221ef78-4059-4bc1-b433-3f6ad698dc41",
name="test_child_ip_address_list edited",
)
ip_address_list = DimensionDataIpAddressList(
id="1111ef78-4059-4bc1-b433-3f6ad698d111",
name="test ip address list edited",
ip_version="IPv4",
description="test",
ip_address_collection=None,
child_ip_address_lists=child_ip_address_list,
state="NORMAL",
create_time="2015-09-29T02:49:45",
)
success = self.driver.ex_delete_ip_address_list(
ex_ip_address_list=ip_address_list
)
self.assertTrue(success)
def test_ex_delete_ip_address_list_STR(self):
success = self.driver.ex_delete_ip_address_list(
ex_ip_address_list="111ef78-4059-4bc1-b433-3f6ad698d111"
)
self.assertTrue(success)
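    # The port-list tests follow the same pattern: DimensionDataPort ranges plus
    # optional DimensionDataChildPortList references, with *_STR variants passing
    # raw ids in place of objects.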
def test_ex_list_portlist(self):
net_domain = self.driver.ex_list_network_domains()[0]
portlist = self.driver.ex_list_portlist(ex_network_domain=net_domain)
self.assertTrue(isinstance(portlist, list))
self.assertEqual(len(portlist), 3)
self.assertTrue(isinstance(portlist[0].name, str))
self.assertTrue(isinstance(portlist[0].description, str))
self.assertTrue(isinstance(portlist[0].state, str))
self.assertTrue(isinstance(portlist[0].port_collection, list))
self.assertTrue(isinstance(portlist[0].port_collection[0].begin, str))
self.assertTrue(isinstance(portlist[0].port_collection[0].end, str))
self.assertTrue(isinstance(portlist[0].child_portlist_list, list))
self.assertTrue(isinstance(portlist[0].child_portlist_list[0].id, str))
self.assertTrue(isinstance(portlist[0].child_portlist_list[0].name, str))
self.assertTrue(isinstance(portlist[0].create_time, str))
def test_ex_get_port_list(self):
net_domain = self.driver.ex_list_network_domains()[0]
portlist_id = self.driver.ex_list_portlist(ex_network_domain=net_domain)[0].id
portlist = self.driver.ex_get_portlist(ex_portlist_id=portlist_id)
self.assertTrue(isinstance(portlist, DimensionDataPortList))
self.assertTrue(isinstance(portlist.name, str))
self.assertTrue(isinstance(portlist.description, str))
self.assertTrue(isinstance(portlist.state, str))
self.assertTrue(isinstance(portlist.port_collection, list))
self.assertTrue(isinstance(portlist.port_collection[0].begin, str))
self.assertTrue(isinstance(portlist.port_collection[0].end, str))
self.assertTrue(isinstance(portlist.child_portlist_list, list))
self.assertTrue(isinstance(portlist.child_portlist_list[0].id, str))
self.assertTrue(isinstance(portlist.child_portlist_list[0].name, str))
self.assertTrue(isinstance(portlist.create_time, str))
def test_ex_get_portlist_STR(self):
net_domain = self.driver.ex_list_network_domains()[0]
portlist = self.driver.ex_list_portlist(ex_network_domain=net_domain)[0]
port_list = self.driver.ex_get_portlist(ex_portlist_id=portlist.id)
self.assertTrue(isinstance(port_list, DimensionDataPortList))
self.assertTrue(isinstance(port_list.name, str))
self.assertTrue(isinstance(port_list.description, str))
self.assertTrue(isinstance(port_list.state, str))
self.assertTrue(isinstance(port_list.port_collection, list))
self.assertTrue(isinstance(port_list.port_collection[0].begin, str))
self.assertTrue(isinstance(port_list.port_collection[0].end, str))
self.assertTrue(isinstance(port_list.child_portlist_list, list))
self.assertTrue(isinstance(port_list.child_portlist_list[0].id, str))
self.assertTrue(isinstance(port_list.child_portlist_list[0].name, str))
self.assertTrue(isinstance(port_list.create_time, str))
def test_ex_create_portlist_NOCHILDPORTLIST(self):
name = "Test_Port_List"
description = "Test Description"
net_domain = self.driver.ex_list_network_domains()[0]
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
# Create IP Address List
success = self.driver.ex_create_portlist(
ex_network_domain=net_domain,
name=name,
description=description,
port_collection=port_collection,
)
self.assertTrue(success)
def test_ex_create_portlist(self):
name = "Test_Port_List"
description = "Test Description"
net_domain = self.driver.ex_list_network_domains()[0]
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
child_port_1 = DimensionDataChildPortList(
id="333174a2-ae74-4658-9e56-50fc90e086cf", name="test port 1"
)
child_port_2 = DimensionDataChildPortList(
id="311174a2-ae74-4658-9e56-50fc90e04444", name="test port 2"
)
child_ports = [child_port_1, child_port_2]
        # Create port list
success = self.driver.ex_create_portlist(
ex_network_domain=net_domain,
name=name,
description=description,
port_collection=port_collection,
child_portlist_list=child_ports,
)
self.assertTrue(success)
def test_ex_create_portlist_STR(self):
name = "Test_Port_List"
description = "Test Description"
net_domain = self.driver.ex_list_network_domains()[0]
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
child_port_1 = DimensionDataChildPortList(
id="333174a2-ae74-4658-9e56-50fc90e086cf", name="test port 1"
)
child_port_2 = DimensionDataChildPortList(
id="311174a2-ae74-4658-9e56-50fc90e04444", name="test port 2"
)
child_ports_ids = [child_port_1.id, child_port_2.id]
        # Create port list
success = self.driver.ex_create_portlist(
ex_network_domain=net_domain.id,
name=name,
description=description,
port_collection=port_collection,
child_portlist_list=child_ports_ids,
)
self.assertTrue(success)
def test_ex_edit_portlist(self):
net_domain = self.driver.ex_list_network_domains()[0]
portlist = self.driver.ex_list_portlist(net_domain)[0]
description = "Test Description"
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
child_port_1 = DimensionDataChildPortList(
id="333174a2-ae74-4658-9e56-50fc90e086cf", name="test port 1"
)
child_port_2 = DimensionDataChildPortList(
id="311174a2-ae74-4658-9e56-50fc90e04444", name="test port 2"
)
child_ports = [child_port_1.id, child_port_2.id]
        # Edit port list
success = self.driver.ex_edit_portlist(
ex_portlist=portlist,
description=description,
port_collection=port_collection,
child_portlist_list=child_ports,
)
self.assertTrue(success)
def test_ex_edit_portlist_STR(self):
portlist_id = "484174a2-ae74-4658-9e56-50fc90e086cf"
description = "Test Description"
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
child_port_1 = DimensionDataChildPortList(
id="333174a2-ae74-4658-9e56-50fc90e086cf", name="test port 1"
)
child_port_2 = DimensionDataChildPortList(
id="311174a2-ae74-4658-9e56-50fc90e04444", name="test port 2"
)
child_ports_ids = [child_port_1.id, child_port_2.id]
        # Edit port list
success = self.driver.ex_edit_portlist(
ex_portlist=portlist_id,
description=description,
port_collection=port_collection,
child_portlist_list=child_ports_ids,
)
self.assertTrue(success)
def test_ex_delete_portlist(self):
net_domain = self.driver.ex_list_network_domains()[0]
portlist = self.driver.ex_list_portlist(net_domain)[0]
success = self.driver.ex_delete_portlist(ex_portlist=portlist)
self.assertTrue(success)
def test_ex_delete_portlist_STR(self):
net_domain = self.driver.ex_list_network_domains()[0]
portlist = self.driver.ex_list_portlist(net_domain)[0]
success = self.driver.ex_delete_portlist(ex_portlist=portlist.id)
self.assertTrue(success)
def test_import_image(self):
tag_dictionaries = {"tagkey1_name": "dev test", "tagkey2_name": None}
success = self.driver.import_image(
ovf_package_name="aTestGocToNGoc2_export2.mf",
name="Libcloud NGOCImage_New 2",
description="test",
cluster_id="QA1_N2_VMWARE_1-01",
is_guest_os_customization="false",
tagkey_name_value_dictionaries=tag_dictionaries,
)
self.assertTrue(success)
def test_import_image_error_too_many_choice(self):
tag_dictionaries = {"tagkey1_name": "dev test", "tagkey2_name": None}
with self.assertRaises(ValueError):
self.driver.import_image(
ovf_package_name="aTestGocToNGoc2_export2.mf",
name="Libcloud NGOCImage_New 2",
description="test",
cluster_id="QA1_N2_VMWARE_1-01",
datacenter_id="QA1_N1_VMWARE_1",
is_guest_os_customization="false",
tagkey_name_value_dictionaries=tag_dictionaries,
)
def test_import_image_error_missing_choice(self):
tag_dictionaries = {"tagkey1_name": "dev test", "tagkey2_name": None}
with self.assertRaises(ValueError):
self.driver.import_image(
ovf_package_name="aTestGocToNGoc2_export2.mf",
name="Libcloud NGOCImage_New 2",
description="test",
cluster_id=None,
datacenter_id=None,
is_guest_os_customization="false",
tagkey_name_value_dictionaries=tag_dictionaries,
)
def test_exchange_nic_vlans(self):
success = self.driver.ex_exchange_nic_vlans(
nic_id_1="a4b4b42b-ccb5-416f-b052-ce7cb7fdff12",
nic_id_2="b39d09b8-ea65-424a-8fa6-c6f5a98afc69",
)
self.assertTrue(success)
def test_change_nic_network_adapter(self):
success = self.driver.ex_change_nic_network_adapter(
nic_id="0c55c269-20a5-4fec-8054-22a245a48fe4", network_adapter_name="E1000"
)
self.assertTrue(success)
def test_ex_create_node_uncustomized_mcp2_using_vlan(self):
# Get VLAN
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
# Create node using vlan instead of private IPv4
node = self.driver.ex_create_node_uncustomized(
name="test_server_05",
image="fake_customer_image",
ex_network_domain="fakenetworkdomain",
ex_is_started=False,
ex_description=None,
ex_cluster_id=None,
ex_cpu_specification=None,
ex_memory_gb=None,
ex_primary_nic_private_ipv4=None,
ex_primary_nic_vlan=vlan,
ex_primary_nic_network_adapter=None,
ex_additional_nics=None,
ex_disks=None,
ex_tagid_value_pairs=None,
ex_tagname_value_pairs=None,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
def test_ex_create_node_uncustomized_mcp2_using_ipv4(self):
node = self.driver.ex_create_node_uncustomized(
name="test_server_05",
image="fake_customer_image",
ex_network_domain="fakenetworkdomain",
ex_is_started=False,
ex_description=None,
ex_cluster_id=None,
ex_cpu_specification=None,
ex_memory_gb=None,
ex_primary_nic_private_ipv4="10.0.0.1",
ex_primary_nic_vlan=None,
ex_primary_nic_network_adapter=None,
ex_additional_nics=None,
ex_disks=None,
ex_tagid_value_pairs=None,
ex_tagname_value_pairs=None,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
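# Raised by the mock handlers below when a POST body's root element does not
# match the API call the handler expects.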
class InvalidRequestError(Exception):
def __init__(self, tag):
super(InvalidRequestError, self).__init__("Invalid Request - %s" % tag)
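# MockHttp routes each request to a handler whose name is derived from the URL
# path (non-alphanumeric characters become underscores); when the class-level
# ``type`` attribute is set, it is appended as a further suffix, which is how
# the tests above select alternate fixtures such as _PAGINATED or _ALLFILTERS.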
class DimensionDataMockHttp(MockHttp):
fixtures = ComputeFileFixtures("dimensiondata")
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_report_usage(
self, method, url, body, headers
):
body = self.fixtures.load("summary_usage_report.csv")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_report_usageDetailed(
self, method, url, body, headers
):
body = self.fixtures.load("detailed_usage_report.csv")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_auditlog(
self, method, url, body, headers
):
body = self.fixtures.load("audit_log.csv")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])
def _oec_0_9_myaccount(self, method, url, body, headers):
body = self.fixtures.load("oec_0_9_myaccount.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
body = self.fixtures.load("oec_0_9_myaccount.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_PAGINATED(self, method, url, body, headers):
body = self.fixtures.load("oec_0_9_myaccount.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_ALLFILTERS(self, method, url, body, headers):
body = self.fixtures.load("oec_0_9_myaccount.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_base_image(self, method, url, body, headers):
body = self.fixtures.load("oec_0_9_base_image.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_base_imageWithDiskSpeed(self, method, url, body, headers):
body = self.fixtures.load("oec_0_9_base_imageWithDiskSpeed.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
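    # The legacy server action endpoint encodes the operation in the query
    # string, so the next handlers dispatch on everything after "?" to pick the
    # matching fixture.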
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(
self, method, url, body, headers
):
body = None
action = url.split("?")[-1]
if action == "restart":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml"
)
elif action == "shutdown":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml"
)
elif action == "delete":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml"
)
elif action == "start":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml"
)
elif action == "poweroff":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(
self, method, url, body, headers
):
body = None
action = url.split("?")[-1]
if action == "restart":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml"
)
elif action == "shutdown":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml"
)
elif action == "delete":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml"
)
elif action == "start":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml"
)
elif action == "poweroff":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml"
)
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(
self, method, url, body, headers
):
body = self.fixtures.load(
"_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(
self, method, url, body, headers
):
if method == "POST":
request = ET.fromstring(body)
if (
request.tag
!= "{http://oec.api.opsource.net/schemas/network}NewNetworkWithLocation"
):
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation_NA9(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1(
self, method, url, body, headers
):
action = url.split("?")[-1]
if action == "delete":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87(
self, method, url, body, headers
):
if method == "GET":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == "POST":
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_POST.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_FAIL_EXISTING(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create_FAIL.xml"
)
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418_FAIL(
self, method, url, body, headers
):
body = self.fixtures.load(
"oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete_FAIL.xml"
)
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
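    # The handlers below cover the CloudControl 2.4 API. POST handlers parse the
    # XML body and verify the root element's namespaced tag before returning a
    # fixture, so a test that builds the wrong request fails loudly via
    # InvalidRequestError rather than passing silently.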
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(
self, method, url, body, headers
):
body = self.fixtures.load("server.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_deleteServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_INPROGRESS(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_deleteServer_RESOURCEBUSY.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}rebootServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_rebootServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_INPROGRESS(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}rebootServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_rebootServer_RESOURCEBUSY.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(
self, method, url, body, headers
):
if url.endswith("datacenterId=NA3"):
body = self.fixtures.load("2.4/server_server_NA3.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
body = self.fixtures.load("2.4/server_server.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGESIZE50(
self, method, url, body, headers
):
if not url.endswith("pageSize=50"):
raise ValueError("pageSize is not set as expected")
body = self.fixtures.load("2.4/server_server.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_EMPTY(
self, method, url, body, headers
):
body = self.fixtures.load("server_server_paginated_empty.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
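    # Pagination is simulated by keying on the pageNumber query parameter:
    # page 1 returns a "paginated" fixture signalling more results, page 2
    # returns the final (or empty) page.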
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGED_THEN_EMPTY(
self, method, url, body, headers
):
if "pageNumber=2" in url:
body = self.fixtures.load("server_server_paginated_empty.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
else:
body = self.fixtures.load("2.4/server_server_paginated.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATED(
self, method, url, body, headers
):
if "pageNumber=2" in url:
body = self.fixtures.load("2.4/server_server.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
else:
body = self.fixtures.load("2.4/server_server_paginated.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATEDEMPTY(
self, method, url, body, headers
):
body = self.fixtures.load("server_server_paginated_empty.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
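    # ALLFILTERS handlers assert on every query parameter individually, so the
    # corresponding tests verify that each driver filter keyword is translated
    # into the expected API parameter; an unexpected parameter raises ValueError.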
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS(
self, method, url, body, headers
):
(_, params) = url.split("?")
parameters = params.split("&")
for parameter in parameters:
(key, value) = parameter.split("=")
if key == "datacenterId":
assert value == "fake_loc"
elif key == "networkId":
assert value == "fake_network"
elif key == "networkDomainId":
assert value == "fake_network_domain"
elif key == "vlanId":
assert value == "fake_vlan"
elif key == "ipv6":
assert value == "fake_ipv6"
elif key == "privateIpv4":
assert value == "fake_ipv4"
elif key == "name":
assert value == "fake_name"
elif key == "state":
assert value == "fake_state"
elif key == "started":
assert value == "True"
elif key == "deployed":
assert value == "True"
elif key == "sourceImageId":
assert value == "fake_image"
else:
raise ValueError(
"Could not find in url parameters {0}:{1}".format(key, value)
)
body = self.fixtures.load("2.4/server_server.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule(
self, method, url, body, headers
):
body = self.fixtures.load("server_antiAffinityRule_list.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_ALLFILTERS(
self, method, url, body, headers
):
(_, params) = url.split("?")
parameters = params.split("&")
for parameter in parameters:
(key, value) = parameter.split("=")
if key == "id":
assert value == "FAKE_ID"
elif key == "state":
assert value == "FAKE_STATE"
elif key == "pageSize":
assert value == "250"
elif key == "networkDomainId":
pass
else:
raise ValueError(
"Could not find in url parameters {0}:{1}".format(key, value)
)
body = self.fixtures.load("server_antiAffinityRule_list.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_PAGINATED(
self, method, url, body, headers
):
if "pageNumber=2" in url:
body = self.fixtures.load("server_antiAffinityRule_list.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
else:
body = self.fixtures.load("server_antiAffinityRule_list_PAGINATED.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter(
self, method, url, body, headers
):
if url.endswith("id=NA9"):
body = self.fixtures.load("infrastructure_datacenter_NA9.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
body = self.fixtures.load("infrastructure_datacenter.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter_ALLFILTERS(
self, method, url, body, headers
):
if url.endswith("id=NA9"):
body = self.fixtures.load("infrastructure_datacenter_NA9.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
body = self.fixtures.load("infrastructure_datacenter.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_updateVmwareTools(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}updateVmwareTools":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_updateVmwareTools.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}startServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_startServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}startServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_startServer_INPROGRESS.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_shutdownServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_shutdownServer_INPROGRESS.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}resetServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_resetServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}powerOffServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_powerOffServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}powerOffServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_powerOffServer_INPROGRESS.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_11_INPROGRESS(
self, method, url, body, headers
):
body = self.fixtures.load("2.4/server_GetServer.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain(
self, method, url, body, headers
):
body = self.fixtures.load("network_networkDomain.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_ALLFILTERS(
self, method, url, body, headers
):
(_, params) = url.split("?")
parameters = params.split("&")
for parameter in parameters:
(key, value) = parameter.split("=")
if key == "datacenterId":
assert value == "fake_location"
elif key == "type":
assert value == "fake_plan"
elif key == "name":
assert value == "fake_name"
elif key == "state":
assert value == "fake_state"
else:
raise ValueError(
"Could not find in url parameters {0}:{1}".format(key, value)
)
body = self.fixtures.load("network_networkDomain.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan(
self, method, url, body, headers
):
body = self.fixtures.load("network_vlan.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_ALLFILTERS(
self, method, url, body, headers
):
(_, params) = url.split("?")
parameters = params.split("&")
for parameter in parameters:
(key, value) = parameter.split("=")
if key == "datacenterId":
assert value == "fake_location"
elif key == "networkDomainId":
assert value == "fake_network_domain"
elif key == "ipv6Address":
assert value == "fake_ipv6"
elif key == "privateIpv4Address":
assert value == "fake_ipv4"
elif key == "name":
assert value == "fake_name"
elif key == "state":
assert value == "fake_state"
else:
raise ValueError(
"Could not find in url parameters {0}:{1}".format(key, value)
)
body = self.fixtures.load("network_vlan.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deployServer":
raise InvalidRequestError(request.tag)
        # Make sure that we either have a network tag with an IP or networkId,
        # or networkInfo with a primary NIC that has privateIpv4 or vlanId
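        # Illustrative request bodies (sketches only; element names taken from
        # the checks below, namespace urn:didata.com:api:cloud:types omitted):
        #   MCP1: <deployServer><network><privateIpv4>10.0.0.1</privateIpv4>...</network>...</deployServer>
        #   MCP2: <deployServer><networkInfo><primaryNic><vlanId>...</vlanId></primaryNic></networkInfo>...</deployServer>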
network = request.find(fixxpath("network", TYPES_URN))
network_info = request.find(fixxpath("networkInfo", TYPES_URN))
if network is not None:
if network_info is not None:
raise InvalidRequestError("Request has both MCP1 and MCP2 values")
ipv4 = findtext(network, "privateIpv4", TYPES_URN)
networkId = findtext(network, "networkId", TYPES_URN)
if ipv4 is None and networkId is None:
raise InvalidRequestError(
"Invalid request MCP1 requests need privateIpv4 or networkId"
)
        elif network_info is not None:
primary_nic = network_info.find(fixxpath("primaryNic", TYPES_URN))
ipv4 = findtext(primary_nic, "privateIpv4", TYPES_URN)
vlanId = findtext(primary_nic, "vlanId", TYPES_URN)
if ipv4 is None and vlanId is None:
raise InvalidRequestError(
"Invalid request MCP2 requests need privateIpv4 or vlanId"
)
else:
raise InvalidRequestError(
"Invalid request, does not have network or network_info in XML"
)
body = self.fixtures.load("server_deployServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(
self, method, url, body, headers
):
body = self.fixtures.load(
"2.4/server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployNetworkDomain(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deployNetworkDomain":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_deployNetworkDomain.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be(
self, method, url, body, headers
):
body = self.fixtures.load(
"network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be_ALLFILTERS(
self, method, url, body, headers
):
body = self.fixtures.load(
"network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editNetworkDomain(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}editNetworkDomain":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_editNetworkDomain.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNetworkDomain(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteNetworkDomain":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_deleteNetworkDomain.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployVlan(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deployVlan":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_deployVlan.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_0e56433f_d808_4669_821d_812769517ff8(
self, method, url, body, headers
):
body = self.fixtures.load(
"network_vlan_0e56433f_d808_4669_821d_812769517ff8.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editVlan(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}editVlan":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_editVlan.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteVlan(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteVlan":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_deleteVlan.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_expandVlan(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}expandVlan":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_expandVlan.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_addPublicIpBlock(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}addPublicIpBlock":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_addPublicIpBlock.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba(
self, method, url, body, headers
):
body = self.fixtures.load(
"network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock(
self, method, url, body, headers
):
body = self.fixtures.load("network_publicIpBlock.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8(
self, method, url, body, headers
):
body = self.fixtures.load(
"network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_removePublicIpBlock(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}removePublicIpBlock":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_removePublicIpBlock.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule(
self, method, url, body, headers
):
body = self.fixtures.load("network_firewallRule.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createFirewallRule(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}createFirewallRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_createFirewallRule.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c(
self, method, url, body, headers
):
body = self.fixtures.load(
"network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editFirewallRule(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}editFirewallRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_editFirewallRule.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteFirewallRule(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteFirewallRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_deleteFirewallRule.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createNatRule(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}createNatRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_createNatRule.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule(
self, method, url, body, headers
):
body = self.fixtures.load("network_natRule.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce(
self, method, url, body, headers
):
body = self.fixtures.load(
"network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNatRule(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteNatRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("network_deleteNatRule.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addNic(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}addNic":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_addNic.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeNic(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}removeNic":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_removeNic.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_disableServerMonitoring(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}disableServerMonitoring":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_disableServerMonitoring.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_enableServerMonitoring(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}enableServerMonitoring":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_enableServerMonitoring.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_changeServerMonitoringPlan(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}changeServerMonitoringPlan":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_changeServerMonitoringPlan.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage(
self, method, url, body, headers
):
body = self.fixtures.load("2.4/image_osImage.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c(
self, method, url, body, headers
):
body = self.fixtures.load(
"2.4/image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a(
self, method, url, body, headers
):
body = self.fixtures.load(
"2.4/image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d(
self, method, url, body, headers
):
body = self.fixtures.load("image_osImage_BAD_REQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c(
self, method, url, body, headers
):
body = self.fixtures.load("image_osImage_BAD_REQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_FAKE_IMAGE_ID(
self, method, url, body, headers
):
body = self.fixtures.load("image_osImage_BAD_REQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage(
self, method, url, body, headers
):
body = self.fixtures.load("2.4/image_customerImage.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d(
self, method, url, body, headers
):
body = self.fixtures.load(
"2.4/image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c(
self, method, url, body, headers
):
body = self.fixtures.load(
"2.4/image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c.xml"
)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_FAKE_IMAGE_ID(
self, method, url, body, headers
):
body = self.fixtures.load("image_customerImage_BAD_REQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_reconfigureServer(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}reconfigureServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("server_reconfigureServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_cleanServer(
self, method, url, body, headers
):
body = self.fixtures.load("server_cleanServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addDisk(
self, method, url, body, headers
):
body = self.fixtures.load("server_addDisk.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeDisk(
self, method, url, body, headers
):
body = self.fixtures.load("server_removeDisk.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}createTagKey":
raise InvalidRequestError(request.tag)
name = findtext(request, "name", TYPES_URN)
description = findtext(request, "description", TYPES_URN)
value_required = findtext(request, "valueRequired", TYPES_URN)
display_on_report = findtext(request, "displayOnReport", TYPES_URN)
if name is None:
raise ValueError("Name must have a value in the request")
if description is not None:
raise ValueError("Default description for a tag should be blank")
if value_required is None or value_required != "true":
raise ValueError("Default valueRequired should be true")
if display_on_report is None or display_on_report != "true":
raise ValueError("Default displayOnReport should be true")
body = self.fixtures.load("tag_createTagKey.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey_ALLPARAMS(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}createTagKey":
raise InvalidRequestError(request.tag)
name = findtext(request, "name", TYPES_URN)
description = findtext(request, "description", TYPES_URN)
value_required = findtext(request, "valueRequired", TYPES_URN)
display_on_report = findtext(request, "displayOnReport", TYPES_URN)
if name is None:
raise ValueError("Name must have a value in the request")
if description is None:
raise ValueError("Description should have a value")
if value_required is None or value_required != "false":
raise ValueError("valueRequired should be false")
if display_on_report is None or display_on_report != "false":
raise ValueError("displayOnReport should be false")
body = self.fixtures.load("tag_createTagKey.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey_BADREQUEST(
self, method, url, body, headers
):
body = self.fixtures.load("tag_createTagKey_BADREQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey(
self, method, url, body, headers
):
body = self.fixtures.load("tag_tagKey_list.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_SINGLE(
self, method, url, body, headers
):
body = self.fixtures.load("tag_tagKey_list_SINGLE.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_ALLFILTERS(
self, method, url, body, headers
):
(_, params) = url.split("?")
parameters = params.split("&")
for parameter in parameters:
(key, value) = parameter.split("=")
if key == "id":
assert value == "fake_id"
elif key == "name":
assert value == "fake_name"
elif key == "valueRequired":
assert value == "false"
elif key == "displayOnReport":
assert value == "false"
elif key == "pageSize":
assert value == "250"
else:
raise ValueError(
"Could not find in url parameters {0}:{1}".format(key, value)
)
body = self.fixtures.load("tag_tagKey_list.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_d047c609_93d7_4bc5_8fc9_732c85840075(
self, method, url, body, headers
):
body = self.fixtures.load("tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_d047c609_93d7_4bc5_8fc9_732c85840075_NOEXIST(
self, method, url, body, headers
):
body = self.fixtures.load(
"tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54_BADREQUEST.xml"
)
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NAME(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}editTagKey":
raise InvalidRequestError(request.tag)
name = findtext(request, "name", TYPES_URN)
description = findtext(request, "description", TYPES_URN)
value_required = findtext(request, "valueRequired", TYPES_URN)
display_on_report = findtext(request, "displayOnReport", TYPES_URN)
if name is None:
raise ValueError("Name must have a value in the request")
if description is not None:
raise ValueError("Description should be empty")
if value_required is not None:
raise ValueError("valueRequired should be empty")
if display_on_report is not None:
raise ValueError("displayOnReport should be empty")
body = self.fixtures.load("tag_editTagKey.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NOTNAME(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}editTagKey":
raise InvalidRequestError(request.tag)
name = findtext(request, "name", TYPES_URN)
description = findtext(request, "description", TYPES_URN)
value_required = findtext(request, "valueRequired", TYPES_URN)
display_on_report = findtext(request, "displayOnReport", TYPES_URN)
if name is not None:
raise ValueError("Name should be empty")
if description is None:
raise ValueError("Description should not be empty")
if value_required is None:
raise ValueError("valueRequired should not be empty")
if display_on_report is None:
raise ValueError("displayOnReport should not be empty")
body = self.fixtures.load("tag_editTagKey.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NOCHANGE(
self, method, url, body, headers
):
body = self.fixtures.load("tag_editTagKey_BADREQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_deleteTagKey(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteTagKey":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("tag_deleteTagKey.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_deleteTagKey_NOEXIST(
self, method, url, body, headers
):
body = self.fixtures.load("tag_deleteTagKey_BADREQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}applyTags":
raise InvalidRequestError(request.tag)
asset_type = findtext(request, "assetType", TYPES_URN)
asset_id = findtext(request, "assetId", TYPES_URN)
tag = request.find(fixxpath("tag", TYPES_URN))
tag_key_name = findtext(tag, "tagKeyName", TYPES_URN)
value = findtext(tag, "value", TYPES_URN)
if asset_type is None:
raise ValueError("assetType should not be empty")
if asset_id is None:
raise ValueError("assetId should not be empty")
if tag_key_name is None:
raise ValueError("tagKeyName should not be empty")
if value is None:
raise ValueError("value should not be empty")
body = self.fixtures.load("tag_applyTags.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags_NOVALUE(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}applyTags":
raise InvalidRequestError(request.tag)
asset_type = findtext(request, "assetType", TYPES_URN)
asset_id = findtext(request, "assetId", TYPES_URN)
tag = request.find(fixxpath("tag", TYPES_URN))
tag_key_name = findtext(tag, "tagKeyName", TYPES_URN)
value = findtext(tag, "value", TYPES_URN)
if asset_type is None:
raise ValueError("assetType should not be empty")
if asset_id is None:
raise ValueError("assetId should not be empty")
if tag_key_name is None:
raise ValueError("tagKeyName should not be empty")
if value is not None:
raise ValueError("value should be empty")
body = self.fixtures.load("tag_applyTags.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags_NOTAGKEY(
self, method, url, body, headers
):
body = self.fixtures.load("tag_applyTags_BADREQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_removeTags(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}removeTags":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("tag_removeTag.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_removeTags_NOTAG(
self, method, url, body, headers
):
body = self.fixtures.load("tag_removeTag_BADREQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tag(
self, method, url, body, headers
):
body = self.fixtures.load("tag_tag_list.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tag_ALLPARAMS(
self, method, url, body, headers
):
(_, params) = url.split("?")
parameters = params.split("&")
for parameter in parameters:
(key, value) = parameter.split("=")
if key == "assetId":
assert value == "fake_asset_id"
elif key == "assetType":
assert value == "fake_asset_type"
elif key == "valueRequired":
assert value == "false"
elif key == "displayOnReport":
assert value == "false"
elif key == "pageSize":
assert value == "250"
elif key == "datacenterId":
assert value == "fake_location"
elif key == "value":
assert value == "fake_value"
elif key == "tagKeyName":
assert value == "fake_tag_key_name"
elif key == "tagKeyId":
assert value == "fake_tag_key_id"
else:
raise ValueError(
"Could not find in url parameters {0}:{1}".format(key, value)
)
body = self.fixtures.load("tag_tag_list.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_ipAddressList(
self, method, url, body, headers
):
body = self.fixtures.load("ip_address_lists.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_ipAddressList_FILTERBYNAME(
self, method, url, body, headers
):
body = self.fixtures.load("ip_address_lists_FILTERBYNAME.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createIpAddressList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "createIpAddressList":
raise InvalidRequestError(request.tag)
net_domain = findtext(request, "networkDomainId", TYPES_URN)
if net_domain is None:
raise ValueError("Network Domain should not be empty")
name = findtext(request, "name", TYPES_URN)
if name is None:
raise ValueError("Name should not be empty")
ip_version = findtext(request, "ipVersion", TYPES_URN)
if ip_version is None:
raise ValueError("IP Version should not be empty")
ip_address_col_required = findall(request, "ipAddress", TYPES_URN)
child_ip_address_required = findall(request, "childIpAddressListId", TYPES_URN)
if 0 == len(ip_address_col_required) and 0 == len(child_ip_address_required):
raise ValueError(
"At least one ipAddress element or "
"one childIpAddressListId element must be "
"provided."
)
if ip_address_col_required[0].get("begin") is None:
raise ValueError("IP Address should not be empty")
body = self.fixtures.load("ip_address_list_create.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editIpAddressList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "editIpAddressList":
raise InvalidRequestError(request.tag)
ip_address_list = request.get("id")
if ip_address_list is None:
raise ValueError("IpAddressList ID should not be empty")
name = findtext(request, "name", TYPES_URN)
if name is not None:
raise ValueError("Name should not exists in request")
ip_version = findtext(request, "ipVersion", TYPES_URN)
if ip_version is not None:
raise ValueError("IP Version should not exists in request")
ip_address_col_required = findall(request, "ipAddress", TYPES_URN)
child_ip_address_required = findall(request, "childIpAddressListId", TYPES_URN)
if 0 == len(ip_address_col_required) and 0 == len(child_ip_address_required):
raise ValueError(
"At least one ipAddress element or "
"one childIpAddressListId element must be "
"provided."
)
if ip_address_col_required[0].get("begin") is None:
raise ValueError("IP Address should not be empty")
body = self.fixtures.load("ip_address_list_edit.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteIpAddressList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "deleteIpAddressList":
raise InvalidRequestError(request.tag)
ip_address_list = request.get("id")
if ip_address_list is None:
raise ValueError("IpAddressList ID should not be empty")
body = self.fixtures.load("ip_address_list_delete.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_portList(
self, method, url, body, headers
):
body = self.fixtures.load("port_list_lists.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_portList_c8c92ea3_2da8_4d51_8153_f39bec794d69(
self, method, url, body, headers
):
body = self.fixtures.load("port_list_get.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createPortList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "createPortList":
raise InvalidRequestError(request.tag)
net_domain = findtext(request, "networkDomainId", TYPES_URN)
if net_domain is None:
raise ValueError("Network Domain should not be empty")
ports_required = findall(request, "port", TYPES_URN)
child_port_list_required = findall(request, "childPortListId", TYPES_URN)
if 0 == len(ports_required) and 0 == len(child_port_list_required):
raise ValueError(
"At least one port element or one "
"childPortListId element must be provided"
)
if ports_required[0].get("begin") is None:
raise ValueError("PORT begin value should not be empty")
body = self.fixtures.load("port_list_create.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editPortList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "editPortList":
raise InvalidRequestError(request.tag)
ports_required = findall(request, "port", TYPES_URN)
child_port_list_required = findall(request, "childPortListId", TYPES_URN)
if 0 == len(ports_required) and 0 == len(child_port_list_required):
raise ValueError(
"At least one port element or one "
"childPortListId element must be provided"
)
if ports_required[0].get("begin") is None:
raise ValueError("PORT begin value should not be empty")
body = self.fixtures.load("port_list_edit.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deletePortList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "deletePortList":
raise InvalidRequestError(request.tag)
port_list = request.get("id")
if port_list is None:
raise ValueError("Port List ID should not be empty")
body = self.fixtures.load("ip_address_list_delete.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_cloneServer(
self, method, url, body, headers
):
body = self.fixtures.load("2.4/server_clone_response.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_importImage(
self, method, url, body, headers
):
body = self.fixtures.load("2.4/import_image_response.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_exchangeNicVlans(
self, method, url, body, headers
):
body = self.fixtures.load("2.4/exchange_nic_vlans_response.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_changeNetworkAdapter(
self, method, url, body, headers
):
body = self.fixtures.load("2.4/change_nic_networkadapter_response.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployUncustomizedServer(
self, method, url, body, headers
):
body = self.fixtures.load("2.4/deploy_customised_server.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
if __name__ == "__main__":
sys.exit(unittest.main())
| apache/libcloud | libcloud/test/compute/test_dimensiondata_v2_4.py | Python | apache-2.0 | 160,650 | 0.001332 |
#! /usr/bin/env python
import os
from dotfiles import Dotfiles
def main():
homedir = os.environ['HOME']
dotfilesRoot = homedir + '/dotfiles'
d = Dotfiles(dotfilesRoot)
d.setup()
if __name__ == "__main__":
main()
| xaque208/dotfiles | bin/init.py | Python | mit | 239 | 0 |
from __future__ import unicode_literals, division, absolute_import
from urlparse import urljoin, urlparse
from collections import namedtuple
from itertools import groupby
import logging
import os
import posixpath
from functools import partial
import time
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.config_schema import one_or_more
from flexget.utils.template import render_from_entry, RenderError
log = logging.getLogger('sftp')
ConnectionConfig = namedtuple('ConnectionConfig', ['host', 'port', 'username', 'password',
'private_key', 'private_key_pass'])
# retry configuration constants
CONNECT_TRIES = 3
RETRY_INTERVAL = 15
RETRY_STEP = 5
SOCKET_TIMEOUT = 15
# make separate path instances for local vs remote path styles
localpath = os.path
remotepath = posixpath  # pysftp uses POSIX-style paths
try:
import pysftp
logging.getLogger("paramiko").setLevel(logging.ERROR)
except ImportError:
pysftp = None
def sftp_connect(conf):
"""
Helper function to connect to an sftp server
"""
sftp = None
tries = CONNECT_TRIES
retry_interval = RETRY_INTERVAL
while not sftp:
try:
sftp = pysftp.Connection(host=conf.host, username=conf.username,
private_key=conf.private_key, password=conf.password,
port=conf.port, private_key_pass=conf.private_key_pass)
sftp.timeout = SOCKET_TIMEOUT
log.verbose('Connected to %s' % conf.host)
except Exception as e:
if not tries:
raise e
else:
log.debug('Caught exception: %s' % e)
log.warn('Failed to connect to %s; waiting %d seconds before retrying.' %
(conf.host, retry_interval))
time.sleep(retry_interval)
tries -= 1
retry_interval += RETRY_STEP
return sftp
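# Minimal usage sketch (illustrative values only):
#   conf = ConnectionConfig('sftp.example.com', 22, 'user', 'secret', None, None)
#   sftp = sftp_connect(conf)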
def dependency_check():
"""
Check if pysftp module is present
"""
if not pysftp:
raise plugin.DependencyError(issued_by='sftp',
missing='pysftp',
message='sftp plugin requires the pysftp Python module.')
class SftpList(object):
"""
Generate entries from SFTP. This plugin requires the pysftp Python module and its dependencies.
Configuration:
host: Host to connect to
port: Port the remote SSH server is listening on. Defaults to port 22.
username: Username to log in as
password: The password to use. Optional if a private key is provided.
private_key: Path to the private key (if any) to log into the SSH server
private_key_pass: Password for the private key (if needed)
recursive: Indicates whether the listing should be recursive
    get_size: Indicates whether to calculate the size of the remote file/directory.
WARNING: This can be very slow when computing the size of directories!
    files_only: Indicates whether to omit directories from the results.
dirs: List of directories to download
Example:
sftp_list:
host: example.com
username: Username
private_key: /Users/username/.ssh/id_rsa
recursive: False
get_size: True
files_only: False
dirs:
- '/path/to/list/'
- '/another/path/'
"""
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': 22},
'files_only': {'type': 'boolean', 'default': True},
'recursive': {'type': 'boolean', 'default': False},
'get_size': {'type': 'boolean', 'default': True},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'dirs': one_or_more({'type': 'string'})
},
        'additionalProperties': False,
'required': ['host', 'username']
}
def prepare_config(self, config):
"""
Sets defaults for the provided configuration
"""
config.setdefault('port', 22)
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('dirs', ['.'])
return config
def on_task_input(self, task, config):
"""
Input task handler
"""
dependency_check()
config = self.prepare_config(config)
host = config['host']
port = config['port']
username = config['username']
password = config['password']
private_key = config['private_key']
private_key_pass = config['private_key_pass']
files_only = config['files_only']
recursive = config['recursive']
get_size = config['get_size']
dirs = config['dirs']
if not isinstance(dirs, list):
dirs = [dirs]
login_str = ''
port_str = ''
if username and password:
login_str = '%s:%s@' % (username, password)
elif username:
login_str = '%s@' % username
if port and port != 22:
port_str = ':%d' % port
url_prefix = 'sftp://%s%s%s/' % (login_str, host, port_str)
log.debug('Connecting to %s' % host)
conn_conf = ConnectionConfig(host, port, username, password, private_key, private_key_pass)
try:
sftp = sftp_connect(conn_conf)
except Exception as e:
raise plugin.PluginError('Failed to connect to %s (%s)' % (host, e))
entries = []
def file_size(path):
"""
Helper function to get the size of a node
"""
return sftp.lstat(path).st_size
def dir_size(path):
"""
Walk a directory to get its size
"""
sizes = []
def node_size(f):
sizes.append(file_size(f))
sftp.walktree(path, node_size, node_size, node_size, True)
size = sum(sizes)
return size
def handle_node(path, size_handler, is_dir):
"""
Generic helper function for handling a remote file system node
"""
if is_dir and files_only:
return
url = urljoin(url_prefix, sftp.normalize(path))
title = remotepath.basename(path)
entry = Entry(title, url)
if get_size:
try:
size = size_handler(path)
except Exception as e:
log.error('Failed to get size for %s (%s)' % (path, e))
size = -1
entry['content_size'] = size
if private_key:
entry['private_key'] = private_key
if private_key_pass:
entry['private_key_pass'] = private_key_pass
entries.append(entry)
# create helper functions to handle files and directories
handle_file = partial(handle_node, size_handler=file_size, is_dir=False)
handle_dir = partial(handle_node, size_handler=dir_size, is_dir=True)
def handle_unknown(path):
"""
Skip unknown files
"""
log.warn('Skipping unknown file: %s' % path)
# the business end
for dir in dirs:
try:
sftp.walktree(dir, handle_file, handle_dir, handle_unknown, recursive)
except IOError as e:
log.error('Failed to open %s (%s)' % (dir, e))
continue
sftp.close()
return entries
class SftpDownload(object):
"""
Download files from a SFTP server. This plugin requires the pysftp Python module and its
dependencies.
Configuration:
to: Destination path; supports Jinja2 templating on the input entry. Fields such
as series_name must be populated prior to input into this plugin using
metainfo_series or similar.
    recursive: Indicates whether to download directory contents recursively.
    delete_origin: Indicates whether to delete the remote file(s) once they've been downloaded.
Example:
sftp_download:
to: '/Volumes/External/Drobo/downloads'
delete_origin: False
"""
schema = {
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'recursive': {'type': 'boolean', 'default': True},
'delete_origin': {'type': 'boolean', 'default': False}
},
'required': ['to'],
'additionalProperties': False
}
def get_sftp_config(self, entry):
"""
Parses a url and returns a hashable config, source path, and destination path
"""
# parse url
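        # e.g. 'sftp://user:password@example.com:22/remote/path' -- the same
        # URL shape that SftpList builds via url_prefix above (illustrative)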
parsed = urlparse(entry['url'])
host = parsed.hostname
username = parsed.username or None
password = parsed.password or None
port = parsed.port or 22
# get private key info if it exists
private_key = entry.get('private_key')
private_key_pass = entry.get('private_key_pass')
if parsed.scheme == 'sftp':
config = ConnectionConfig(host, port, username, password, private_key, private_key_pass)
else:
log.warn('Scheme does not match SFTP: %s' % entry['url'])
config = None
return config
def download_file(self, path, dest, sftp, delete_origin):
"""
Download a file from path to dest
"""
dir_name = remotepath.dirname(path)
dest_relpath = localpath.join(*remotepath.split(path)) # convert remote path style to local style
destination = localpath.join(dest, dest_relpath)
dest_dir = localpath.dirname(destination)
if localpath.exists(destination):
log.verbose('Destination file already exists. Skipping %s' % path)
return
if not localpath.exists(dest_dir):
os.makedirs(dest_dir)
log.verbose('Downloading file %s to %s' % (path, destination))
try:
sftp.get(path, destination)
except Exception as e:
log.error('Failed to download %s (%s)' % (path, e))
if localpath.exists(destination):
log.debug('Removing partially downloaded file %s' % destination)
os.remove(destination)
raise e
if delete_origin:
log.debug('Deleting remote file %s' % path)
try:
sftp.remove(path)
except Exception as e:
log.error('Failed to delete file %s (%s)' % (path, e))
return
self.remove_dir(sftp, dir_name)
def handle_dir(self, path):
"""
Dummy directory handler. Does nothing.
"""
pass
def handle_unknown(self, path):
"""
Dummy unknown file handler. Warns about unknown files.
"""
log.warn('Skipping unknown file %s' % path)
def remove_dir(self, sftp, path):
"""
Remove a directory if it's empty
"""
if sftp.exists(path) and not sftp.listdir(path):
log.debug('Attempting to delete directory %s' % path)
try:
sftp.rmdir(path)
except Exception as e:
log.error('Failed to delete directory %s (%s)' % (path, e))
def download_entry(self, entry, config, sftp):
"""
Downloads the file(s) described in entry
"""
path = urlparse(entry['url']).path or '.'
delete_origin = config['delete_origin']
recursive = config['recursive']
to = config['to']
if to:
try:
to = render_from_entry(to, entry)
except RenderError as e:
log.error('Could not render path: %s' % to)
entry.fail(e)
return
if not sftp.lexists(path):
log.error('Remote path does not exist: %s' % path)
return
if sftp.isfile(path):
source_file = remotepath.basename(path)
source_dir = remotepath.dirname(path)
try:
sftp.cwd(source_dir)
self.download_file(source_file, to, sftp, delete_origin)
except Exception as e:
error = 'Failed to download file %s (%s)' % (path, e)
log.error(error)
entry.fail(error)
elif sftp.isdir(path):
base_path = remotepath.normpath(remotepath.join(path, '..'))
dir_name = remotepath.basename(path)
handle_file = partial(self.download_file, dest=to, sftp=sftp, delete_origin=delete_origin)
try:
sftp.cwd(base_path)
sftp.walktree(dir_name, handle_file, self.handle_dir, self.handle_unknown, recursive)
except Exception as e:
error = 'Failed to download directory %s (%s)' % (path, e)
log.error(error)
entry.fail(error)
return
if delete_origin:
self.remove_dir(sftp, path)
else:
log.warn('Skipping unknown file %s' % path)
def on_task_download(self, task, config):
"""
Task handler for sftp_download plugin
"""
dependency_check()
# Download entries by host so we can reuse the connection
for sftp_config, entries in groupby(task.accepted, self.get_sftp_config):
if not sftp_config:
continue
error_message = None
sftp = None
try:
sftp = sftp_connect(sftp_config)
except Exception as e:
error_message = 'Failed to connect to %s (%s)' % (sftp_config.host, e)
log.error(error_message)
for entry in entries:
if sftp:
self.download_entry(entry, config, sftp)
else:
entry.fail(error_message)
if sftp:
sftp.close()
def on_task_output(self, task, config):
"""Count this as an output plugin."""
@event('plugin.register')
def register_plugin():
plugin.register(SftpList, 'sftp_list', api_ver=2)
plugin.register(SftpDownload, 'sftp_download', api_ver=2)
| thalamus/Flexget | flexget/plugins/plugin_sftp.py | Python | mit | 14,868 | 0.003228 |
"""
Views for PatreonProvider
https://www.patreon.com/platform/documentation/oauth
"""
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import API_URL, USE_API_V2, PatreonProvider
class PatreonOAuth2Adapter(OAuth2Adapter):
provider_id = PatreonProvider.id
access_token_url = "https://www.patreon.com/api/oauth2/token"
authorize_url = "https://www.patreon.com/oauth2/authorize"
profile_url = "{0}/{1}".format(
API_URL,
"identity?include=memberships&fields%5Buser%5D=email,first_name,"
"full_name,image_url,last_name,social_connections,"
"thumb_url,url,vanity"
if USE_API_V2
else "current_user",
)
def complete_login(self, request, app, token, **kwargs):
resp = requests.get(
self.profile_url,
headers={"Authorization": "Bearer " + token.token},
)
extra_data = resp.json().get("data")
if USE_API_V2:
# Extract tier/pledge level for Patreon API v2:
try:
member_id = extra_data["relationships"]["memberships"]["data"][0]["id"]
member_url = (
"{0}/members/{1}?include="
"currently_entitled_tiers&fields%5Btier%5D=title"
).format(API_URL, member_id)
resp_member = requests.get(
member_url,
headers={"Authorization": "Bearer " + token.token},
)
pledge_title = resp_member.json()["included"][0]["attributes"]["title"]
extra_data["pledge_level"] = pledge_title
            except (KeyError, IndexError):
                extra_data["pledge_level"] = None
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(PatreonOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(PatreonOAuth2Adapter)
| pennersr/django-allauth | allauth/socialaccount/providers/patreon/views.py | Python | mit | 2,047 | 0.001466 |
import sys, Tkinter, tkFont, ttk
sys.path.insert(0, "./src/")
import button, database
from config import *
# Note: the size for bg_canvas must be set here; otherwise it grows, disregarding the size set at creation!
def AuxscrollFunction(event):
bg_canvas.configure(scrollregion=bg_canvas.bbox("all"), height=THUMB_HEIGHT)
# create root
root = Tkinter.Tk()
root.geometry(str(WINDOW_WIDTH)+"x"+str(WINDOW_HEIGHT)+"+100+100")
root.minsize(width=WINDOW_WIDTH, height=WINDOW_HEIGHT)
root.title("Find Duplicated Photos")
Tkinter.Grid.columnconfigure(root, 0, weight=0)
Tkinter.Grid.columnconfigure(root, 1, weight=0)
Tkinter.Grid.columnconfigure(root, 2, weight=int(DISPLAY_WIDTH/INFO_WIDTH))
Tkinter.Grid.columnconfigure(root, 3, weight=0)
Tkinter.Grid.rowconfigure(root, 0, weight=int(DISPLAY_HEIGHT/THUMB_HEIGHT))
Tkinter.Grid.rowconfigure(root, 1, weight=0)
Tkinter.Grid.rowconfigure(root, 2, weight=0)
# create frame for displaying selected photo
display_photo_frame = Tkinter.Frame(root, height=DISPLAY_HEIGHT, width=DISPLAY_WIDTH)
display_photo_frame.grid(row=0, column=0, columnspan=3)
# create frame for displaying file info
display_photo_info_frame = Tkinter.Frame(root, height=DISPLAY_HEIGHT, width=INFO_WIDTH, background="white")
display_photo_info_frame.grid(row=0, column=3, sticky=Tkinter.E+Tkinter.W+Tkinter.N+Tkinter.S)
display_photo_info_frame.pack_propagate(False) # by default the frame will shrink to whatever is inside of it
# create background for scroll bar
bg_frame = Tkinter.Frame(root, height=THUMB_HEIGHT)
bg_frame.grid(row=1, column=0, columnspan=4, sticky=Tkinter.E+Tkinter.W+Tkinter.N+Tkinter.S)
bg_canvas = Tkinter.Canvas(bg_frame, background='white')
xscrollbar = Tkinter.Scrollbar(bg_frame, orient="horizontal", command=bg_canvas.xview)
xscrollbar.pack(side=Tkinter.BOTTOM, fill="x")
xscrollbar.grid_forget()
bg_canvas.configure(xscrollcommand=xscrollbar.set)
bg_canvas.pack(fill=Tkinter.BOTH, expand=True, pady=5)
# create frame for duplicated photo batch display
batch_photo_frame = Tkinter.Frame(bg_canvas, height=THUMB_HEIGHT, background='white')
bg_canvas.create_window((0,0),window=batch_photo_frame,anchor='nw')
batch_photo_frame.bind("<Configure>", AuxscrollFunction)
# Note: don't pack batch_photo_frame here, otherwise scroll bar won't show!!!
# create photo database and loading progress bar
progress_bar = ttk.Progressbar(root, orient=Tkinter.HORIZONTAL, length=PROGRESS_BAR_LENGTH, mode='determinate')
progress_bar.grid(row=2, column=2, columnspan=2, sticky=Tkinter.E+Tkinter.W, padx=10)
db = database.Database(progress_bar)
# create buttons
#button_cfg = button.ConfigButton(root, db, 2, 3)
button_next = button.NextBatchButton(root, batch_photo_frame, display_photo_frame, display_photo_info_frame, db, 2, 1)
button_open = button.OpenFolderButton(root, batch_photo_frame, db, button_next, 2, 0)
root.mainloop()
| ybdarrenwang/DuplicatedPhotoFinder | main.py | Python | bsd-3-clause | 2,866 | 0.008374 |
#!/usr/bin/env python
# (c) 2013, Greg Buehler
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Zabbix Server external inventory script.
========================================
Returns hosts and hostgroups from Zabbix Server.
If you want to run with --limit against a host group with a space in the
name, use an asterisk. For example --limit="Linux*servers".
Configuration is read from `zabbix.ini`.
Tested with Zabbix Server 2.0.6 and 3.2.3.
"""
from __future__ import print_function
import os
import sys
import argparse
try:
import ConfigParser as configparser
except ImportError:
import configparser
try:
from zabbix_api import ZabbixAPI
except ImportError:
print("Error: Zabbix API library must be installed: pip install zabbix-api.",
file=sys.stderr)
sys.exit(1)
import json
class ZabbixInventory(object):
def read_settings(self):
config = configparser.SafeConfigParser()
conf_path = './zabbix.ini'
if not os.path.exists(conf_path):
conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini'
if os.path.exists(conf_path):
config.read(conf_path)
# server
if config.has_option('zabbix', 'server'):
self.zabbix_server = config.get('zabbix', 'server')
# login
if config.has_option('zabbix', 'username'):
self.zabbix_username = config.get('zabbix', 'username')
if config.has_option('zabbix', 'password'):
self.zabbix_password = config.get('zabbix', 'password')
# ssl certs
if config.has_option('zabbix', 'validate_certs'):
if config.get('zabbix', 'validate_certs') in ['false', 'False', False]:
self.validate_certs = False
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def hoststub(self):
return {
'hosts': []
}
def get_host(self, api, name):
data = {'ansible_ssh_host': name}
return data
def get_list(self, api):
hostsData = api.host.get({'output': 'extend', 'selectGroups': 'extend'})
data = {}
data[self.defaultgroup] = self.hoststub()
for host in hostsData:
hostname = host['name']
data[self.defaultgroup]['hosts'].append(hostname)
for group in host['groups']:
groupname = group['name']
if groupname not in data:
data[groupname] = self.hoststub()
data[groupname]['hosts'].append(hostname)
# Prevents Ansible from calling this script for each server with --host
data['_meta'] = {'hostvars': self.meta}
return data
def __init__(self):
self.defaultgroup = 'group_all'
self.zabbix_server = None
self.zabbix_username = None
self.zabbix_password = None
self.validate_certs = True
self.meta = {}
self.read_settings()
self.read_cli()
if self.zabbix_server and self.zabbix_username:
try:
api = ZabbixAPI(server=self.zabbix_server, validate_certs=self.validate_certs)
api.login(user=self.zabbix_username, password=self.zabbix_password)
            except BaseException as e:
                print("Error: Could not login to Zabbix server (%s). Check your zabbix.ini." % e, file=sys.stderr)
sys.exit(1)
if self.options.host:
data = self.get_host(api, self.options.host)
print(json.dumps(data, indent=2))
elif self.options.list:
data = self.get_list(api)
print(json.dumps(data, indent=2))
else:
print("usage: --list ..OR.. --host <hostname>", file=sys.stderr)
sys.exit(1)
else:
print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr)
sys.exit(1)
ZabbixInventory()
| alexlo03/ansible | contrib/inventory/zabbix.py | Python | gpl-3.0 | 4,795 | 0.002086 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('credit', '0005_creditrequirement_sort_value'),
]
operations = [
migrations.AlterModelOptions(
name='creditrequirement',
options={'ordering': ['sort_value']},
),
]
| cpennington/edx-platform | openedx/core/djangoapps/credit/migrations/0006_creditrequirement_alter_ordering.py | Python | agpl-3.0 | 387 | 0 |
"""
:Requirements: django-tagging
This module contains some additional helper tags for the django-tagging
project. Note that the functionality here might already be present in
django-tagging but perhaps with some slightly different behaviour or
usage.
"""
from django import template
from django.core.urlresolvers import reverse as url_reverse
from tagging.utils import parse_tag_input
register = template.Library()
class TagsForObjectNode(template.Node):
def __init__(self, tags_string, urlname, junctor=None, last_junctor=None):
self.tags_string = template.Variable(tags_string)
        self.junctor = ', ' if junctor is None else junctor.strip('"')
        self.last_junctor = ' and ' if last_junctor is None else last_junctor.strip('"')
self.urlname = urlname
def render(self, context):
tags = parse_tag_input(self.tags_string.resolve(context))
tags = ['<a href="%s" rel="tag">%s</a>' % (url_reverse(self.urlname, kwargs={'tag':t}), t) for t in tags]
if len(tags) > 2:
first_part = self.junctor.join(tags[:-1])
return first_part + self.last_junctor + tags[-1]
if len(tags) == 2:
return self.last_junctor.join(tags)
return self.junctor.join(tags)
@register.tag('object_tags')
def tags_for_object(parser, token):
"""
Simple tag for rendering tags of an object
Usage::
{% object_tags object.tags blog-tag ", " " and " %}
    The last two arguments determine the junctors between the tag names,
    with the final one used before the last tag.
"""
variables = token.split_contents()[1:]
return TagsForObjectNode(*variables)
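# Illustrative rendering (assumes a URL pattern named 'blog-tag' taking a
# ``tag`` kwarg): for an object whose tags resolve to "django, python",
#
#     {% object_tags object.tags blog-tag %}
#
# produces roughly:
#
#     <a href="/tags/django/" rel="tag">django</a> and <a href="/tags/python/" rel="tag">python</a>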
| zerok/django-zsutils | django_zsutils/templatetags/zsutils/taghelpers.py | Python | bsd-3-clause | 1,704 | 0.005282 |
"""
Manages downloading and updating applications through the use of a zip file hosted on the HomePi server.
"""
import datetime
import os.path
import zipfile
import urllib2
# Test call to verify things work.
def print_info(archive_name):
zf = zipfile.ZipFile(archive_name)
for info in zf.infolist():
print info.filename
print '\tComment:\t', info.comment
print '\tModified:\t', datetime.datetime(*info.date_time)
print '\tSystem:\t\t', info.create_system, '(0 = Windows, 3 = Unix)'
print '\tZIP version:\t', info.create_version
print '\tCompressed:\t', info.compress_size, 'bytes'
print '\tUncompressed:\t', info.file_size, 'bytes'
print
# Extracts files from the given archive into targetLocation, preserving the archive folder structure.
def extractFiles(archive_name,targetLocation):
zf = zipfile.ZipFile(archive_name)
zf.extractall(path=targetLocation)
# Download archive file for unpacking.
def retrieveZipfile(saveLocation, archiveURI, currentVersion = -1):
fileName = os.path.basename(archiveURI)
print 'downloading file: %s' % fileName
try:
response = urllib2.urlopen(archiveURI)
        # Check whether the server advertises a newer version
serverVersion = 0
if response.info().getheader('file-version') is not None:
serverVersion = int(response.info().getheader('file-version'))
        # Version check: download only if the server copy is newer
        if currentVersion < serverVersion:
fileDest = os.path.join(saveLocation,fileName)
with open(fileDest, "wb") as code:
code.write(response.read())
print 'Download done'
except urllib2.HTTPError as h:
print 'Error downloading file: %s' % h
# Test that everything runs smoothly; should add verification and cleanup
if __name__ == '__main__':
#print_info('../test/resources/ansi161.zip')
extractFiles('../test/resources/ansi161.zip', '../test/resources/temp/')
retrieveZipfile( '../test/resources/temp/', 'http://the.earth.li/~sgtatham/putty/latest/x86/putty.zip') | leeclarke/homePi | src/python/zipInstall.py | Python | gpl-3.0 | 2,124 | 0.013183 |
"""
Unit tests for optimization routines from minpack.py.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_, assert_almost_equal, assert_array_equal, \
assert_array_almost_equal, TestCase, run_module_suite, assert_raises, \
assert_allclose
import numpy as np
from numpy import array, float64, matrix
from scipy import optimize
from scipy.optimize.minpack import leastsq, curve_fit, fixed_point
class ReturnShape(object):
"""This class exists to create a callable that does not have a '__name__' attribute.
__init__ takes the argument 'shape', which should be a tuple of ints. When an instance
    is called with a single argument 'x', it returns numpy.ones(shape).
"""
def __init__(self, shape):
self.shape = shape
def __call__(self, x):
return np.ones(self.shape)
def dummy_func(x, shape):
"""A function that returns an array of ones of the given shape.
`x` is ignored.
"""
return np.ones(shape)
# Function and jacobian for tests of solvers for systems of nonlinear
# equations
def pressure_network(flow_rates, Qtot, k):
"""Evaluate non-linear equation system representing
the pressures and flows in a system of n parallel pipes::
f_i = P_i - P_0, for i = 1..n
f_0 = sum(Q_i) - Qtot
Where Q_i is the flow rate in pipe i and P_i the pressure in that pipe.
    Pressure is modeled as P = k*Q**2, where k is a valve coefficient and
Q is the flow rate.
Parameters
----------
    flow_rates : ndarray
        A 1D array of n flow rates [kg/s].
    Qtot : float
        A scalar, the total input flow rate [kg/s].
    k : ndarray
        A 1D array of n valve coefficients [1/kg m].
Returns
-------
    F : ndarray
A 1D array, F[i] == f_i.
"""
P = k * flow_rates**2
F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot))
return F
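# Quick sanity check (illustrative, not part of the test suite): with four
# identical pipes and Qtot = 4, the equal-flow vector is a root of the system.
#
#     >>> pressure_network(np.ones(4), 4, np.ones(4) * 0.5)
#     array([ 0.,  0.,  0.,  0.])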
def pressure_network_jacobian(flow_rates, Qtot, k):
"""Return the jacobian of the equation system F(flow_rates)
computed by `pressure_network` with respect to
*flow_rates*. See `pressure_network` for the detailed
    description of parameters.
Returns
-------
    jac : ndarray
*n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)``
and *f_i* and *Q_i* are described in the doc for `pressure_network`
"""
n = len(flow_rates)
    pdiff = np.diag(flow_rates[1:] * 2 * k[1:])
    jac = np.empty((n, n))
    jac[:n-1, 1:n] = pdiff
    jac[:n-1, 0] = -2 * k[0] * flow_rates[0]
    jac[n-1, :] = np.ones(n)
return jac
def pressure_network_fun_and_grad(flow_rates, Qtot, k):
return pressure_network(flow_rates, Qtot, k), \
pressure_network_jacobian(flow_rates, Qtot, k)
class TestFSolve(TestCase):
def test_pressure_network_no_gradient(self):
"""fsolve without gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows, info, ier, mesg = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
full_output=True)
assert_array_almost_equal(final_flows, np.ones(4))
assert_(ier == 1, mesg)
def test_pressure_network_with_gradient(self):
"""fsolve with gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
fprime=pressure_network_jacobian)
assert_array_almost_equal(final_flows, np.ones(4))
def test_wrong_shape_func_callable(self):
"""The callable 'func' has no '__name__' attribute."""
func = ReturnShape(1)
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, func, x0)
def test_wrong_shape_func_function(self):
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))
def test_wrong_shape_fprime_callable(self):
"""The callables 'func' and 'deriv_func' have no '__name__' attribute."""
func = ReturnShape(1)
deriv_func = ReturnShape((2,2))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_wrong_shape_fprime_function(self):
func = lambda x: dummy_func(x, (2,))
deriv_func = lambda x: dummy_func(x, (3,3))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_float32(self):
func = lambda x: np.array([x[0] - 1000, x[1] - 10000], dtype=np.float32)**2
p = optimize.fsolve(func, np.array([1, 1], np.float32))
assert_allclose(func(p), [0, 0], atol=1e-3)
class TestRootHybr(TestCase):
def test_pressure_network_no_gradient(self):
"""root/hybr without gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='hybr', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient(self):
"""root/hybr with gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = matrix([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
args=(Qtot, k), method='hybr',
jac=pressure_network_jacobian).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient_combined(self):
"""root/hybr with gradient and function combined, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network_fun_and_grad,
initial_guess, args=(Qtot, k),
method='hybr', jac=True).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestRootLM(TestCase):
def test_pressure_network_no_gradient(self):
"""root/lm without gradient, equal pipes -> equal flows"""
k = np.ones(4) * 0.5
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='lm', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestLeastSq(TestCase):
def setUp(self):
x = np.linspace(0, 10, 40)
a,b,c = 3.1, 42, -304.2
self.x = x
self.abc = a,b,c
y_true = a*x**2 + b*x + c
np.random.seed(0)
self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)
def residuals(self, p, y, x):
a,b,c = p
err = y-(a*x**2 + b*x + c)
return err
def test_basic(self):
p0 = array([0,0,0])
params_fit, ier = leastsq(self.residuals, p0,
args=(self.y_meas, self.x))
assert_(ier in (1,2,3,4), 'solution not found (ier=%d)'%ier)
# low precision due to random
assert_array_almost_equal(params_fit, self.abc, decimal=2)
def test_full_output(self):
p0 = matrix([0,0,0])
full_output = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
full_output=True)
params_fit, cov_x, infodict, mesg, ier = full_output
assert_(ier in (1,2,3,4), 'solution not found: %s'%mesg)
def test_input_untouched(self):
p0 = array([0,0,0],dtype=float64)
p0_copy = array(p0, copy=True)
full_output = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
full_output=True)
params_fit, cov_x, infodict, mesg, ier = full_output
assert_(ier in (1,2,3,4), 'solution not found: %s'%mesg)
assert_array_equal(p0, p0_copy)
def test_wrong_shape_func_callable(self):
"""The callable 'func' has no '__name__' attribute."""
func = ReturnShape(1)
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.leastsq, func, x0)
def test_wrong_shape_func_function(self):
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),))
def test_wrong_shape_Dfun_callable(self):
"""The callables 'func' and 'deriv_func' have no '__name__' attribute."""
func = ReturnShape(1)
deriv_func = ReturnShape((2,2))
assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
def test_wrong_shape_Dfun_function(self):
func = lambda x: dummy_func(x, (2,))
deriv_func = lambda x: dummy_func(x, (3,3))
assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
def test_float32(self):
        # From Trac ticket #920
def func(p,x,y):
q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3]
return q - y
x = np.array([ 1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286,
1.231], dtype=np.float32)
y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258,
0.034,0.0396], dtype=np.float32)
p0 = np.array([1.0,1.0,1.0,1.0])
p1, success = optimize.leastsq(func, p0, args=(x,y))
assert_(success in [1,2,3,4])
assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum())
class TestCurveFit(TestCase):
def setUp(self):
self.y = array([1.0, 3.2, 9.5, 13.7])
self.x = array([1.0, 2.0, 3.0, 4.0])
def test_one_argument(self):
def func(x,a):
return x**a
popt, pcov = curve_fit(func, self.x, self.y)
assert_(len(popt) == 1)
assert_(pcov.shape == (1,1))
assert_almost_equal(popt[0], 1.9149, decimal=4)
assert_almost_equal(pcov[0,0], 0.0016, decimal=4)
# Test if we get the same with full_output. Regression test for #1415.
res = curve_fit(func, self.x, self.y, full_output=1)
(popt2, pcov2, infodict, errmsg, ier) = res
assert_array_almost_equal(popt, popt2)
def test_two_argument(self):
def func(x, a, b):
return b*x**a
popt, pcov = curve_fit(func, self.x, self.y)
assert_(len(popt) == 2)
assert_(pcov.shape == (2,2))
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
assert_array_almost_equal(pcov, [[0.0852, -0.1260],[-0.1260, 0.1912]],
decimal=4)
def test_func_is_classmethod(self):
class test_self(object):
"""This class tests if curve_fit passes the correct number of
arguments when the model function is a class instance method.
"""
def func(self, x, a, b):
return b * x**a
test_self_inst = test_self()
popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
assert_(pcov.shape == (2,2))
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
decimal=4)
class TestFixedPoint(TestCase):
def test_scalar_trivial(self):
"""f(x) = 2x; fixed point should be x=0"""
def func(x):
return 2.0*x
x0 = 1.0
x = fixed_point(func, x0)
assert_almost_equal(x, 0.0)
def test_scalar_basic1(self):
"""f(x) = x**2; x0=1.05; fixed point should be x=1"""
def func(x):
return x**2
x0 = 1.05
x = fixed_point(func, x0)
assert_almost_equal(x, 1.0)
def test_scalar_basic2(self):
"""f(x) = x**0.5; x0=1.05; fixed point should be x=1"""
def func(x):
return x**0.5
x0 = 1.05
x = fixed_point(func, x0)
assert_almost_equal(x, 1.0)
def test_array_trivial(self):
def func(x):
return 2.0*x
x0 = [0.3, 0.15]
olderr = np.seterr(all='ignore')
try:
x = fixed_point(func, x0)
finally:
np.seterr(**olderr)
assert_almost_equal(x, [0.0, 0.0])
def test_array_basic1(self):
"""f(x) = c * x**2; fixed point should be x=1/c"""
def func(x, c):
return c * x**2
c = array([0.75, 1.0, 1.25])
x0 = [1.1, 1.15, 0.9]
olderr = np.seterr(all='ignore')
try:
x = fixed_point(func, x0, args=(c,))
finally:
np.seterr(**olderr)
assert_almost_equal(x, 1.0/c)
def test_array_basic2(self):
"""f(x) = c * x**0.5; fixed point should be x=c**2"""
def func(x, c):
return c * x**0.5
c = array([0.75, 1.0, 1.25])
x0 = [0.8, 1.1, 1.1]
x = fixed_point(func, x0, args=(c,))
assert_almost_equal(x, c**2)
if __name__ == "__main__":
run_module_suite()
| sargas/scipy | scipy/optimize/tests/test_minpack.py | Python | bsd-3-clause | 13,744 | 0.006694 |
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test management of KeyboardInterrupt in stratisd.
"""
# isort: LOCAL
import stratis_cli
from .._misc import SimTestCase
class KeyboardInterruptTestCase(SimTestCase):
"""
Test behavior of stratis on KeyboardInterrupt.
"""
def test_catch_keyboard_exception(self):
"""
Verify that the KeyboardInterrupt is propagated by the run() method.
./bin/stratis contains a try block at the outermost level which
then catches the KeyboardInterrupt and exits with an error message.
The KeyboardInterrupt is most likely raised in the dbus-python
method which is actually communicating on the D-Bus, but it is
        fairly difficult to get at that method. Instead, settle for getting
at the calling method generated by dbus-python-client-gen.
"""
def raise_keyboard_interrupt(_):
"""
Just raise the interrupt.
"""
raise KeyboardInterrupt()
# pylint: disable=import-outside-toplevel
# isort: LOCAL
from stratis_cli._actions import _data
# pylint: disable=protected-access
stratis_cli._actions._data.Manager.Properties.Version.Get = (
raise_keyboard_interrupt
)
with self.assertRaises(KeyboardInterrupt):
stratis_cli.run()(["daemon", "version"])
| stratis-storage/stratis-cli | tests/whitebox/monkey_patching/test_keyboard_interrupt.py | Python | apache-2.0 | 1,934 | 0 |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.signal import lfilter
def filter_and_sample(y_big, W, DT, window='rectangular', even_NSTEPS=True, detrend=False, drop_first=True):
"""Filter signal with moving average window of width W and then sample it
with time step DT."""
if (W > 1):
if (window == 'rectangular'):
y_f = lfilter((1. / W) * np.ones(W), 1., y_big, axis=0)
else:
raise NotImplementedError('Not implemented window type.')
# drop first W steps (initial conditions)
if drop_first:
y = y_f[(W - 1)::DT]
else:
y = y_f[::DT]
else:
y = y_big[::DT]
# remove the mean
if detrend:
y = y - np.mean(y, axis=0)
# keeps an even number of points
if even_NSTEPS:
if (y.shape[0] % 2 == 1):
return y[:-1]
return y
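# Minimal usage sketch (hypothetical signal; relies on the numpy import
# above): smooth a noisy trace with a 4-sample rectangular window, then keep
# every 4th point.
#
#     >>> t = np.linspace(0, 10, 1000)
#     >>> y_big = np.sin(t) + 0.1 * np.random.randn(t.size)
#     >>> y = filter_and_sample(y_big, W=4, DT=4)
#     >>> y.shape[0]
#     250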
def resample_psd(freqs, psd, cutfrequency):
if (cutfrequency >= freqs[-1]):
return freqs, psd
NFREQS = freqs.size - 1
cutidx = (np.abs(freqs - cutfrequency)).argmin()
if (NFREQS % cutidx == 0): # cut frequency is sub-multiple of max freq
DT = NFREQS / cutidx
if (DT > 2):
raise Warning('DT Not implemented.')
newpsd = psd.copy()[:cutidx + 1]
newpsd = newpsd + psd[:-cutidx - 2:-1]
newfreqs = freqs[:cutidx + 1]
#log.write_log(cutidx, DT, freqs[cutidx], newpsd.size)
else:
raise NotImplementedError('Not implemented.')
return newfreqs, newpsd
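# Illustrative behaviour (hypothetical numbers): cutting a flat PSD sampled on
# 11 frequencies at half the maximum frequency folds the upper half of the
# spectrum back onto the lower one, halving the number of points:
#
#     >>> f = np.linspace(0.0, 1.0, 11)
#     >>> newf, newpsd = resample_psd(f, np.ones(11), 0.5)
#     >>> newf.size, newpsd.size
#     (6, 6)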
| lorisercole/thermocepstrum | sportran/md/tools/resample.py | Python | gpl-3.0 | 1,531 | 0.001306 |
import pathlib
import argparse
import os
IMGFORMAT = 'JPG'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Symlink a local album directory to the "galleries" subdirectory in a local Piwigo instance.')
parser.add_argument('src_album', type=str, help='Location of album to symlink, relative to ALBUMS_ROOT')
parser.add_argument('piwigo_dir', type=str, help='Location of local Piwigo instance (e.g. /srv/http/piwigo)')
parser.add_argument('--sudo', '-su', action='store_true', help='Execute shell commands using sudo')
parser.add_argument('--range', type=str, default=None, help='Only create symlinks for photos in numeric range')
args = parser.parse_args()
src_album, piwigo_dir, use_sudo = args.src_album, args.piwigo_dir, args.sudo
minrange = int(args.range.split('-')[0]) if args.range is not None else 0
maxrange = int(args.range.split('-')[1]) if args.range is not None else 1000000
albums_root = os.getenv('ALBUMS_ROOT', f'{str(pathlib.Path.home())}/Pictures/Albums')
def sh(command):
if use_sudo:
command = f'sudo {command}'
os.popen(command).read()
def symlink_img(imgfilename):
piwigo_album_dir = f'{piwigo_dir}/galleries/{src_album}'
if not os.path.exists(piwigo_album_dir):
sh(f'mkdir -p {piwigo_album_dir}')
sh(f'ln -s {albums_root}/{src_album}/{IMGFORMAT}/{imgfilename} {piwigo_album_dir}')
def is_expected_imgformat(imgfilename):
return imgfilename.split('.')[-1].lower() == IMGFORMAT.lower()
def is_in_range(imgfilename):
imgnum = int(imgfilename.split('.')[0].split('_')[-1])
return minrange <= imgnum <= maxrange
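    # e.g. a file named 'IMG_0042.JPG' (hypothetical) yields imgnum 42:
    # is_in_range() parses the numeric suffix after the last underscore.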
imgs = list(filter(lambda file: is_expected_imgformat(file) and is_in_range(file), os.listdir(f'{albums_root}/{src_album}/{IMGFORMAT}')))
for img in imgs:
symlink_img(img)
| nickmcummins/misc-tools | piwigo/piwigo_symlink_local_album.py | Python | gpl-3.0 | 1,897 | 0.005799 |
"""
Contains all unit tests for the Tools app.
"""
# Django
# from django.conf import settings
# from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.urls import reverse
# from django.utils import timezone
# local Django
# from accounts.models import User
# from events.forms import EventForm
# from sigs.models import SIG
# from events.models import Event
class HomeViewCase(TestCase):
"""
    Tests that the views of the Tools app respond correctly.
"""
def test_view_responses(self):
"""
        Requests the membership page and asserts a 200 (success) response
        code and the expected template.
"""
response = self.client.get(reverse('tools:membership'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'tools/membership.html')
| sigdotcom/acm.mst.edu | ACM_General/tools/tests.py | Python | gpl-3.0 | 857 | 0 |
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Girls With Slingshots'
language = 'en'
url = 'http://www.girlswithslingshots.com/'
start_date = '2004-09-30'
rights = 'Danielle Corsetto'
class Crawler(CrawlerBase):
history_capable_days = 30
schedule = 'Mo,Tu,We,Th,Fr'
time_zone = 'US/Eastern'
def crawl(self, pub_date):
feed = self.parse_feed('http://www.girlswithslingshots.com/feed/')
for entry in feed.for_date(pub_date):
page = self.parse_page(entry.link)
url = page.src('img#comic')
title = entry.title.replace('Girls with Slingshots - ', '')
text = page.title('img#comic')
return CrawlerImage(url, title, text)
| datagutten/comics | comics/comics/gws.py | Python | agpl-3.0 | 839 | 0 |
import unittest
import os
import os.path
import contextlib
import sys
import test._mock_backport as mock
import test.test_support
import ensurepip
import ensurepip._uninstall
class TestEnsurePipVersion(unittest.TestCase):
def test_returns_version(self):
self.assertEqual(ensurepip._PIP_VERSION, ensurepip.version())
class EnsurepipMixin:
def setUp(self):
run_pip_patch = mock.patch("ensurepip._run_pip")
self.run_pip = run_pip_patch.start()
self.addCleanup(run_pip_patch.stop)
# Avoid side effects on the actual os module
real_devnull = os.devnull
os_patch = mock.patch("ensurepip.os")
patched_os = os_patch.start()
self.addCleanup(os_patch.stop)
patched_os.devnull = real_devnull
patched_os.path = os.path
self.os_environ = patched_os.environ = os.environ.copy()
class TestBootstrap(EnsurepipMixin, unittest.TestCase):
def test_basic_bootstrapping(self):
ensurepip.bootstrap()
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "setuptools", "pip",
],
mock.ANY,
)
additional_paths = self.run_pip.call_args[0][1]
self.assertEqual(len(additional_paths), 2)
def test_bootstrapping_with_root(self):
ensurepip.bootstrap(root="/foo/bar/")
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "--root", "/foo/bar/",
"setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_user(self):
ensurepip.bootstrap(user=True)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "--user", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_upgrade(self):
ensurepip.bootstrap(upgrade=True)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "--upgrade", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_verbosity_1(self):
ensurepip.bootstrap(verbosity=1)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "-v", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_verbosity_2(self):
ensurepip.bootstrap(verbosity=2)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "-vv", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_verbosity_3(self):
ensurepip.bootstrap(verbosity=3)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "-vvv", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_regular_install(self):
ensurepip.bootstrap()
self.assertEqual(self.os_environ["ENSUREPIP_OPTIONS"], "install")
def test_bootstrapping_with_alt_install(self):
ensurepip.bootstrap(altinstall=True)
self.assertEqual(self.os_environ["ENSUREPIP_OPTIONS"], "altinstall")
def test_bootstrapping_with_default_pip(self):
ensurepip.bootstrap(default_pip=True)
self.assertNotIn("ENSUREPIP_OPTIONS", self.os_environ)
def test_altinstall_default_pip_conflict(self):
with self.assertRaises(ValueError):
ensurepip.bootstrap(altinstall=True, default_pip=True)
self.assertFalse(self.run_pip.called)
def test_pip_environment_variables_removed(self):
# ensurepip deliberately ignores all pip environment variables
# See http://bugs.python.org/issue19734 for details
self.os_environ["PIP_THIS_SHOULD_GO_AWAY"] = "test fodder"
ensurepip.bootstrap()
self.assertNotIn("PIP_THIS_SHOULD_GO_AWAY", self.os_environ)
def test_pip_config_file_disabled(self):
# ensurepip deliberately ignores the pip config file
# See http://bugs.python.org/issue20053 for details
ensurepip.bootstrap()
self.assertEqual(self.os_environ["PIP_CONFIG_FILE"], os.devnull)
@contextlib.contextmanager
def fake_pip(version=ensurepip._PIP_VERSION):
if version is None:
pip = None
else:
class FakePip():
__version__ = version
pip = FakePip()
sentinel = object()
orig_pip = sys.modules.get("pip", sentinel)
sys.modules["pip"] = pip
try:
yield pip
finally:
if orig_pip is sentinel:
del sys.modules["pip"]
else:
sys.modules["pip"] = orig_pip
class TestUninstall(EnsurepipMixin, unittest.TestCase):
def test_uninstall_skipped_when_not_installed(self):
with fake_pip(None):
ensurepip._uninstall_helper()
self.assertFalse(self.run_pip.called)
def test_uninstall_skipped_with_warning_for_wrong_version(self):
with fake_pip("not a valid version"):
with test.test_support.captured_stderr() as stderr:
ensurepip._uninstall_helper()
warning = stderr.getvalue().strip()
self.assertIn("only uninstall a matching version", warning)
self.assertFalse(self.run_pip.called)
def test_uninstall(self):
with fake_pip():
ensurepip._uninstall_helper()
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "pip",
"setuptools",
]
)
def test_uninstall_with_verbosity_1(self):
with fake_pip():
ensurepip._uninstall_helper(verbosity=1)
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "-v", "pip",
"setuptools",
]
)
def test_uninstall_with_verbosity_2(self):
with fake_pip():
ensurepip._uninstall_helper(verbosity=2)
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "-vv", "pip",
"setuptools",
]
)
def test_uninstall_with_verbosity_3(self):
with fake_pip():
ensurepip._uninstall_helper(verbosity=3)
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "-vvv",
"pip", "setuptools",
]
)
def test_pip_environment_variables_removed(self):
# ensurepip deliberately ignores all pip environment variables
# See http://bugs.python.org/issue19734 for details
self.os_environ["PIP_THIS_SHOULD_GO_AWAY"] = "test fodder"
with fake_pip():
ensurepip._uninstall_helper()
self.assertNotIn("PIP_THIS_SHOULD_GO_AWAY", self.os_environ)
def test_pip_config_file_disabled(self):
# ensurepip deliberately ignores the pip config file
# See http://bugs.python.org/issue20053 for details
with fake_pip():
ensurepip._uninstall_helper()
self.assertEqual(self.os_environ["PIP_CONFIG_FILE"], os.devnull)
# Basic testing of the main functions and their argument parsing
EXPECTED_VERSION_OUTPUT = "pip " + ensurepip._PIP_VERSION
class TestBootstrappingMainFunction(EnsurepipMixin, unittest.TestCase):
def test_bootstrap_version(self):
with test.test_support.captured_stderr() as stderr:
with self.assertRaises(SystemExit):
ensurepip._main(["--version"])
result = stderr.getvalue().strip()
self.assertEqual(result, EXPECTED_VERSION_OUTPUT)
self.assertFalse(self.run_pip.called)
def test_basic_bootstrapping(self):
ensurepip._main([])
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "setuptools", "pip",
],
mock.ANY,
)
additional_paths = self.run_pip.call_args[0][1]
self.assertEqual(len(additional_paths), 2)
class TestUninstallationMainFunction(EnsurepipMixin, unittest.TestCase):
def test_uninstall_version(self):
with test.test_support.captured_stderr() as stderr:
with self.assertRaises(SystemExit):
ensurepip._uninstall._main(["--version"])
result = stderr.getvalue().strip()
self.assertEqual(result, EXPECTED_VERSION_OUTPUT)
self.assertFalse(self.run_pip.called)
def test_basic_uninstall(self):
with fake_pip():
ensurepip._uninstall._main([])
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "pip",
"setuptools",
]
)
if __name__ == "__main__":
test.test_support.run_unittest(__name__)
| sometallgit/AutoUploader | Python27/Lib/test/test_ensurepip.py | Python | mit | 9,313 | 0 |
# -*- coding: utf-8 -*-
"""Tests for psychopy.compatibility"""
from builtins import object
import os
from psychopy import constants, compatibility
import pytest
pytestmark = pytest.mark.skipif(
constants.PY3,
reason='Python3 cannot import the old-style pickle files')
thisPath = os.path.split(__file__)[0]
fixtures_path = os.path.join(thisPath, '..', 'data')
class _baseCompatibilityTest(object):
def test_FromFile(self):
dat = compatibility.fromFile(self.test_psydat)
class TestOldTrialHandler(_baseCompatibilityTest):
"""Test Old Trial Handler"""
def setup(self):
self.test_psydat = os.path.join(fixtures_path, 'oldstyle.psydat')
self.test_class = "<class 'psychopy.data.TrialHandler'>"
class TestNewTrialHandler(_baseCompatibilityTest):
"""Test New-styel Trial Handler"""
def setup(self):
self.test_psydat = os.path.join(fixtures_path, 'oldstyle.psydat')
self.test_class = "<class 'psychopy.data.TrialHandler'>"
class TestOldStairHandler(_baseCompatibilityTest):
"""Test Old Trial Handler"""
def setup(self):
self.test_psydat = os.path.join(fixtures_path, 'oldstyle_stair.psydat')
self.test_class = "<class 'psychopy.data.StairHandler'>"
| hoechenberger/psychopy | psychopy/tests/test_compatibility/test_compatibility.py | Python | gpl-3.0 | 1,242 | 0.000805 |
'''Script used to test bucketlist item responses and requests.'''
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from django.core.urlresolvers import reverse_lazy
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User
from .test_bucketlist import ApiHeaderAuthorization
class ApiUserBucketlistItems(ApiHeaderAuthorization):
def test_user_can_addbucketlist(self):
        data = {'name': 'item', 'done': True}
        url = reverse_lazy('addbucketitem', kwargs={'id': 19})
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class ApiUserItemListDetail(ApiHeaderAuthorization):
def test_user_can_updatebucketlist(self):
        data = {'name': 'updateitem', 'done': True}
        url = reverse_lazy('itemdetail', kwargs={'id': 19, 'item_id': 24})
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_user_cannot_updatebucketlist(self):
        data = {'name': '', 'done': ''}
        url = reverse_lazy('itemdetail', kwargs={'id': 19, 'item_id': 24})
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_deletebucketlist(self):
        url = reverse_lazy('itemdetail', kwargs={'id': 19, 'item_id': 24})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
| andela-sjames/django-bucketlist-application | bucketlistapp/bucketlistapi/tests/test_bucketlistitems.py | Python | gpl-3.0 | 1,569 | 0.01211 |
from __future__ import unicode_literals
from decimal import Decimal
import boto
import boto3
from boto3.dynamodb.conditions import Key
import sure # noqa
from freezegun import freeze_time
from moto import mock_dynamodb2
from boto.exception import JSONResponseError
from tests.helpers import requires_boto_gte
try:
from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey, AllIndex
from boto.dynamodb2.table import Item, Table
from boto.dynamodb2.types import STRING, NUMBER
from boto.dynamodb2.exceptions import ValidationException
from boto.dynamodb2.exceptions import ConditionalCheckFailedException
except ImportError:
pass
def create_table():
table = Table.create('messages', schema=[
HashKey('forum_name'),
RangeKey('subject'),
], throughput={
'read': 10,
'write': 10,
})
return table
def create_table_with_local_indexes():
table = Table.create(
'messages',
schema=[
HashKey('forum_name'),
RangeKey('subject'),
],
throughput={
'read': 10,
'write': 10,
},
indexes=[
AllIndex(
'threads_index',
parts=[
HashKey('forum_name', data_type=STRING),
RangeKey('threads', data_type=NUMBER),
]
)
]
)
return table
def iterate_results(res):
    for _ in res:
        pass
@requires_boto_gte("2.9")
@mock_dynamodb2
@freeze_time("2012-01-14")
def test_create_table():
table = create_table()
expected = {
'Table': {
'AttributeDefinitions': [
{'AttributeName': 'forum_name', 'AttributeType': 'S'},
{'AttributeName': 'subject', 'AttributeType': 'S'}
],
'ProvisionedThroughput': {
'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10
},
'TableSizeBytes': 0,
'TableName': 'messages',
'TableStatus': 'ACTIVE',
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'forum_name'},
{'KeyType': 'RANGE', 'AttributeName': 'subject'}
],
'LocalSecondaryIndexes': [],
'ItemCount': 0, 'CreationDateTime': 1326499200.0,
'GlobalSecondaryIndexes': [],
}
}
table.describe().should.equal(expected)
@requires_boto_gte("2.9")
@mock_dynamodb2
@freeze_time("2012-01-14")
def test_create_table_with_local_index():
table = create_table_with_local_indexes()
expected = {
'Table': {
'AttributeDefinitions': [
{'AttributeName': 'forum_name', 'AttributeType': 'S'},
{'AttributeName': 'subject', 'AttributeType': 'S'},
{'AttributeName': 'threads', 'AttributeType': 'N'}
],
'ProvisionedThroughput': {
'NumberOfDecreasesToday': 0,
'WriteCapacityUnits': 10,
'ReadCapacityUnits': 10,
},
'TableSizeBytes': 0,
'TableName': 'messages',
'TableStatus': 'ACTIVE',
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'forum_name'},
{'KeyType': 'RANGE', 'AttributeName': 'subject'}
],
'LocalSecondaryIndexes': [
{
'IndexName': 'threads_index',
'KeySchema': [
{'AttributeName': 'forum_name', 'KeyType': 'HASH'},
{'AttributeName': 'threads', 'KeyType': 'RANGE'}
],
'Projection': {'ProjectionType': 'ALL'}
}
],
'ItemCount': 0,
'CreationDateTime': 1326499200.0,
'GlobalSecondaryIndexes': [],
}
}
table.describe().should.equal(expected)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_delete_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
table = create_table()
conn.list_tables()["TableNames"].should.have.length_of(1)
table.delete()
conn.list_tables()["TableNames"].should.have.length_of(0)
conn.delete_table.when.called_with('messages').should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_update_table_throughput():
table = create_table()
table.throughput["read"].should.equal(10)
table.throughput["write"].should.equal(10)
table.update(throughput={
'read': 5,
'write': 15,
})
table.throughput["read"].should.equal(5)
table.throughput["write"].should.equal(15)
table.update(throughput={
'read': 5,
'write': 6,
})
table.describe()
table.throughput["read"].should.equal(5)
table.throughput["write"].should.equal(6)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_add_and_describe_and_update():
table = create_table()
ok = table.put_item(data={
'forum_name': 'LOLCat Forum',
'subject': 'Check this out!',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
})
ok.should.equal(True)
table.get_item(forum_name="LOLCat Forum", subject='Check this out!').should_not.be.none
returned_item = table.get_item(
forum_name='LOLCat Forum',
subject='Check this out!'
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'subject': 'Check this out!',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
})
returned_item['SentBy'] = 'User B'
returned_item.save(overwrite=True)
returned_item = table.get_item(
forum_name='LOLCat Forum',
subject='Check this out!'
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'subject': 'Check this out!',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM',
})
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_partial_save():
table = create_table()
data = {
'forum_name': 'LOLCat Forum',
'subject': 'The LOLz',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
}
table.put_item(data=data)
returned_item = table.get_item(forum_name="LOLCat Forum", subject='The LOLz')
returned_item['SentBy'] = 'User B'
returned_item.partial_save()
returned_item = table.get_item(
forum_name='LOLCat Forum',
subject='The LOLz'
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'subject': 'The LOLz',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
})
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_put_without_table():
table = Table('undeclared-table')
item_data = {
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = Item(table, item_data)
item.save.when.called_with().should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_missing_item():
table = create_table()
table.get_item.when.called_with(
hash_key='tester',
range_key='other',
).should.throw(ValidationException)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_item_with_undeclared_table():
table = Table('undeclared-table')
table.get_item.when.called_with(test_hash=3241526475).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_item_without_range_key():
table = Table.create('messages', schema=[
HashKey('test_hash'),
RangeKey('test_range'),
], throughput={
'read': 10,
'write': 10,
})
hash_key = 3241526475
range_key = 1234567890987
table.put_item(data={'test_hash': hash_key, 'test_range': range_key})
table.get_item.when.called_with(test_hash=hash_key).should.throw(ValidationException)
@requires_boto_gte("2.30.0")
@mock_dynamodb2
def test_delete_item():
table = create_table()
item_data = {
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = Item(table, item_data)
item['subject'] = 'Check this out!'
item.save()
table.count().should.equal(1)
response = item.delete()
response.should.equal(True)
table.count().should.equal(0)
item.delete().should.equal(False)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_delete_item_with_undeclared_table():
table = Table("undeclared-table")
item_data = {
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = Item(table, item_data)
item.delete.when.called_with().should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query():
table = create_table()
item_data = {
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
'subject': 'Check this out!'
}
item = Item(table, item_data)
item.save(overwrite=True)
item['forum_name'] = 'the-key'
item['subject'] = '456'
item.save(overwrite=True)
item['forum_name'] = 'the-key'
item['subject'] = '123'
item.save(overwrite=True)
item['forum_name'] = 'the-key'
item['subject'] = '789'
item.save(overwrite=True)
table.count().should.equal(4)
results = table.query_2(forum_name__eq='the-key', subject__gt='1', consistent=True)
expected = ["123", "456", "789"]
for index, item in enumerate(results):
item["subject"].should.equal(expected[index])
results = table.query_2(forum_name__eq="the-key", subject__gt='1', reverse=True)
for index, item in enumerate(results):
item["subject"].should.equal(expected[len(expected) - 1 - index])
results = table.query_2(forum_name__eq='the-key', subject__gt='1', consistent=True)
sum(1 for _ in results).should.equal(3)
results = table.query_2(forum_name__eq='the-key', subject__gt='234', consistent=True)
sum(1 for _ in results).should.equal(2)
results = table.query_2(forum_name__eq='the-key', subject__gt='9999')
sum(1 for _ in results).should.equal(0)
results = table.query_2(forum_name__eq='the-key', subject__beginswith='12')
sum(1 for _ in results).should.equal(1)
results = table.query_2(forum_name__eq='the-key', subject__beginswith='7')
sum(1 for _ in results).should.equal(1)
results = table.query_2(forum_name__eq='the-key', subject__between=['567', '890'])
sum(1 for _ in results).should.equal(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query_with_undeclared_table():
table = Table('undeclared')
results = table.query(
forum_name__eq='Amazon DynamoDB',
subject__beginswith='DynamoDB',
limit=1
)
iterate_results.when.called_with(results).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_scan():
table = create_table()
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item_data['forum_name'] = 'the-key'
item_data['subject'] = '456'
item = Item(table, item_data)
item.save()
item['forum_name'] = 'the-key'
item['subject'] = '123'
item.save()
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:09 PM',
'Ids': set([1, 2, 3]),
'PK': 7,
}
item_data['forum_name'] = 'the-key'
item_data['subject'] = '789'
item = Item(table, item_data)
item.save()
results = table.scan()
sum(1 for _ in results).should.equal(3)
results = table.scan(SentBy__eq='User B')
sum(1 for _ in results).should.equal(1)
results = table.scan(Body__beginswith='http')
sum(1 for _ in results).should.equal(3)
results = table.scan(Ids__null=False)
sum(1 for _ in results).should.equal(1)
results = table.scan(Ids__null=True)
sum(1 for _ in results).should.equal(2)
results = table.scan(PK__between=[8, 9])
sum(1 for _ in results).should.equal(0)
results = table.scan(PK__between=[5, 8])
sum(1 for _ in results).should.equal(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_scan_with_undeclared_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
conn.scan.when.called_with(
table_name='undeclared-table',
scan_filter={
"SentBy": {
"AttributeValueList": [{
"S": "User B"}
],
"ComparisonOperator": "EQ"
}
},
).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_write_batch():
table = create_table()
with table.batch_write() as batch:
batch.put_item(data={
'forum_name': 'the-key',
'subject': '123',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
})
batch.put_item(data={
'forum_name': 'the-key',
'subject': '789',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM',
})
table.count().should.equal(2)
with table.batch_write() as batch:
batch.delete_item(
forum_name='the-key',
subject='789'
)
table.count().should.equal(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_batch_read():
table = create_table()
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item_data['forum_name'] = 'the-key'
item_data['subject'] = '456'
item = Item(table, item_data)
item.save()
item = Item(table, item_data)
item_data['forum_name'] = 'the-key'
item_data['subject'] = '123'
item.save()
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM',
'Ids': set([1, 2, 3]),
'PK': 7,
}
item = Item(table, item_data)
item_data['forum_name'] = 'another-key'
item_data['subject'] = '789'
item.save()
results = table.batch_get(
keys=[
{'forum_name': 'the-key', 'subject': '123'},
{'forum_name': 'another-key', 'subject': '789'},
]
)
# Iterate through so that batch_item gets called
count = len([x for x in results])
count.should.equal(2)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_key_fields():
table = create_table()
kf = table.get_key_fields()
kf.should.equal(['forum_name', 'subject'])
@mock_dynamodb2
def test_create_with_global_indexes():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
Table.create('messages', schema=[
HashKey('subject'),
RangeKey('version'),
], global_indexes=[
GlobalAllIndex('topic-created_at-index',
parts=[
HashKey('topic'),
RangeKey('created_at', data_type='N')
],
throughput={
'read': 6,
'write': 1
}
),
])
table_description = conn.describe_table("messages")
table_description['Table']["GlobalSecondaryIndexes"].should.equal([
{
"IndexName": "topic-created_at-index",
"KeySchema": [
{
"AttributeName": "topic",
"KeyType": "HASH"
},
{
"AttributeName": "created_at",
"KeyType": "RANGE"
},
],
"Projection": {
"ProjectionType": "ALL"
},
"ProvisionedThroughput": {
"ReadCapacityUnits": 6,
"WriteCapacityUnits": 1,
}
}
])
@mock_dynamodb2
def test_query_with_global_indexes():
table = Table.create('messages', schema=[
HashKey('subject'),
RangeKey('version'),
], global_indexes=[
GlobalAllIndex('topic-created_at-index',
parts=[
HashKey('topic'),
RangeKey('created_at', data_type='N')
],
throughput={
'read': 6,
'write': 1
}
),
GlobalAllIndex('status-created_at-index',
parts=[
HashKey('status'),
RangeKey('created_at', data_type='N')
],
throughput={
'read': 2,
'write': 1
}
)
])
item_data = {
'subject': 'Check this out!',
'version': '1',
'created_at': 0,
'status': 'inactive'
}
item = Item(table, item_data)
item.save(overwrite=True)
item['version'] = '2'
item.save(overwrite=True)
results = table.query(status__eq='active')
list(results).should.have.length_of(0)
@mock_dynamodb2
def test_query_with_local_indexes():
table = create_table_with_local_indexes()
item_data = {
'forum_name': 'Cool Forum',
'subject': 'Check this out!',
'version': '1',
'threads': 1,
'status': 'inactive'
}
item = Item(table, item_data)
item.save(overwrite=True)
item['version'] = '2'
item.save(overwrite=True)
results = table.query(forum_name__eq='Cool Forum', index='threads_index', threads__eq=1)
list(results).should.have.length_of(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query_filter_eq():
table = create_table_with_local_indexes()
item_data = [
{
'forum_name': 'Cool Forum',
'subject': 'Check this out!',
'version': '1',
'threads': 1,
},
{
'forum_name': 'Cool Forum',
'subject': 'Read this now!',
'version': '1',
'threads': 5,
},
{
'forum_name': 'Cool Forum',
'subject': 'Please read this... please',
'version': '1',
'threads': 0,
}
]
for data in item_data:
item = Item(table, data)
item.save(overwrite=True)
results = table.query_2(
forum_name__eq='Cool Forum', index='threads_index', threads__eq=5
)
list(results).should.have.length_of(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query_filter_lt():
table = create_table_with_local_indexes()
item_data = [
{
'forum_name': 'Cool Forum',
'subject': 'Check this out!',
'version': '1',
'threads': 1,
},
{
'forum_name': 'Cool Forum',
'subject': 'Read this now!',
'version': '1',
'threads': 5,
},
{
'forum_name': 'Cool Forum',
'subject': 'Please read this... please',
'version': '1',
'threads': 0,
}
]
for data in item_data:
item = Item(table, data)
item.save(overwrite=True)
results = table.query(
forum_name__eq='Cool Forum', index='threads_index', threads__lt=5
)
results = list(results)
results.should.have.length_of(2)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query_filter_gt():
table = create_table_with_local_indexes()
item_data = [
{
'forum_name': 'Cool Forum',
'subject': 'Check this out!',
'version': '1',
'threads': 1,
},
{
'forum_name': 'Cool Forum',
'subject': 'Read this now!',
'version': '1',
'threads': 5,
},
{
'forum_name': 'Cool Forum',
'subject': 'Please read this... please',
'version': '1',
'threads': 0,
}
]
for data in item_data:
item = Item(table, data)
item.save(overwrite=True)
results = table.query(
forum_name__eq='Cool Forum', index='threads_index', threads__gt=1
)
list(results).should.have.length_of(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query_filter_lte():
table = create_table_with_local_indexes()
item_data = [
{
'forum_name': 'Cool Forum',
'subject': 'Check this out!',
'version': '1',
'threads': 1,
},
{
'forum_name': 'Cool Forum',
'subject': 'Read this now!',
'version': '1',
'threads': 5,
},
{
'forum_name': 'Cool Forum',
'subject': 'Please read this... please',
'version': '1',
'threads': 0,
}
]
for data in item_data:
item = Item(table, data)
item.save(overwrite=True)
results = table.query(
forum_name__eq='Cool Forum', index='threads_index', threads__lte=5
)
list(results).should.have.length_of(3)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query_filter_gte():
table = create_table_with_local_indexes()
item_data = [
{
'forum_name': 'Cool Forum',
'subject': 'Check this out!',
'version': '1',
'threads': 1,
},
{
'forum_name': 'Cool Forum',
'subject': 'Read this now!',
'version': '1',
'threads': 5,
},
{
'forum_name': 'Cool Forum',
'subject': 'Please read this... please',
'version': '1',
'threads': 0,
}
]
for data in item_data:
item = Item(table, data)
item.save(overwrite=True)
results = table.query(
forum_name__eq='Cool Forum', index='threads_index', threads__gte=1
)
list(results).should.have.length_of(2)
@mock_dynamodb2
def test_reverse_query():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
table = Table.create('messages', schema=[
HashKey('subject'),
RangeKey('created_at', data_type='N')
])
for i in range(10):
table.put_item({
'subject': "Hi",
'created_at': i
})
results = table.query_2(subject__eq="Hi",
created_at__lt=6,
limit=4,
reverse=True)
expected = [Decimal(5), Decimal(4), Decimal(3), Decimal(2)]
[r['created_at'] for r in results].should.equal(expected)
@mock_dynamodb2
def test_lookup():
table = Table.create('messages', schema=[
HashKey('test_hash'),
RangeKey('test_range'),
], throughput={
'read': 10,
'write': 10,
})
hash_key = 3241526475
range_key = 1234567890987
data = {'test_hash': hash_key, 'test_range': range_key}
table.put_item(data=data)
message = table.lookup(hash_key, range_key)
message.get('test_hash').should.equal(Decimal(hash_key))
message.get('test_range').should.equal(Decimal(range_key))
@mock_dynamodb2
def test_failed_overwrite():
table = Table.create('messages', schema=[
HashKey('id'),
RangeKey('range'),
], throughput={
'read': 7,
'write': 3,
})
data1 = {'id': '123', 'range': 'abc', 'data': '678'}
table.put_item(data=data1)
data2 = {'id': '123', 'range': 'abc', 'data': '345'}
table.put_item(data=data2, overwrite=True)
data3 = {'id': '123', 'range': 'abc', 'data': '812'}
table.put_item.when.called_with(data=data3).should.throw(ConditionalCheckFailedException)
returned_item = table.lookup('123', 'abc')
dict(returned_item).should.equal(data2)
data4 = {'id': '123', 'range': 'ghi', 'data': 812}
table.put_item(data=data4)
returned_item = table.lookup('123', 'ghi')
dict(returned_item).should.equal(data4)
@mock_dynamodb2
def test_conflicting_writes():
table = Table.create('messages', schema=[
HashKey('id'),
RangeKey('range'),
])
item_data = {'id': '123', 'range': 'abc', 'data': '678'}
item1 = Item(table, item_data)
item2 = Item(table, item_data)
item1.save()
item1['data'] = '579'
item2['data'] = '912'
item1.save()
item2.save.when.called_with().should.throw(ConditionalCheckFailedException)
"""
boto3
"""
@mock_dynamodb2
def test_boto3_conditions():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
table = dynamodb.Table('users')
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123'
})
table.put_item(Item={
'forum_name': 'the-key',
'subject': '456'
})
table.put_item(Item={
'forum_name': 'the-key',
'subject': '789'
})
# Test a query returning all items
results = table.query(
KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('1'),
ScanIndexForward=True,
)
expected = ["123", "456", "789"]
for index, item in enumerate(results['Items']):
item["subject"].should.equal(expected[index])
# Return all items again, but in reverse
results = table.query(
KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('1'),
ScanIndexForward=False,
)
for index, item in enumerate(reversed(results['Items'])):
item["subject"].should.equal(expected[index])
# Filter the subjects to only return some of the results
results = table.query(
KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('234'),
ConsistentRead=True,
)
results['Count'].should.equal(2)
# Filter to return no results
results = table.query(
KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('9999')
)
results['Count'].should.equal(0)
results = table.query(
KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").begins_with('12')
)
results['Count'].should.equal(1)
results = table.query(
KeyConditionExpression=Key("subject").begins_with('7') & Key('forum_name').eq('the-key')
)
results['Count'].should.equal(1)
results = table.query(
KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").between('567', '890')
)
results['Count'].should.equal(1)
@mock_dynamodb2
def test_boto3_put_item_with_conditions():
import botocore
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
table = dynamodb.Table('users')
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123'
})
table.put_item(
Item={
'forum_name': 'the-key-2',
'subject': '1234',
},
ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)'
)
table.put_item.when.called_with(
Item={
'forum_name': 'the-key',
'subject': '123'
},
ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)'
).should.throw(botocore.exceptions.ClientError)
table.put_item.when.called_with(
Item={
'forum_name': 'bogus-key',
'subject': 'bogus',
'test': '123'
},
ConditionExpression='attribute_exists(forum_name) AND attribute_exists(subject)'
).should.throw(botocore.exceptions.ClientError)
def _create_table_with_range_key():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
GlobalSecondaryIndexes=[{
'IndexName': 'TestGSI',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
{
'AttributeName': 'created',
'KeyType': 'RANGE',
}
],
'Projection': {
'ProjectionType': 'ALL',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
}],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
return dynamodb.Table('users')
@mock_dynamodb2
def test_update_item_range_key_set():
table = _create_table_with_range_key()
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123',
'username': 'johndoe',
'created': Decimal('3'),
})
item_key = {'forum_name': 'the-key', 'subject': '123'}
table.update_item(
Key=item_key,
AttributeUpdates={
'username': {
'Action': u'PUT',
'Value': 'johndoe2'
},
'created': {
'Action': u'PUT',
'Value': Decimal('4'),
},
'mapfield': {
'Action': u'PUT',
'Value': {'key': 'value'},
}
},
)
returned_item = dict((k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)['Item'].items())
dict(returned_item).should.equal({
'username': "johndoe2",
'forum_name': 'the-key',
'subject': '123',
'created': '4',
'mapfield': {'key': 'value'},
})
@mock_dynamodb2
def test_update_item_does_not_exist_is_created():
table = _create_table_with_range_key()
item_key = {'forum_name': 'the-key', 'subject': '123'}
result = table.update_item(
Key=item_key,
AttributeUpdates={
'username': {
'Action': u'PUT',
'Value': 'johndoe2'
},
'created': {
'Action': u'PUT',
'Value': Decimal('4'),
},
'mapfield': {
'Action': u'PUT',
'Value': {'key': 'value'},
}
},
ReturnValues='ALL_OLD',
)
assert not result.get('Attributes')
returned_item = dict((k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)['Item'].items())
dict(returned_item).should.equal({
'username': "johndoe2",
'forum_name': 'the-key',
'subject': '123',
'created': '4',
'mapfield': {'key': 'value'},
})
@mock_dynamodb2
def test_update_item_add_value():
table = _create_table_with_range_key()
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123',
'numeric_field': Decimal('-1'),
})
item_key = {'forum_name': 'the-key', 'subject': '123'}
table.update_item(
Key=item_key,
AttributeUpdates={
'numeric_field': {
'Action': u'ADD',
'Value': Decimal('2'),
},
},
)
returned_item = dict((k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)['Item'].items())
dict(returned_item).should.equal({
'numeric_field': '1',
'forum_name': 'the-key',
'subject': '123',
})
@mock_dynamodb2
def test_update_item_add_value_does_not_exist_is_created():
table = _create_table_with_range_key()
item_key = {'forum_name': 'the-key', 'subject': '123'}
table.update_item(
Key=item_key,
AttributeUpdates={
'numeric_field': {
'Action': u'ADD',
'Value': Decimal('2'),
},
},
)
returned_item = dict((k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)['Item'].items())
dict(returned_item).should.equal({
'numeric_field': '2',
'forum_name': 'the-key',
'subject': '123',
})
@mock_dynamodb2
def test_update_item_with_expression():
table = _create_table_with_range_key()
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123',
'field': '1'
})
item_key = {'forum_name': 'the-key', 'subject': '123'}
table.update_item(
Key=item_key,
UpdateExpression='SET field=2',
)
dict(table.get_item(Key=item_key)['Item']).should.equal({
'field': '2',
'forum_name': 'the-key',
'subject': '123',
})
table.update_item(
Key=item_key,
UpdateExpression='SET field = 3',
)
dict(table.get_item(Key=item_key)['Item']).should.equal({
'field': '3',
'forum_name': 'the-key',
'subject': '123',
})
@mock_dynamodb2
def test_boto3_query_gsi_range_comparison():
table = _create_table_with_range_key()
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123',
'username': 'johndoe',
'created': 3,
})
table.put_item(Item={
'forum_name': 'the-key',
'subject': '456',
'username': 'johndoe',
'created': 1,
})
table.put_item(Item={
'forum_name': 'the-key',
'subject': '789',
'username': 'johndoe',
'created': 2,
})
table.put_item(Item={
'forum_name': 'the-key',
'subject': '159',
'username': 'janedoe',
'created': 2,
})
table.put_item(Item={
'forum_name': 'the-key',
'subject': '601',
'username': 'janedoe',
'created': 5,
})
# Test a query returning all johndoe items
results = table.query(
KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt(0),
ScanIndexForward=True,
IndexName='TestGSI',
)
expected = ["456", "789", "123"]
for index, item in enumerate(results['Items']):
item["subject"].should.equal(expected[index])
# Return all johndoe items again, but in reverse
results = table.query(
KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt(0),
ScanIndexForward=False,
IndexName='TestGSI',
)
for index, item in enumerate(reversed(results['Items'])):
item["subject"].should.equal(expected[index])
    # Filter on the created range key to return only some of the results
# And reverse order of hash + range key
results = table.query(
KeyConditionExpression=Key("created").gt(1) & Key('username').eq('johndoe'),
ConsistentRead=True,
IndexName='TestGSI',
)
results['Count'].should.equal(2)
# Filter to return no results
results = table.query(
KeyConditionExpression=Key('username').eq('janedoe') & Key("created").gt(9),
IndexName='TestGSI',
)
results['Count'].should.equal(0)
results = table.query(
KeyConditionExpression=Key('username').eq('janedoe') & Key("created").eq(5),
IndexName='TestGSI',
)
results['Count'].should.equal(1)
# Test range key sorting
results = table.query(
KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt(0),
IndexName='TestGSI',
)
expected = [Decimal('1'), Decimal('2'), Decimal('3')]
for index, item in enumerate(results['Items']):
item["created"].should.equal(expected[index])
@mock_dynamodb2
def test_boto3_update_table_throughput():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 6
}
)
table = dynamodb.Table('users')
table.provisioned_throughput['ReadCapacityUnits'].should.equal(5)
table.provisioned_throughput['WriteCapacityUnits'].should.equal(6)
table.update(ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 11,
})
table = dynamodb.Table('users')
table.provisioned_throughput['ReadCapacityUnits'].should.equal(10)
table.provisioned_throughput['WriteCapacityUnits'].should.equal(11)
@mock_dynamodb2
def test_boto3_update_table_gsi_throughput():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
GlobalSecondaryIndexes=[{
'IndexName': 'TestGSI',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
{
'AttributeName': 'created',
'KeyType': 'RANGE',
}
],
'Projection': {
'ProjectionType': 'ALL',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 3,
'WriteCapacityUnits': 4
}
}],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 6
}
)
table = dynamodb.Table('users')
gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
gsi_throughput['ReadCapacityUnits'].should.equal(3)
gsi_throughput['WriteCapacityUnits'].should.equal(4)
table.provisioned_throughput['ReadCapacityUnits'].should.equal(5)
table.provisioned_throughput['WriteCapacityUnits'].should.equal(6)
table.update(GlobalSecondaryIndexUpdates=[{
'Update': {
'IndexName': 'TestGSI',
'ProvisionedThroughput': {
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 11,
}
},
}])
table = dynamodb.Table('users')
# Primary throughput has not changed
table.provisioned_throughput['ReadCapacityUnits'].should.equal(5)
table.provisioned_throughput['WriteCapacityUnits'].should.equal(6)
gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
gsi_throughput['ReadCapacityUnits'].should.equal(10)
gsi_throughput['WriteCapacityUnits'].should.equal(11)
@mock_dynamodb2
def test_update_table_gsi_create():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 6
}
)
table = dynamodb.Table('users')
table.global_secondary_indexes.should.have.length_of(0)
table.update(GlobalSecondaryIndexUpdates=[{
'Create': {
'IndexName': 'TestGSI',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
{
'AttributeName': 'created',
'KeyType': 'RANGE',
}
],
'Projection': {
'ProjectionType': 'ALL',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 3,
'WriteCapacityUnits': 4
}
},
}])
table = dynamodb.Table('users')
table.global_secondary_indexes.should.have.length_of(1)
gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
    gsi_throughput['ReadCapacityUnits'].should.equal(3)
    gsi_throughput['WriteCapacityUnits'].should.equal(4)
# Check update works
table.update(GlobalSecondaryIndexUpdates=[{
'Update': {
'IndexName': 'TestGSI',
'ProvisionedThroughput': {
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 11,
}
},
}])
table = dynamodb.Table('users')
gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
    gsi_throughput['ReadCapacityUnits'].should.equal(10)
    gsi_throughput['WriteCapacityUnits'].should.equal(11)
table.update(GlobalSecondaryIndexUpdates=[{
'Delete': {
'IndexName': 'TestGSI',
},
}])
table = dynamodb.Table('users')
table.global_secondary_indexes.should.have.length_of(0)
@mock_dynamodb2
def test_update_table_gsi_throughput():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
GlobalSecondaryIndexes=[{
'IndexName': 'TestGSI',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
{
'AttributeName': 'created',
'KeyType': 'RANGE',
}
],
'Projection': {
'ProjectionType': 'ALL',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 3,
'WriteCapacityUnits': 4
}
}],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 6
}
)
table = dynamodb.Table('users')
table.global_secondary_indexes.should.have.length_of(1)
table.update(GlobalSecondaryIndexUpdates=[{
'Delete': {
'IndexName': 'TestGSI',
},
}])
table = dynamodb.Table('users')
table.global_secondary_indexes.should.have.length_of(0)
@mock_dynamodb2
def test_query_pagination():
table = _create_table_with_range_key()
for i in range(10):
table.put_item(Item={
'forum_name': 'the-key',
'subject': '{0}'.format(i),
'username': 'johndoe',
'created': Decimal('3'),
})
page1 = table.query(
KeyConditionExpression=Key('forum_name').eq('the-key'),
Limit=6
)
page1['Count'].should.equal(6)
page1['Items'].should.have.length_of(6)
page1.should.have.key('LastEvaluatedKey')
page2 = table.query(
KeyConditionExpression=Key('forum_name').eq('the-key'),
Limit=6,
ExclusiveStartKey=page1['LastEvaluatedKey']
)
page2['Count'].should.equal(4)
page2['Items'].should.have.length_of(4)
page2.should_not.have.key('LastEvaluatedKey')
results = page1['Items'] + page2['Items']
subjects = set([int(r['subject']) for r in results])
subjects.should.equal(set(range(10)))
| silveregg/moto | tests/test_dynamodb2/test_dynamodb_table_with_range_key.py | Python | apache-2.0 | 47,256 | 0.000825 |
# -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
import GemRB
import LoadScreen
from GUIDefines import *
StartWindow = 0
ProtocolWindow = 0
QuitWindow = 0
QuickLoadSlot = 0
def OnLoad():
global StartWindow, QuickLoadSlot
screen_width = GemRB.GetSystemVariable (SV_WIDTH)
screen_height = GemRB.GetSystemVariable (SV_HEIGHT)
if screen_width == 1024:
GemRB.LoadWindowFrame("STON10L", "STON10R", "STON10T", "STON10B")
GemRB.LoadWindowPack("GUICONN", 800, 600)
#main window
StartWindow = GemRB.LoadWindow(0)
StartWindow.SetFrame ()
ProtocolButton = StartWindow.GetControl(0x00)
NewGameButton = StartWindow.GetControl(0x02)
LoadGameButton = StartWindow.GetControl(0x07)
QuickLoadButton = StartWindow.GetControl(0x03)
JoinGameButton = StartWindow.GetControl(0x0B)
OptionsButton = StartWindow.GetControl(0x08)
QuitGameButton = StartWindow.GetControl(0x01)
StartWindow.CreateLabel(0x0fff0000, 0,0,800,30, "REALMS2", "", IE_FONT_SINGLE_LINE | IE_FONT_ALIGN_CENTER)
VersionLabel = StartWindow.GetControl(0x0fff0000)
VersionLabel.SetText(GEMRB_VERSION)
ProtocolButton.SetStatus(IE_GUI_BUTTON_ENABLED)
NewGameButton.SetStatus(IE_GUI_BUTTON_ENABLED)
LoadGameButton.SetStatus(IE_GUI_BUTTON_ENABLED)
GemRB.SetVar("SaveDir",1)
Games=GemRB.GetSaveGames()
#looking for the quicksave
EnableQuickLoad = IE_GUI_BUTTON_DISABLED
for Game in Games:
Slotname = Game.GetSaveID()
# quick save is 1
if Slotname == 1:
EnableQuickLoad = IE_GUI_BUTTON_ENABLED
QuickLoadSlot = Game
break
QuickLoadButton.SetStatus(EnableQuickLoad)
JoinGameButton.SetStatus(IE_GUI_BUTTON_DISABLED)
OptionsButton.SetStatus(IE_GUI_BUTTON_ENABLED)
QuitGameButton.SetStatus(IE_GUI_BUTTON_ENABLED)
LastProtocol = GemRB.GetVar("Last Protocol Used")
if LastProtocol == 0:
ProtocolButton.SetText(15413)
elif LastProtocol == 1:
ProtocolButton.SetText(13967)
elif LastProtocol == 2:
ProtocolButton.SetText(13968)
NewGameButton.SetText(13963)
LoadGameButton.SetText(13729)
QuickLoadButton.SetText(33508)
JoinGameButton.SetText(13964)
OptionsButton.SetText(13905)
QuitGameButton.SetText(13731)
QuitGameButton.SetFlags(IE_GUI_BUTTON_CANCEL, OP_OR)
NewGameButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NewGamePress)
QuitGameButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, QuitPress)
ProtocolButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, ProtocolPress)
OptionsButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, OptionsPress)
LoadGameButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, LoadPress)
QuickLoadButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, QuickLoadPress)
StartWindow.SetVisible(WINDOW_VISIBLE)
GemRB.LoadMusicPL("Theme.mus")
return
def ProtocolPress():
global StartWindow, ProtocolWindow
#StartWindow.Unload()
StartWindow.SetVisible(WINDOW_INVISIBLE)
ProtocolWindow = GemRB.LoadWindow(1)
#Disabling Unused Buttons in this Window
Button = ProtocolWindow.GetControl(2)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetFlags(IE_GUI_BUTTON_NO_IMAGE, OP_OR)
Button = ProtocolWindow.GetControl(3)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetFlags(IE_GUI_BUTTON_NO_IMAGE, OP_OR)
Button = ProtocolWindow.GetControl(9)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetFlags(IE_GUI_BUTTON_NO_IMAGE, OP_OR)
SinglePlayerButton = ProtocolWindow.GetControl(10)
SinglePlayerButton.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR)
SinglePlayerButton.SetText(15413)
IPXButton = ProtocolWindow.GetControl(0)
IPXButton.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR)
IPXButton.SetText(13967)
TCPIPButton = ProtocolWindow.GetControl(1)
TCPIPButton.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR)
TCPIPButton.SetText(13968)
SinglePlayerButton.SetVarAssoc("Last Protocol Used", 0)
IPXButton.SetVarAssoc("Last Protocol Used", 1)
TCPIPButton.SetVarAssoc("Last Protocol Used", 2)
TextArea = ProtocolWindow.GetControl(7)
TextArea.SetText(11316)
DoneButton = ProtocolWindow.GetControl(6)
DoneButton.SetText(11973)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, ProtocolDonePress)
DoneButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
ProtocolWindow.SetVisible(WINDOW_VISIBLE)
return
def ProtocolDonePress():
global StartWindow, ProtocolWindow
if ProtocolWindow:
ProtocolWindow.Unload()
ProtocolButton = StartWindow.GetControl(0x00)
LastProtocol = GemRB.GetVar("Last Protocol Used")
if LastProtocol == 0:
ProtocolButton.SetText(15413)
elif LastProtocol == 1:
ProtocolButton.SetText(13967)
elif LastProtocol == 2:
ProtocolButton.SetText(13968)
StartWindow.SetVisible(WINDOW_VISIBLE)
return
def LoadPress():
global StartWindow
if StartWindow:
StartWindow.Unload()
GemRB.SetNextScript("GUILOAD")
return
def QuickLoadPress():
global StartWindow, QuickLoadSlot
LoadScreen.StartLoadScreen()
GemRB.LoadGame(QuickLoadSlot) # load & start game
GemRB.EnterGame()
return
def OptionsPress():
global StartWindow
if StartWindow:
StartWindow.Unload()
GemRB.SetNextScript("Options")
return
def QuitPress():
global StartWindow, QuitWindow
StartWindow.SetVisible(WINDOW_INVISIBLE)
QuitWindow = GemRB.LoadWindow(22)
CancelButton = QuitWindow.GetControl(2)
CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, QuitCancelPress)
CancelButton.SetFlags(IE_GUI_BUTTON_CANCEL, OP_OR)
QuitButton = QuitWindow.GetControl(1)
QuitButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, QuitQuitPress)
QuitButton.SetFlags(IE_GUI_BUTTON_DEFAULT, OP_OR)
TextArea = QuitWindow.GetControl(0)
CancelButton.SetText(13727)
QuitButton.SetText(15417)
TextArea.SetText(19532)
QuitWindow.SetVisible(WINDOW_VISIBLE)
return
def NewGamePress():
global StartWindow
if StartWindow:
StartWindow.Unload()
GemRB.SetNextScript("SPParty")
return
def QuitCancelPress():
global StartWindow, QuitWindow
if QuitWindow:
QuitWindow.Unload()
StartWindow.SetVisible(WINDOW_VISIBLE)
return
def QuitQuitPress():
GemRB.Quit()
return
| flaing/gemrb | gemrb/GUIScripts/iwd2/Start.py | Python | gpl-2.0 | 6,550 | 0.027176 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <https://calamares.io> ===
#
# SPDX-FileCopyrightText: 2014 Aurélien Gâteau <[email protected]>
# SPDX-FileCopyrightText: 2014 Anke Boersma <[email protected]>
# SPDX-FileCopyrightText: 2014 Daniel Hillenbrand <[email protected]>
# SPDX-FileCopyrightText: 2014 Benjamin Vaudour <[email protected]>
# SPDX-FileCopyrightText: 2014-2019 Kevin Kofler <[email protected]>
# SPDX-FileCopyrightText: 2015-2018 Philip Mueller <[email protected]>
# SPDX-FileCopyrightText: 2016-2017 Teo Mrnjavac <[email protected]>
# SPDX-FileCopyrightText: 2017 Alf Gaida <[email protected]>
# SPDX-FileCopyrightText: 2017-2019 Adriaan de Groot <[email protected]>
# SPDX-FileCopyrightText: 2017 Gabriel Craciunescu <[email protected]>
# SPDX-FileCopyrightText: 2017 Ben Green <[email protected]>
# SPDX-FileCopyrightText: 2021 Neal Gompa <[email protected]>
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Calamares is Free Software: see the License-Identifier above.
#
import os
import shutil
import subprocess
import libcalamares
from libcalamares.utils import check_target_env_call
import gettext
_ = gettext.translation("calamares-python",
localedir=libcalamares.utils.gettext_path(),
languages=libcalamares.utils.gettext_languages(),
fallback=True).gettext
# This is the sanitizer used all over to tidy up filenames
# to make identifiers (or to clean up names to make filenames).
file_name_sanitizer = str.maketrans(" /()", "_-__")
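# For example, "Generic GNU/Linux (2021)".translate(file_name_sanitizer)
# yields "Generic_GNU-Linux__2021_".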
def pretty_name():
return _("Install bootloader.")
def get_uuid():
"""
    Returns the UUID of the root ("/") partition from global storage.
    :return: the UUID string, or "" if no root partition is found
"""
partitions = libcalamares.globalstorage.value("partitions")
for partition in partitions:
if partition["mountPoint"] == "/":
libcalamares.utils.debug("Root partition uuid: \"{!s}\"".format(partition["uuid"]))
return partition["uuid"]
return ""
def get_bootloader_entry_name():
"""
    Returns the bootloader entry name, taken from the job configuration
    if set there, otherwise from the branding settings.
:return:
"""
if "bootloaderEntryName" in libcalamares.job.configuration:
return libcalamares.job.configuration["bootloaderEntryName"]
else:
branding = libcalamares.globalstorage.value("branding")
return branding["bootloaderEntryName"]
def get_kernel_line(kernel_type):
"""
    Returns the kernel line suffix for the given kernel type, taken from
    the job configuration if set there.
:param kernel_type:
:return:
"""
if kernel_type == "fallback":
if "fallbackKernelLine" in libcalamares.job.configuration:
return libcalamares.job.configuration["fallbackKernelLine"]
else:
return " (fallback)"
else:
if "kernelLine" in libcalamares.job.configuration:
return libcalamares.job.configuration["kernelLine"]
else:
return ""
def get_zfs_root():
"""
Looks in global storage to find the zfs root
:return: A string containing the path to the zfs root or None if it is not found
"""
zfs = libcalamares.globalstorage.value("zfsDatasets")
if not zfs:
libcalamares.utils.warning("Failed to locate zfs dataset list")
return None
# Find the root dataset
for dataset in zfs:
try:
if dataset["mountpoint"] == "/":
return dataset["zpool"] + "/" + dataset["dsName"]
except KeyError:
# This should be impossible
libcalamares.utils.warning("Internal error handling zfs dataset")
raise
return None
def is_btrfs_root(partition):
""" Returns True if the partition object refers to a btrfs root filesystem
:param partition: A partition map from global storage
:return: True if btrfs and root, False otherwise
"""
return partition["mountPoint"] == "/" and partition["fs"] == "btrfs"
def is_zfs_root(partition):
""" Returns True if the partition object refers to a zfs root filesystem
:param partition: A partition map from global storage
:return: True if zfs and root, False otherwise
"""
return partition["mountPoint"] == "/" and partition["fs"] == "zfs"
def create_systemd_boot_conf(install_path, efi_dir, uuid, entry, entry_name, kernel_type):
"""
Creates systemd-boot configuration files based on given parameters.
:param install_path:
:param efi_dir:
:param uuid:
:param entry:
:param entry_name:
:param kernel_type:
"""
kernel = libcalamares.job.configuration["kernel"]
kernel_params = ["quiet"]
partitions = libcalamares.globalstorage.value("partitions")
swap_uuid = ""
swap_outer_mappername = None
cryptdevice_params = []
# Take over swap settings:
# - unencrypted swap partition sets swap_uuid
# - encrypted root sets cryptdevice_params
for partition in partitions:
if partition["fs"] == "linuxswap" and not partition.get("claimed", None):
continue
has_luks = "luksMapperName" in partition
if partition["fs"] == "linuxswap" and not has_luks:
swap_uuid = partition["uuid"]
if (partition["fs"] == "linuxswap" and has_luks):
swap_outer_mappername = partition["luksMapperName"]
if partition["mountPoint"] == "/" and has_luks:
cryptdevice_params = ["cryptdevice=UUID="
+ partition["luksUuid"]
+ ":"
+ partition["luksMapperName"],
"root=/dev/mapper/"
+ partition["luksMapperName"]]
for partition in partitions:
        # systemd-boot with a BTRFS root filesystem needs to be told about the root subvolume.
# If a btrfs root subvolume wasn't set, it means the root is directly on the partition
# and this option isn't needed
if is_btrfs_root(partition):
btrfs_root_subvolume = libcalamares.globalstorage.value("btrfsRootSubvolume")
if btrfs_root_subvolume:
kernel_params.append("rootflags=subvol=" + btrfs_root_subvolume)
# zfs needs to be told the location of the root dataset
if is_zfs_root(partition):
zfs_root_path = get_zfs_root()
if zfs_root_path is not None:
kernel_params.append("zfs=" + zfs_root_path)
else:
# Something is really broken if we get to this point
libcalamares.utils.warning("Internal error handling zfs dataset")
raise Exception("Internal zfs data missing, please contact your distribution")
if cryptdevice_params:
kernel_params.extend(cryptdevice_params)
else:
kernel_params.append("root=UUID={!s}".format(uuid))
if swap_uuid:
kernel_params.append("resume=UUID={!s}".format(swap_uuid))
if swap_outer_mappername:
kernel_params.append("resume=/dev/mapper/{!s}".format(
swap_outer_mappername))
kernel_line = get_kernel_line(kernel_type)
libcalamares.utils.debug("Configure: \"{!s}\"".format(kernel_line))
if kernel_type == "fallback":
img = libcalamares.job.configuration["fallback"]
entry_name = entry_name + "-fallback"
else:
img = libcalamares.job.configuration["img"]
conf_path = os.path.join(install_path + efi_dir,
"loader",
"entries",
entry_name + ".conf")
# Copy kernel and initramfs to a subdirectory of /efi partition
files_dir = os.path.join(install_path + efi_dir, entry_name)
os.makedirs(files_dir, exist_ok=True)
kernel_path = install_path + kernel
kernel_name = os.path.basename(kernel_path)
shutil.copyfile(kernel_path, os.path.join(files_dir, kernel_name))
img_path = install_path + img
img_name = os.path.basename(img_path)
shutil.copyfile(img_path, os.path.join(files_dir, img_name))
lines = [
'## This is just an example config file.\n',
'## Please edit the paths and kernel parameters according\n',
'## to your system.\n',
'\n',
"title {!s}{!s}\n".format(entry, kernel_line),
"linux {!s}\n".format(os.path.join("/", entry_name, kernel_name)),
"initrd {!s}\n".format(os.path.join("/", entry_name, img_name)),
"options {!s} rw\n".format(" ".join(kernel_params)),
]
with open(conf_path, 'w') as conf_file:
for line in lines:
conf_file.write(line)
def create_loader(loader_path, entry):
"""
Writes configuration for loader.
:param loader_path:
:param entry:
"""
timeout = libcalamares.job.configuration["timeout"]
lines = [
"timeout {!s}\n".format(timeout),
"default {!s}\n".format(entry),
]
with open(loader_path, 'w') as loader_file:
for line in lines:
loader_file.write(line)
class suffix_iterator(object):
"""
Wrapper for one of the "generator" classes below to behave like
a proper Python iterator. The iterator is initialized with a
maximum number of attempts to generate a new suffix.
"""
def __init__(self, attempts, generator):
self.generator = generator
self.attempts = attempts
self.counter = 0
def __iter__(self):
return self
def __next__(self):
self.counter += 1
if self.counter <= self.attempts:
return self.generator.next()
raise StopIteration
class serialEfi(object):
"""
EFI Id generator that appends a serial number to the given name.
"""
def __init__(self, name):
self.name = name
# So the first call to next() will bump it to 0
self.counter = -1
def next(self):
self.counter += 1
if self.counter > 0:
return "{!s}{!s}".format(self.name, self.counter)
else:
return self.name
def render_in_base(value, base_values, length=-1):
"""
Renders @p value in base-N, where N is the number of
items in @p base_values. When rendering, use the items
of @p base_values (e.g. use "0123456789" to get regular decimal
rendering, or "ABCDEFGHIJ" for letters-as-numbers 'encoding').
If length is positive, pads out to at least that long with
leading "zeroes", whatever base_values[0] is.
"""
if value < 0:
raise ValueError("Cannot render negative values")
if len(base_values) < 2:
raise ValueError("Insufficient items for base-N rendering")
if length < 1:
length = 1
digits = []
base = len(base_values)
while value > 0:
place = value % base
value = value // base
digits.append(base_values[place])
while len(digits) < length:
digits.append(base_values[0])
return "".join(reversed(digits))
class randomEfi(object):
"""
EFI Id generator that appends a random 4-digit hex number to the given name.
"""
def __init__(self, name):
self.name = name
# So the first call to next() will bump it to 0
self.counter = -1
def next(self):
self.counter += 1
if self.counter > 0:
import random
v = random.randint(0, 65535) # 16 bits
return "{!s}{!s}".format(self.name, render_in_base(v, "0123456789ABCDEF", 4))
else:
return self.name
class phraseEfi(object):
"""
EFI Id generator that appends a random phrase to the given name.
"""
words = ("Sun", "Moon", "Mars", "Soyuz", "Falcon", "Kuaizhou", "Gaganyaan")
def __init__(self, name):
self.name = name
# So the first call to next() will bump it to 0
self.counter = -1
def next(self):
self.counter += 1
if self.counter > 0:
import random
desired_length = 1 + self.counter // 5
v = random.randint(0, len(self.words) ** desired_length)
return "{!s}{!s}".format(self.name, render_in_base(v, self.words))
else:
return self.name
def get_efi_suffix_generator(name):
"""
Handle EFI bootloader Ids with @@<something>@@ for suffix-processing.
"""
if "@@" not in name:
raise ValueError("Misplaced call to get_efi_suffix_generator, no @@")
parts = name.split("@@")
if len(parts) != 3:
raise ValueError("EFI Id {!r} is malformed".format(name))
if parts[2]:
# Supposed to be empty because the string ends with "@@"
raise ValueError("EFI Id {!r} is malformed".format(name))
if parts[1] not in ("SERIAL", "RANDOM", "PHRASE"):
raise ValueError("EFI suffix {!r} is unknown".format(parts[1]))
generator = None
if parts[1] == "SERIAL":
generator = serialEfi(parts[0])
elif parts[1] == "RANDOM":
generator = randomEfi(parts[0])
elif parts[1] == "PHRASE":
generator = phraseEfi(parts[0])
if generator is None:
raise ValueError("EFI suffix {!r} is unsupported".format(parts[1]))
return generator
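# Illustrative: for "Ubuntu@@RANDOM@@" the returned generator yields
# "Ubuntu" first, then candidates such as "Ubuntu1A2B" (4 random hex digits).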
def change_efi_suffix(efi_directory, bootloader_id):
"""
Returns a label based on @p bootloader_id that is usable within
@p efi_directory. If there is a @@<something>@@ suffix marker
in the given id, tries to generate a unique label.
"""
if bootloader_id.endswith("@@"):
# Do 10 attempts with any suffix generator
g = suffix_iterator(10, get_efi_suffix_generator(bootloader_id))
else:
# Just one attempt
g = [bootloader_id]
for candidate_name in g:
if not os.path.exists(os.path.join(efi_directory, candidate_name)):
return candidate_name
return bootloader_id
def efi_label(efi_directory):
"""
Returns a sanitized label, possibly unique, that can be
used within @p efi_directory.
"""
if "efiBootloaderId" in libcalamares.job.configuration:
efi_bootloader_id = change_efi_suffix( efi_directory, libcalamares.job.configuration["efiBootloaderId"] )
else:
branding = libcalamares.globalstorage.value("branding")
efi_bootloader_id = branding["bootloaderEntryName"]
return efi_bootloader_id.translate(file_name_sanitizer)
def efi_word_size():
# get bitness of the underlying UEFI
try:
        with open("/sys/firmware/efi/fw_platform_size", "r") as sysfile:
            efi_bitness = sysfile.read(2)
except Exception:
# if the kernel is older than 4.0, the UEFI bitness likely isn't
# exposed to the userspace so we assume a 64 bit UEFI here
efi_bitness = "64"
return efi_bitness
def efi_boot_next():
"""
Tell EFI to definitely boot into the just-installed
system next time.
"""
boot_mgr = libcalamares.job.configuration["efiBootMgr"]
boot_entry = None
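    # efibootmgr prints a line like "BootOrder: 0003,0001,0002"; the first
    # entry in that order is the one armed as BootNext below.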
efi_bootvars = subprocess.check_output([boot_mgr], text=True)
for line in efi_bootvars.split('\n'):
if not line:
continue
words = line.split()
if len(words) >= 2 and words[0] == "BootOrder:":
boot_entry = words[1].split(',')[0]
break
if boot_entry:
subprocess.call([boot_mgr, "-n", boot_entry])
def install_systemd_boot(efi_directory):
"""
Installs systemd-boot as bootloader for EFI setups.
:param efi_directory:
"""
libcalamares.utils.debug("Bootloader: systemd-boot")
install_path = libcalamares.globalstorage.value("rootMountPoint")
install_efi_directory = install_path + efi_directory
uuid = get_uuid()
distribution = get_bootloader_entry_name()
distribution_translated = distribution.translate(file_name_sanitizer)
loader_path = os.path.join(install_efi_directory,
"loader",
"loader.conf")
subprocess.call(["bootctl",
"--path={!s}".format(install_efi_directory),
"install"])
create_systemd_boot_conf(install_path,
efi_directory,
uuid,
distribution,
distribution_translated,
"default")
if "fallback" in libcalamares.job.configuration:
create_systemd_boot_conf(install_path,
efi_directory,
uuid,
distribution,
distribution_translated,
"fallback")
create_loader(loader_path, distribution_translated)
def get_grub_efi_parameters():
"""
Returns a 3-tuple of suitable parameters for GRUB EFI installation,
depending on the host machine architecture. The return is
- target name
- grub.efi name
- boot.efi name
all three are strings. May return None if there is no suitable
set for the current machine. May return unsuitable values if the
host architecture is unknown (e.g. defaults to x86_64).
"""
import platform
efi_bitness = efi_word_size()
cpu_type = platform.machine()
if efi_bitness == "32":
# Assume all 32-bitters are legacy x86
return ("i386-efi", "grubia32.efi", "bootia32.efi")
elif efi_bitness == "64" and cpu_type == "aarch64":
return ("arm64-efi", "grubaa64.efi", "bootaa64.efi")
elif efi_bitness == "64" and cpu_type == "loongarch64":
return ("loongarch64-efi", "grubloongarch64.efi", "bootloongarch64.efi")
elif efi_bitness == "64":
# If it's not ARM, must by AMD64
return ("x86_64-efi", "grubx64.efi", "bootx64.efi")
libcalamares.utils.warning("Could not find GRUB parameters for bits {b} and cpu {c}".format(b=repr(efi_bitness), c=repr(cpu_type)))
return None
def run_grub_mkconfig(partitions, output_file):
"""
Runs grub-mkconfig in the target environment
:param partitions: The partitions list from global storage
:param output_file: A string containing the path to the generating grub config file
:return:
"""
# zfs needs an environment variable set for grub-mkconfig
if any([is_zfs_root(partition) for partition in partitions]):
check_target_env_call(["sh", "-c", "ZPOOL_VDEV_NAME_PATH=1 " +
libcalamares.job.configuration["grubMkconfig"] + " -o " + output_file])
else:
# The input file /etc/default/grub should already be filled out by the
# grubcfg job module.
check_target_env_call([libcalamares.job.configuration["grubMkconfig"], "-o", output_file])
def run_grub_install(fw_type, partitions, efi_directory):
"""
Runs grub-install in the target environment
:param fw_type: A string which is "efi" for UEFI installs. Any other value results in a BIOS install
:param partitions: The partitions list from global storage
:param efi_directory: The path of the efi directory relative to the root of the install
:return:
"""
is_zfs = any([is_zfs_root(partition) for partition in partitions])
# zfs needs an environment variable set for grub
if is_zfs:
check_target_env_call(["sh", "-c", "echo ZPOOL_VDEV_NAME_PATH=1 >> /etc/environment"])
if fw_type == "efi":
assert efi_directory is not None
efi_bootloader_id = efi_label(efi_directory)
efi_target, efi_grub_file, efi_boot_file = get_grub_efi_parameters()
if is_zfs:
check_target_env_call(["sh", "-c", "ZPOOL_VDEV_NAME_PATH=1 " + libcalamares.job.configuration["grubInstall"]
+ " --target=" + efi_target + " --efi-directory=" + efi_directory
+ " --bootloader-id=" + efi_bootloader_id + " --force"])
else:
check_target_env_call([libcalamares.job.configuration["grubInstall"],
"--target=" + efi_target,
"--efi-directory=" + efi_directory,
"--bootloader-id=" + efi_bootloader_id,
"--force"])
else:
assert efi_directory is None
if libcalamares.globalstorage.value("bootLoader") is None:
return
boot_loader = libcalamares.globalstorage.value("bootLoader")
if boot_loader["installPath"] is None:
return
if is_zfs:
check_target_env_call(["sh", "-c", "ZPOOL_VDEV_NAME_PATH=1 "
+ libcalamares.job.configuration["grubInstall"]
+ " --target=i386-pc --recheck --force "
+ boot_loader["installPath"]])
else:
check_target_env_call([libcalamares.job.configuration["grubInstall"],
"--target=i386-pc",
"--recheck",
"--force",
boot_loader["installPath"]])
def install_grub(efi_directory, fw_type):
"""
Installs grub as bootloader, either in pc or efi mode.
:param efi_directory:
:param fw_type:
"""
# get the partition from global storage
partitions = libcalamares.globalstorage.value("partitions")
if not partitions:
libcalamares.utils.warning(_("Failed to install grub, no partitions defined in global storage"))
return
if fw_type == "efi":
libcalamares.utils.debug("Bootloader: grub (efi)")
install_path = libcalamares.globalstorage.value("rootMountPoint")
install_efi_directory = install_path + efi_directory
if not os.path.isdir(install_efi_directory):
os.makedirs(install_efi_directory)
efi_bootloader_id = efi_label(efi_directory)
efi_target, efi_grub_file, efi_boot_file = get_grub_efi_parameters()
run_grub_install(fw_type, partitions, efi_directory)
# VFAT is weird, see issue CAL-385
install_efi_directory_firmware = (vfat_correct_case(
install_efi_directory,
"EFI"))
if not os.path.exists(install_efi_directory_firmware):
os.makedirs(install_efi_directory_firmware)
# there might be several values for the boot directory
# most usual they are boot, Boot, BOOT
install_efi_boot_directory = (vfat_correct_case(
install_efi_directory_firmware,
"boot"))
if not os.path.exists(install_efi_boot_directory):
os.makedirs(install_efi_boot_directory)
# Workaround for some UEFI firmwares
fallback = "installEFIFallback"
libcalamares.utils.debug("UEFI Fallback: " + str(libcalamares.job.configuration.get(fallback, "<unset>")))
if libcalamares.job.configuration.get(fallback, True):
libcalamares.utils.debug(" .. installing '{!s}' fallback firmware".format(efi_boot_file))
efi_file_source = os.path.join(install_efi_directory_firmware,
efi_bootloader_id,
efi_grub_file)
efi_file_target = os.path.join(install_efi_boot_directory, efi_boot_file)
shutil.copy2(efi_file_source, efi_file_target)
else:
libcalamares.utils.debug("Bootloader: grub (bios)")
run_grub_install(fw_type, partitions, None)
run_grub_mkconfig(partitions, libcalamares.job.configuration["grubCfg"])
def install_secureboot(efi_directory):
"""
Installs the secureboot shim in the system by calling efibootmgr.
"""
efi_bootloader_id = efi_label(efi_directory)
install_path = libcalamares.globalstorage.value("rootMountPoint")
install_efi_directory = install_path + efi_directory
if efi_word_size() == "64":
install_efi_bin = "shimx64.efi"
elif efi_word_size() == "32":
install_efi_bin = "shimia32.efi"
# Copied, roughly, from openSUSE's install script,
# and pythonified. *disk* is something like /dev/sda,
# while *drive* may return "(disk/dev/sda,gpt1)" ..
# we're interested in the numbers in the second part
# of that tuple.
efi_drive = subprocess.check_output([
libcalamares.job.configuration["grubProbe"],
"-t", "drive", "--device-map=", install_efi_directory]).decode("ascii")
efi_disk = subprocess.check_output([
libcalamares.job.configuration["grubProbe"],
"-t", "disk", "--device-map=", install_efi_directory]).decode("ascii")
efi_drive_partition = efi_drive.replace("(","").replace(")","").split(",")[1]
# Get the first run of digits from the partition
efi_partition_number = None
c = 0
start = None
while c < len(efi_drive_partition):
if efi_drive_partition[c].isdigit() and start is None:
start = c
if not efi_drive_partition[c].isdigit() and start is not None:
efi_partition_number = efi_drive_partition[start:c]
break
c += 1
if efi_partition_number is None:
raise ValueError("No partition number found for %s" % install_efi_directory)
subprocess.call([
libcalamares.job.configuration["efiBootMgr"],
"-c",
"-w",
"-L", efi_bootloader_id,
"-d", efi_disk,
"-p", efi_partition_number,
"-l", install_efi_directory + "/" + install_efi_bin])
efi_boot_next()
# The input file /etc/default/grub should already be filled out by the
# grubcfg job module.
check_target_env_call([libcalamares.job.configuration["grubMkconfig"],
"-o", os.path.join(efi_directory, "EFI",
efi_bootloader_id, "grub.cfg")])
def vfat_correct_case(parent, name):
for candidate in os.listdir(parent):
if name.lower() == candidate.lower():
return os.path.join(parent, candidate)
return os.path.join(parent, name)
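# e.g. vfat_correct_case("/boot/efi", "EFI") returns "/boot/efi/Efi" when the
# VFAT directory happens to be spelled "Efi", and "/boot/efi/EFI" otherwise.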
def prepare_bootloader(fw_type):
"""
    Prepares the bootloader: based on the 'efiBootLoader' setting and the
    firmware type, installs systemd-boot, the sb-shim, or grub.
:param fw_type:
:return:
"""
efi_boot_loader = libcalamares.job.configuration["efiBootLoader"]
efi_directory = libcalamares.globalstorage.value("efiSystemPartition")
if efi_boot_loader == "systemd-boot" and fw_type == "efi":
install_systemd_boot(efi_directory)
elif efi_boot_loader == "sb-shim" and fw_type == "efi":
install_secureboot(efi_directory)
elif efi_boot_loader == "grub" or fw_type != "efi":
install_grub(efi_directory, fw_type)
else:
libcalamares.utils.debug( "WARNING: the combination of "
"boot-loader '{!s}' and firmware '{!s}' "
"is not supported.".format(efi_boot_loader, fw_type) )
def run():
"""
    Reads the firmware type from global storage and dispatches to the
    appropriate bootloader installation routine.
:return:
"""
fw_type = libcalamares.globalstorage.value("firmwareType")
if (libcalamares.globalstorage.value("bootLoader") is None and fw_type != "efi"):
libcalamares.utils.warning( "Non-EFI system, and no bootloader is set." )
return None
partitions = libcalamares.globalstorage.value("partitions")
if fw_type == "efi":
efi_system_partition = libcalamares.globalstorage.value("efiSystemPartition")
esp_found = [ p for p in partitions if p["mountPoint"] == efi_system_partition ]
if not esp_found:
libcalamares.utils.warning( "EFI system, but nothing mounted on {!s}".format(efi_system_partition) )
return None
try:
prepare_bootloader(fw_type)
except subprocess.CalledProcessError as e:
libcalamares.utils.warning(str(e))
libcalamares.utils.debug("stdout:" + str(e.stdout))
libcalamares.utils.debug("stderr:" + str(e.stderr))
return (_("Bootloader installation error"),
_("The bootloader could not be installed. The installation command <pre>{!s}</pre> returned error code {!s}.")
.format(e.cmd, e.returncode))
return None
| calamares/calamares | src/modules/bootloader/main.py | Python | gpl-3.0 | 28,413 | 0.001901 |
import logging
import asyncio
logger = logging.getLogger(__name__)
class InfluxLineProtocol(asyncio.DatagramProtocol):
def __init__(self, loop):
self.loop = loop
self.transport = None
def connection_made(self, transport):
self.transport = transport
@staticmethod
def fmt(measurement, fields, *, tags={}, timestamp=None):
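        """
        Format one InfluxDB line-protocol record:
            measurement[,tag=value...] field=value[,field=value...] [timestamp]
        Spaces and commas are escaped everywhere; '=' is additionally
        escaped in tag keys/values and field keys.
        """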
msg = measurement
msg = msg.replace(" ", "\\ ")
msg = msg.replace(",", "\\,")
for k, v in tags.items():
k = k.replace(" ", "\\ ")
k = k.replace(",", "\\,")
k = k.replace("=", "\\=")
v = v.replace(" ", "\\ ")
v = v.replace(",", "\\,")
v = v.replace("=", "\\=")
msg += ",{}={}".format(k, v)
msg += " "
for k, v in fields.items():
k = k.replace(" ", "\\ ")
k = k.replace(",", "\\,")
k = k.replace("=", "\\=")
msg += "{:s}=".format(k)
            # bool must be checked before int, since bool is a subclass of int
            if isinstance(v, bool):
                msg += "{!s}".format(v)
            elif isinstance(v, int):
                msg += "{:d}i".format(v)
            elif isinstance(v, float):
                msg += "{:g}".format(v)
            elif isinstance(v, str):
                msg += '"{:s}"'.format(v.replace('"', '\\"'))
            else:
                raise TypeError(v)
msg += ","
if fields:
msg = msg[:-1]
if timestamp:
msg += " {:d}".format(timestamp)
return msg
def write_one(self, *args, **kwargs):
msg = self.fmt(*args, **kwargs)
logger.debug(msg)
self.transport.sendto(msg.encode())
def write_many(self, lines):
msg = "\n".join(lines)
logger.debug(msg)
self.transport.sendto(msg.encode())
def datagram_received(self, data, addr):
logger.error("recvd %s %s", data, addr)
self.transport.close()
def error_received(self, exc):
logger.error("error %s", exc)
def connection_lost(self, exc):
logger.info("lost conn %s", exc)
| jordens/sensortag | influx_udp.py | Python | lgpl-3.0 | 2,082 | 0 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common functions
"""
# Import Local Modules
from marvin.cloudstackAPI import (listConfigurations,
listPhysicalNetworks,
listRegions,
addNetworkServiceProvider,
updateNetworkServiceProvider,
listDomains,
listZones,
listPods,
listOsTypes,
listTemplates,
updateResourceLimit,
listRouters,
listNetworks,
listClusters,
listSystemVms,
listStoragePools,
listVirtualMachines,
listLoadBalancerRuleInstances,
listFirewallRules,
listVolumes,
listIsos,
listAccounts,
listSnapshotPolicies,
listDiskOfferings,
listVlanIpRanges,
listUsageRecords,
listNetworkServiceProviders,
listHosts,
listPublicIpAddresses,
listPortForwardingRules,
listLoadBalancerRules,
listSnapshots,
listUsers,
listEvents,
listServiceOfferings,
listVirtualRouterElements,
listNetworkOfferings,
listResourceLimits,
listVPCOfferings,
migrateSystemVm)
from marvin.sshClient import SshClient
from marvin.codes import (PASS, FAILED, ISOLATED_NETWORK, VPC_NETWORK,
BASIC_ZONE, FAIL, NAT_RULE, STATIC_NAT_RULE,
RESOURCE_PRIMARY_STORAGE, RESOURCE_SECONDARY_STORAGE,
RESOURCE_CPU, RESOURCE_MEMORY, PUBLIC_TRAFFIC,
GUEST_TRAFFIC, MANAGEMENT_TRAFFIC, STORAGE_TRAFFIC,
VMWAREDVS)
from marvin.lib.utils import (validateList,
xsplit,
get_process_status,
random_gen,
format_volume_to_ext3)
from marvin.lib.base import (PhysicalNetwork,
PublicIPAddress,
NetworkOffering,
NATRule,
StaticNATRule,
Volume,
Account,
Project,
Snapshot,
NetScaler,
VirtualMachine,
FireWallRule,
Template,
Network,
Host,
Resources,
Configurations,
Router,
PublicIpRange,
StorageNetworkIpRange,
TrafficType)
from marvin.lib.vcenter import Vcenter
from netaddr import IPAddress
import random
import re
import itertools
import hashlib
# Import System modules
import time
def is_config_suitable(apiclient, name, value):
"""
    Check whether the deployment has the expected `value` for the global setting `name`
@return: true if value is set, else false
"""
configs = Configurations.list(apiclient, name=name)
assert(
configs is not None and isinstance(
configs,
list) and len(
configs) > 0)
return configs[0].value == value
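# Illustrative use (hypothetical apiclient fixture and setting value):
#   if not is_config_suitable(apiclient, "expunge.delay", "60"):
#       raise unittest.SkipTest("expunge.delay is not set to 60")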
def wait_for_cleanup(apiclient, configs=None):
"""Sleeps till the cleanup configs passed"""
# Configs list consists of the list of global configs
if not isinstance(configs, list):
return
for config in configs:
cmd = listConfigurations.listConfigurationsCmd()
cmd.name = config
cmd.listall = True
try:
config_descs = apiclient.listConfigurations(cmd)
except Exception as e:
raise Exception("Failed to fetch configurations: %s" % e)
if not isinstance(config_descs, list):
raise Exception("List configs didn't returned a valid data")
config_desc = config_descs[0]
# Sleep for the config_desc.value time
time.sleep(int(config_desc.value))
return
def add_netscaler(apiclient, zoneid, NSservice):
""" Adds Netscaler device and enables NS provider"""
cmd = listPhysicalNetworks.listPhysicalNetworksCmd()
cmd.zoneid = zoneid
physical_networks = apiclient.listPhysicalNetworks(cmd)
if isinstance(physical_networks, list):
physical_network = physical_networks[0]
cmd = listNetworkServiceProviders.listNetworkServiceProvidersCmd()
cmd.name = 'Netscaler'
cmd.physicalnetworkid = physical_network.id
nw_service_providers = apiclient.listNetworkServiceProviders(cmd)
if isinstance(nw_service_providers, list):
netscaler_provider = nw_service_providers[0]
else:
cmd1 = addNetworkServiceProvider.addNetworkServiceProviderCmd()
cmd1.name = 'Netscaler'
cmd1.physicalnetworkid = physical_network.id
netscaler_provider = apiclient.addNetworkServiceProvider(cmd1)
netscaler = NetScaler.add(
apiclient,
NSservice,
physicalnetworkid=physical_network.id
)
if netscaler_provider.state != 'Enabled':
cmd = updateNetworkServiceProvider.updateNetworkServiceProviderCmd()
cmd.id = netscaler_provider.id
cmd.state = 'Enabled'
apiclient.updateNetworkServiceProvider(cmd)
return netscaler
def get_region(apiclient, region_id=None, region_name=None):
'''
@name : get_region
@Desc : Returns the Region Information for a given region id or region name
@Input : region_name: Name of the Region
region_id : Id of the region
@Output : 1. Region Information for the passed inputs else first Region
2. FAILED In case the cmd failed
'''
cmd = listRegions.listRegionsCmd()
if region_name is not None:
cmd.name = region_name
if region_id is not None:
cmd.id = region_id
cmd_out = apiclient.listRegions(cmd)
return FAILED if validateList(cmd_out)[0] != PASS else cmd_out[0]
def get_domain(apiclient, domain_id=None, domain_name=None):
'''
@name : get_domain
@Desc : Returns the Domain Information for a given domain id or domain name
@Input : domain id : Id of the Domain
domain_name : Name of the Domain
@Output : 1. Domain Information for the passed inputs else first Domain
2. FAILED In case the cmd failed
'''
cmd = listDomains.listDomainsCmd()
if domain_name is not None:
cmd.name = domain_name
if domain_id is not None:
cmd.id = domain_id
cmd_out = apiclient.listDomains(cmd)
if validateList(cmd_out)[0] != PASS:
return FAILED
return cmd_out[0]
def find_storage_pool_type(apiclient, storagetype='NetworkFileSystem'):
"""
@name : find_storage_pool_type
@Desc : Returns true if the given storage pool type exists
@Input : type : type of the storage pool[NFS, RBD, etc.,]
@Output : True : if the type of storage is found
False : if the type of storage is not found
FAILED In case the cmd failed
"""
cmd = listStoragePools.listStoragePoolsCmd()
cmd_out = apiclient.listStoragePools(cmd)
if validateList(cmd_out)[0] != PASS:
return FAILED
for storage_pool in cmd_out:
if storage_pool.type.lower() == storagetype:
return True
return False
def get_zone(apiclient, zone_name=None, zone_id=None):
'''
@name : get_zone
@Desc :Returns the Zone Information for a given zone id or Zone Name
@Input : zone_name: Name of the Zone
zone_id : Id of the zone
@Output : 1. Zone Information for the passed inputs else first zone
2. FAILED In case the cmd failed
'''
cmd = listZones.listZonesCmd()
if zone_name is not None:
cmd.name = zone_name
if zone_id is not None:
cmd.id = zone_id
cmd_out = apiclient.listZones(cmd)
if validateList(cmd_out)[0] != PASS:
return FAILED
'''
Check if input zone name and zone id is None,
then return first element of List Zones command
'''
return cmd_out[0]
def get_physical_networks(apiclient, zoneid):
'''
@name : get_physical_networks
@Desc :Returns A list of the Physical Networks in the given Zone
@Input : zoneid: The Zone ID
@Output : 1. A list containing the Physical Networks
'''
cmd = listPhysicalNetworks.listPhysicalNetworksCmd()
cmd.zoneid = zoneid
physical_networks = apiclient.listPhysicalNetworks(cmd)
return physical_networks
def get_pod(apiclient, zone_id=None, pod_id=None, pod_name=None):
'''
@name : get_pod
@Desc : Returns the Pod Information for a given zone id or Zone Name
@Input : zone_id: Id of the Zone
pod_name : Name of the Pod
pod_id : Id of the Pod
@Output : 1. Pod Information for the pod
2. FAILED In case the cmd failed
'''
cmd = listPods.listPodsCmd()
if pod_name is not None:
cmd.name = pod_name
if pod_id is not None:
cmd.id = pod_id
if zone_id is not None:
cmd.zoneid = zone_id
cmd_out = apiclient.listPods(cmd)
if validateList(cmd_out)[0] != PASS:
return FAILED
return cmd_out[0]
def get_template(
apiclient, zone_id=None, ostype_desc=None, template_filter="featured", template_type='BUILTIN',
template_id=None, template_name=None, account=None, domain_id=None, project_id=None,
hypervisor=None):
'''
@Name : get_template
@Desc : Retrieves the template Information based upon inputs provided
Template is retrieved based upon either of the inputs matched
condition
@Input : returns a template"
@Output : FAILED in case of any failure
template Information matching the inputs
'''
cmd = listTemplates.listTemplatesCmd()
cmd.templatefilter = template_filter
if domain_id is not None:
cmd.domainid = domain_id
if zone_id is not None:
cmd.zoneid = zone_id
if template_id is not None:
cmd.id = template_id
if template_name is not None:
cmd.name = template_name
if hypervisor is not None:
cmd.hypervisor = hypervisor
if project_id is not None:
cmd.projectid = project_id
if account is not None:
cmd.account = account
'''
Get the Templates pertaining to the inputs provided
'''
list_templatesout = apiclient.listTemplates(cmd)
if validateList(list_templatesout)[0] != PASS:
return FAILED
for template in list_templatesout:
if template.isready and template.templatetype == template_type:
return template
'''
Return default first template, if no template matched
'''
return list_templatesout[0]
def get_windows_template(
apiclient, zone_id=None, ostype_desc=None, template_filter="featured", template_type='USER',
template_id=None, template_name=None, account=None, domain_id=None, project_id=None,
hypervisor=None):
'''
    @Name : get_windows_template
@Desc : Retrieves the template Information based upon inputs provided
Template is retrieved based upon either of the inputs matched
condition
@Input : returns a template"
@Output : FAILED in case of any failure
template Information matching the inputs
'''
cmd = listTemplates.listTemplatesCmd()
cmd.templatefilter = template_filter
if domain_id is not None:
cmd.domainid = domain_id
if zone_id is not None:
cmd.zoneid = zone_id
if template_id is not None:
cmd.id = template_id
if template_name is not None:
cmd.name = template_name
if hypervisor is not None:
cmd.hypervisor = hypervisor
if project_id is not None:
cmd.projectid = project_id
if account is not None:
cmd.account = account
'''
Get the Templates pertaining to the inputs provided
'''
list_templatesout = apiclient.listTemplates(cmd)
#print("template result is %s"%(list_templatesout))
if list_templatesout is None:
return FAILED
    if validateList(list_templatesout)[0] != PASS:
        return FAILED
for template in list_templatesout:
if template.isready and template.templatetype == "USER" and template.ostypename == ostype_desc:
return template
'''
Return default first template, if no template matched
'''
return FAILED
def download_systemplates_sec_storage(server, services):
"""Download System templates on sec storage"""
try:
# Login to management server
ssh = SshClient(
server["ipaddress"],
server["port"],
server["username"],
server["password"]
)
except Exception:
raise Exception("SSH access failed for server with IP address: %s" %
server["ipaddess"])
# Mount Secondary Storage on Management Server
cmds = [
"mkdir -p %s" % services["mnt_dir"],
"mount -t nfs %s:/%s %s" % (
services["sec_storage"],
services["path"],
services["mnt_dir"]
),
"%s -m %s -u %s -h %s -F" % (
services["command"],
services["mnt_dir"],
services["download_url"],
services["hypervisor"]
)
]
for c in cmds:
result = ssh.execute(c)
res = str(result)
# Unmount the Secondary storage
ssh.execute("umount %s" % (services["mnt_dir"]))
if res.count("Successfully installed system VM template") == 1:
return
else:
raise Exception("Failed to download System Templates on Sec Storage")
return
def wait_for_ssvms(apiclient, zoneid, podid, interval=60):
"""After setup wait for SSVMs to come Up"""
time.sleep(interval)
timeout = 40
while True:
list_ssvm_response = list_ssvms(
apiclient,
systemvmtype='secondarystoragevm',
zoneid=zoneid,
podid=podid
)
ssvm = list_ssvm_response[0]
        if ssvm.state == 'Running':
            break
        if timeout == 0:
            raise Exception("SSVM failed to come up")
        # Sleep to ensure SSVMs are Up and Running
        time.sleep(interval)
        timeout = timeout - 1
timeout = 40
while True:
list_ssvm_response = list_ssvms(
apiclient,
systemvmtype='consoleproxy',
zoneid=zoneid,
podid=podid
)
cpvm = list_ssvm_response[0]
        if cpvm.state == 'Running':
            break
        if timeout == 0:
            raise Exception("CPVM failed to come up")
        # Sleep to ensure CPVM is Up and Running
        time.sleep(interval)
        timeout = timeout - 1
return
def get_builtin_template_info(apiclient, zoneid):
"""Returns hypervisor specific infor for templates"""
list_template_response = Template.list(
apiclient,
templatefilter='featured',
zoneid=zoneid,
)
for b_template in list_template_response:
if b_template.templatetype == 'BUILTIN':
break
extract_response = Template.extract(apiclient,
b_template.id,
'HTTP_DOWNLOAD',
zoneid)
return extract_response.url, b_template.hypervisor, b_template.format
def download_builtin_templates(apiclient, zoneid, hypervisor, host,
linklocalip, interval=60):
"""After setup wait till builtin templates are downloaded"""
# Change IPTABLES Rules
get_process_status(
host["ipaddress"],
host["port"],
host["username"],
host["password"],
linklocalip,
"iptables -P INPUT ACCEPT"
)
time.sleep(interval)
# Find the BUILTIN Templates for given Zone, Hypervisor
list_template_response = list_templates(
apiclient,
hypervisor=hypervisor,
zoneid=zoneid,
templatefilter='self'
)
if not isinstance(list_template_response, list):
raise Exception("Failed to download BUILTIN templates")
# Ensure all BUILTIN templates are downloaded
templateid = None
for template in list_template_response:
if template.templatetype == "BUILTIN":
templateid = template.id
# Sleep to ensure that template is in downloading state after adding
# Sec storage
time.sleep(interval)
while True:
template_response = list_templates(
apiclient,
id=templateid,
zoneid=zoneid,
templatefilter='self'
)
template = template_response[0]
# If template is ready,
# template.status = Download Complete
# Downloading - x% Downloaded
# Error - Any other string
if template.status == 'Download Complete':
break
elif 'Downloaded' in template.status:
time.sleep(interval)
elif 'Installing' not in template.status:
raise Exception("ErrorInDownload")
return
def update_resource_limit(apiclient, resourcetype, account=None,
domainid=None, max=None, projectid=None):
"""Updates the resource limit to 'max' for given account"""
cmd = updateResourceLimit.updateResourceLimitCmd()
cmd.resourcetype = resourcetype
if account:
cmd.account = account
if domainid:
cmd.domainid = domainid
if max:
cmd.max = max
if projectid:
cmd.projectid = projectid
apiclient.updateResourceLimit(cmd)
return
def list_os_types(apiclient, **kwargs):
"""List all os types matching criteria"""
cmd = listOsTypes.listOsTypesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listOsTypes(cmd))
def list_routers(apiclient, **kwargs):
"""List all Routers matching criteria"""
cmd = listRouters.listRoutersCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listRouters(cmd))
def list_zones(apiclient, **kwargs):
"""List all Zones matching criteria"""
cmd = listZones.listZonesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listZones(cmd))
def list_networks(apiclient, **kwargs):
"""List all Networks matching criteria"""
cmd = listNetworks.listNetworksCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listNetworks(cmd))
def list_clusters(apiclient, **kwargs):
"""List all Clusters matching criteria"""
cmd = listClusters.listClustersCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listClusters(cmd))
def list_ssvms(apiclient, **kwargs):
"""List all SSVMs matching criteria"""
cmd = listSystemVms.listSystemVmsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listSystemVms(cmd))
def list_storage_pools(apiclient, **kwargs):
"""List all storage pools matching criteria"""
cmd = listStoragePools.listStoragePoolsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listStoragePools(cmd))
def list_virtual_machines(apiclient, **kwargs):
"""List all VMs matching criteria"""
cmd = listVirtualMachines.listVirtualMachinesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listVirtualMachines(cmd))
def list_hosts(apiclient, **kwargs):
"""List all Hosts matching criteria"""
cmd = listHosts.listHostsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listHosts(cmd))
def list_configurations(apiclient, **kwargs):
"""List configuration with specified name"""
cmd = listConfigurations.listConfigurationsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listConfigurations(cmd))
def list_publicIP(apiclient, **kwargs):
"""List all Public IPs matching criteria"""
cmd = listPublicIpAddresses.listPublicIpAddressesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listPublicIpAddresses(cmd))
def list_nat_rules(apiclient, **kwargs):
"""List all NAT rules matching criteria"""
cmd = listPortForwardingRules.listPortForwardingRulesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listPortForwardingRules(cmd))
def list_lb_rules(apiclient, **kwargs):
"""List all Load balancing rules matching criteria"""
cmd = listLoadBalancerRules.listLoadBalancerRulesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listLoadBalancerRules(cmd))
def list_lb_instances(apiclient, **kwargs):
"""List all Load balancing instances matching criteria"""
cmd = listLoadBalancerRuleInstances.listLoadBalancerRuleInstancesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listLoadBalancerRuleInstances(cmd))
def list_firewall_rules(apiclient, **kwargs):
"""List all Firewall Rules matching criteria"""
cmd = listFirewallRules.listFirewallRulesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listFirewallRules(cmd))
def list_volumes(apiclient, **kwargs):
"""List all volumes matching criteria"""
cmd = listVolumes.listVolumesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listVolumes(cmd))
def list_isos(apiclient, **kwargs):
"""Lists all available ISO files."""
cmd = listIsos.listIsosCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listIsos(cmd))
def list_snapshots(apiclient, **kwargs):
"""List all snapshots matching criteria"""
cmd = listSnapshots.listSnapshotsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listSnapshots(cmd))
def list_templates(apiclient, **kwargs):
"""List all templates matching criteria"""
cmd = listTemplates.listTemplatesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listTemplates(cmd))
def list_domains(apiclient, **kwargs):
"""Lists domains"""
cmd = listDomains.listDomainsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listDomains(cmd))
def list_accounts(apiclient, **kwargs):
"""Lists accounts and provides detailed account information for
listed accounts"""
cmd = listAccounts.listAccountsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listAccounts(cmd))
def list_users(apiclient, **kwargs):
"""Lists users and provides detailed account information for
listed users"""
cmd = listUsers.listUsersCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listUsers(cmd))
def list_snapshot_policy(apiclient, **kwargs):
"""Lists snapshot policies."""
cmd = listSnapshotPolicies.listSnapshotPoliciesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listSnapshotPolicies(cmd))
def list_events(apiclient, **kwargs):
"""Lists events"""
cmd = listEvents.listEventsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listEvents(cmd))
def list_disk_offering(apiclient, **kwargs):
"""Lists all available disk offerings."""
cmd = listDiskOfferings.listDiskOfferingsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listDiskOfferings(cmd))
def list_service_offering(apiclient, **kwargs):
"""Lists all available service offerings."""
cmd = listServiceOfferings.listServiceOfferingsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listServiceOfferings(cmd))
def list_vlan_ipranges(apiclient, **kwargs):
"""Lists all VLAN IP ranges."""
cmd = listVlanIpRanges.listVlanIpRangesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listVlanIpRanges(cmd))
def list_usage_records(apiclient, **kwargs):
"""Lists usage records for accounts"""
cmd = listUsageRecords.listUsageRecordsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listUsageRecords(cmd))
def list_nw_service_prividers(apiclient, **kwargs):
"""Lists Network service providers"""
cmd = listNetworkServiceProviders.listNetworkServiceProvidersCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listNetworkServiceProviders(cmd))
def list_virtual_router_elements(apiclient, **kwargs):
"""Lists Virtual Router elements"""
cmd = listVirtualRouterElements.listVirtualRouterElementsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listVirtualRouterElements(cmd))
def list_network_offerings(apiclient, **kwargs):
"""Lists network offerings"""
cmd = listNetworkOfferings.listNetworkOfferingsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listNetworkOfferings(cmd))
def list_resource_limits(apiclient, **kwargs):
"""Lists resource limits"""
cmd = listResourceLimits.listResourceLimitsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listResourceLimits(cmd))
def list_vpc_offerings(apiclient, **kwargs):
""" Lists VPC offerings """
cmd = listVPCOfferings.listVPCOfferingsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd.listall=True
return(apiclient.listVPCOfferings(cmd))
def update_resource_count(apiclient, domainid, accountid=None,
projectid=None, rtype=None):
"""updates the resource count
0 - VM
1 - Public IP
2 - Volume
3 - Snapshot
4 - Template
5 - Projects
6 - Network
7 - VPC
8 - CPUs
9 - RAM
10 - Primary (shared) storage (Volumes)
11 - Secondary storage (Snapshots, Templates & ISOs)
"""
Resources.updateCount(apiclient,
domainid=domainid,
account=accountid if accountid else None,
projectid=projectid if projectid else None,
resourcetype=rtype if rtype else None
)
return
def findSuitableHostForMigration(apiclient, vmid):
"""Returns a suitable host for VM migration"""
suitableHost = None
try:
        hosts = Host.listForMigration(apiclient, virtualmachineid=vmid)
except Exception as e:
raise Exception("Exception while getting hosts list suitable for migration: %s" % e)
suitablehosts = []
if isinstance(hosts, list) and len(hosts) > 0:
suitablehosts = [host for host in hosts if (str(host.resourcestate).lower() == "enabled"\
and str(host.state).lower() == "up")]
if len(suitablehosts)>0:
suitableHost = suitablehosts[0]
return suitableHost
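# Illustrative sketch (not part of the original library): a typical migration
# flow built on the helper above. `apiclient` and `vm` are assumed to come
# from the calling test, and vm.migrate() is assumed to follow the usual
# marvin VirtualMachine signature.
def _example_migrate_vm(apiclient, vm):
    host = findSuitableHostForMigration(apiclient, vm.id)
    if host is None:
        raise Exception("No suitable host found for migration")
    vm.migrate(apiclient, hostid=host.id)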
def get_resource_type(resource_id):
"""Returns resource type"""
lookup = {0: "VM",
1: "Public IP",
2: "Volume",
3: "Snapshot",
4: "Template",
5: "Projects",
6: "Network",
7: "VPC",
8: "CPUs",
9: "RAM",
10: "Primary (shared) storage (Volumes)",
11: "Secondary storage (Snapshots, Templates & ISOs)"
}
return lookup[resource_id]
def get_free_vlan(apiclient, zoneid):
"""
Find an unallocated VLAN outside the range allocated to the physical network.
@note: This does not guarantee that the VLAN is available for use in
the deployment's network gear
@return: physical_network, shared_vlan_tag
"""
list_physical_networks_response = PhysicalNetwork.list(
apiclient,
zoneid=zoneid
)
assert isinstance(list_physical_networks_response, list)
assert len(
list_physical_networks_response) > 0, "No physical networks found in zone %s" % zoneid
physical_network = list_physical_networks_response[0]
networks = list_networks(apiclient, zoneid=zoneid)
usedVlanIds = []
if isinstance(networks, list) and len(networks) > 0:
usedVlanIds = [int(nw.vlan)
for nw in networks if (nw.vlan and str(nw.vlan).lower() != "untagged")]
if not hasattr(physical_network, "vlan"):
while True:
shared_ntwk_vlan = random.randrange(1, 4095)
if shared_ntwk_vlan in usedVlanIds:
continue
else:
break
else:
vlans = xsplit(physical_network.vlan, ['-', ','])
assert len(vlans) > 0
assert int(vlans[0]) < int(
vlans[-1]), "VLAN range %s was improperly split" % physical_network.vlan
# Assuming random function will give different integer each time
retriesCount = 20
shared_ntwk_vlan = None
while True:
if retriesCount == 0:
break
free_vlan = int(vlans[-1]) + random.randrange(1, 20)
if free_vlan > 4095:
free_vlan = int(vlans[0]) - random.randrange(1, 20)
if free_vlan < 0 or (free_vlan in usedVlanIds):
retriesCount -= 1
continue
else:
shared_ntwk_vlan = free_vlan
break
return physical_network, shared_ntwk_vlan
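# Illustrative sketch (not part of the original library): using the VLAN
# helper above when building a shared network. `apiclient` and `zone` are
# assumed; note the docstring's caveat that availability in the deployment's
# network gear is not guaranteed.
def _example_pick_shared_vlan(apiclient, zone):
    physical_network, free_vlan = get_free_vlan(apiclient, zone.id)
    if free_vlan is None:
        raise Exception("No free VLAN found outside the allocated range")
    return physical_network.id, free_vlan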
def setNonContiguousVlanIds(apiclient, zoneid):
"""
Form the non contiguous ranges based on currently assigned range in physical network
"""
NonContigVlanIdsAcquired = False
list_physical_networks_response = PhysicalNetwork.list(
apiclient,
zoneid=zoneid
)
assert isinstance(list_physical_networks_response, list)
assert len(
list_physical_networks_response) > 0, "No physical networks found in zone %s" % zoneid
for physical_network in list_physical_networks_response:
vlans = xsplit(physical_network.vlan, ['-', ','])
assert len(vlans) > 0
assert int(vlans[0]) < int(
vlans[-1]), "VLAN range %s was improperly split" % physical_network.vlan
# Keep some gap between existing vlan and the new vlans which we are going to add
# So that they are non contiguous
non_contig_end_vlan_id = int(vlans[-1]) + 6
non_contig_start_vlan_id = int(vlans[0]) - 6
        # Form ranges which are consecutive to existing ranges but not
        # immediately contiguous: there should be a gap between the existing
        # range and the new non-contiguous range.
        # If a range can't be added after the existing range because it would
        # cross 4095, select VLAN ids before the existing range such that they
        # are greater than 0, and add the non-contiguous range there.
vlan = {"partial_range": ["", ""], "full_range": ""}
if non_contig_end_vlan_id < 4095:
vlan["partial_range"][0] = str(
non_contig_end_vlan_id - 4) + '-' + str(non_contig_end_vlan_id - 3)
vlan["partial_range"][1] = str(
non_contig_end_vlan_id - 1) + '-' + str(non_contig_end_vlan_id)
vlan["full_range"] = str(
non_contig_end_vlan_id - 4) + '-' + str(non_contig_end_vlan_id)
NonContigVlanIdsAcquired = True
elif non_contig_start_vlan_id > 0:
vlan["partial_range"][0] = str(
non_contig_start_vlan_id) + '-' + str(non_contig_start_vlan_id + 1)
vlan["partial_range"][1] = str(
non_contig_start_vlan_id + 3) + '-' + str(non_contig_start_vlan_id + 4)
vlan["full_range"] = str(
non_contig_start_vlan_id) + '-' + str(non_contig_start_vlan_id + 4)
NonContigVlanIdsAcquired = True
else:
NonContigVlanIdsAcquired = False
        # If we failed to get relevant vlan ids, continue to the next physical
        # network; else break from the loop as we have got the non-contiguous
        # vlan ids needed for the test
if not NonContigVlanIdsAcquired:
continue
else:
break
# If even through looping from all existing physical networks, failed to get relevant non
# contiguous vlan ids, then fail the test case
if not NonContigVlanIdsAcquired:
return None, None
return physical_network, vlan
def isIpInDesiredState(apiclient, ipaddressid, state):
""" Check if the given IP is in the correct state (given)
and return True/False accordingly"""
retriesCount = 10
ipInDesiredState = False
exceptionOccured = False
exceptionMessage = ""
try:
while retriesCount >= 0:
portableips = PublicIPAddress.list(apiclient, id=ipaddressid)
assert validateList(
portableips)[0] == PASS, "IPs list validation failed"
if str(portableips[0].state).lower() == state:
ipInDesiredState = True
break
retriesCount -= 1
time.sleep(60)
    except Exception as e:
        exceptionOccured = True
        exceptionMessage = e
        return [exceptionOccured, ipInDesiredState, exceptionMessage]
if not ipInDesiredState:
exceptionMessage = "Ip should be in %s state, it is in %s" %\
(state, portableips[0].state)
return [False, ipInDesiredState, exceptionMessage]
def setSharedNetworkParams(networkServices, range=20):
"""Fill up the services dictionary for shared network using random subnet"""
# @range: range decides the endip. Pass the range as "x" if you want the difference between the startip
# and endip as "x"
# Set the subnet number of shared networks randomly prior to execution
# of each test case to avoid overlapping of ip addresses
shared_network_subnet_number = random.randrange(1,254)
networkServices["gateway"] = "172.16."+str(shared_network_subnet_number)+".1"
networkServices["startip"] = "172.16."+str(shared_network_subnet_number)+".2"
networkServices["endip"] = "172.16."+str(shared_network_subnet_number)+"."+str(range+1)
networkServices["netmask"] = "255.255.255.0"
return networkServices
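# Illustrative sketch (not part of the original library): the helper above
# rewrites the addressing keys in place; with range=20 the resulting subnet
# looks like the (hypothetical) values in the comment below.
def _example_shared_network_params():
    services = {"name": "shared-net"}
    services = setSharedNetworkParams(services, range=20)
    # e.g. gateway "172.16.<n>.1", startip "172.16.<n>.2",
    #      endip   "172.16.<n>.21", netmask "255.255.255.0"
    return services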
def createEnabledNetworkOffering(apiclient, networkServices):
"""Create and enable network offering according to the type
    @output: List, containing [ Result, Network Offering, Reason ]
             1st argument ('Result')  : FAIL if an exception or assertion
                                        error occurs, PASS if the network
                                        offering is created and enabled
                                        successfully
             2nd argument (Net Off)   : the enabled network offering; None
                                        in case of an exception or
                                        assertion error
             3rd argument (Reason)    : reason for failure, defaults to None
"""
try:
resultSet = [FAIL, None, None]
# Create network offering
network_offering = NetworkOffering.create(apiclient, networkServices, conservemode=False)
# Update network offering state from disabled to enabled.
NetworkOffering.update(network_offering, apiclient, id=network_offering.id,
state="enabled")
except Exception as e:
resultSet[2] = e
return resultSet
return [PASS, network_offering, None]
def shouldTestBeSkipped(networkType, zoneType):
"""Decide which test to skip, according to type of network and zone type"""
# If network type is isolated or vpc and zone type is basic, then test should be skipped
skipIt = False
if ((networkType.lower() == str(ISOLATED_NETWORK).lower() or networkType.lower() == str(VPC_NETWORK).lower())
and (zoneType.lower() == BASIC_ZONE)):
skipIt = True
return skipIt
def verifyNetworkState(apiclient, networkid, state, listall=True):
"""List networks and check if the network state matches the given state"""
retriesCount = 10
isNetworkInDesiredState = False
exceptionOccured = False
exceptionMessage = ""
try:
while retriesCount >= 0:
networks = Network.list(apiclient, id=networkid, listall=listall)
assert validateList(
networks)[0] == PASS, "Networks list validation failed"
if str(networks[0].state).lower() == state:
isNetworkInDesiredState = True
break
retriesCount -= 1
time.sleep(60)
if not isNetworkInDesiredState:
exceptionMessage = "Network state should be %s, it is %s" %\
(state, networks[0].state)
except Exception as e:
exceptionOccured = True
exceptionMessage = e
return [exceptionOccured, isNetworkInDesiredState, exceptionMessage]
return [exceptionOccured, isNetworkInDesiredState, exceptionMessage]
def verifyComputeOfferingCreation(apiclient, computeofferingid):
"""List Compute offerings by ID and verify that the offering exists"""
cmd = listServiceOfferings.listServiceOfferingsCmd()
cmd.id = computeofferingid
serviceOfferings = None
try:
serviceOfferings = apiclient.listServiceOfferings(cmd)
except Exception:
return FAIL
if not (isinstance(serviceOfferings, list) and len(serviceOfferings) > 0):
return FAIL
return PASS
def createNetworkRulesForVM(apiclient, virtualmachine, ruletype,
account, networkruledata):
"""Acquire IP, create Firewall and NAT/StaticNAT rule
(associating it with given vm) for that IP"""
try:
public_ip = PublicIPAddress.create(
apiclient,accountid=account.name,
zoneid=virtualmachine.zoneid,domainid=account.domainid,
networkid=virtualmachine.nic[0].networkid)
FireWallRule.create(
apiclient,ipaddressid=public_ip.ipaddress.id,
protocol='TCP', cidrlist=[networkruledata["fwrule"]["cidr"]],
startport=networkruledata["fwrule"]["startport"],
endport=networkruledata["fwrule"]["endport"]
)
if ruletype == NAT_RULE:
# Create NAT rule
NATRule.create(apiclient, virtualmachine,
networkruledata["natrule"],ipaddressid=public_ip.ipaddress.id,
networkid=virtualmachine.nic[0].networkid)
elif ruletype == STATIC_NAT_RULE:
# Enable Static NAT for VM
StaticNATRule.enable(apiclient,public_ip.ipaddress.id,
virtualmachine.id, networkid=virtualmachine.nic[0].networkid)
except Exception as e:
        return [FAIL, e]
return [PASS, public_ip]
def getPortableIpRangeServices(config):
""" Reads config values related to portable ip and fills up
services accordingly"""
services = {}
attributeError = False
if config.portableIpRange.startip:
services["startip"] = config.portableIpRange.startip
else:
attributeError = True
if config.portableIpRange.endip:
services["endip"] = config.portableIpRange.endip
else:
attributeError = True
if config.portableIpRange.netmask:
services["netmask"] = config.portableIpRange.netmask
else:
attributeError = True
if config.portableIpRange.gateway:
services["gateway"] = config.portableIpRange.gateway
else:
attributeError = True
if config.portableIpRange.vlan:
services["vlan"] = config.portableIpRange.vlan
if attributeError:
services = FAILED
return services
def uploadVolume(apiclient, zoneid, account, services):
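    """Upload a volume from services["url"] and verify that it reaches the
    'uploaded' state; returns [PASS, volume] or [FAIL, exception]."""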
try:
# Upload the volume
volume = Volume.upload(apiclient, services["volume"],
zoneid=zoneid, account=account.name,
domainid=account.domainid, url=services["url"])
volume.wait_for_upload(apiclient)
# Check List Volume response for newly created volume
volumes = Volume.list(apiclient, id=volume.id,
zoneid=zoneid, listall=True)
validationresult = validateList(volumes)
assert validationresult[0] == PASS,\
"volumes list validation failed: %s" % validationresult[2]
assert str(volumes[0].state).lower() == "uploaded",\
"Volume state should be 'uploaded' but it is %s" % volumes[0].state
except Exception as e:
return [FAIL, e]
return [PASS, volume]
def matchResourceCount(apiclient, expectedCount, resourceType,
accountid=None, projectid=None):
"""Match the resource count of account/project with the expected
resource count"""
try:
resourceholderlist = None
if accountid:
resourceholderlist = Account.list(apiclient, id=accountid)
elif projectid:
resourceholderlist = Project.list(apiclient, id=projectid, listall=True)
validationresult = validateList(resourceholderlist)
assert validationresult[0] == PASS,\
"accounts list validation failed"
if resourceType == RESOURCE_PRIMARY_STORAGE:
resourceCount = resourceholderlist[0].primarystoragetotal
elif resourceType == RESOURCE_SECONDARY_STORAGE:
resourceCount = resourceholderlist[0].secondarystoragetotal
elif resourceType == RESOURCE_CPU:
resourceCount = resourceholderlist[0].cputotal
elif resourceType == RESOURCE_MEMORY:
resourceCount = resourceholderlist[0].memorytotal
assert str(resourceCount) == str(expectedCount),\
"Resource count %s should match with the expected resource count %s" %\
(resourceCount, expectedCount)
except Exception as e:
return [FAIL, e]
return [PASS, None]
def createSnapshotFromVirtualMachineVolume(apiclient, account, vmid):
"""Create snapshot from volume"""
try:
volumes = Volume.list(apiclient, account=account.name,
domainid=account.domainid, virtualmachineid=vmid)
validationresult = validateList(volumes)
assert validateList(volumes)[0] == PASS,\
"List volumes should return a valid response"
snapshot = Snapshot.create(apiclient, volume_id=volumes[0].id,
account=account.name, domainid=account.domainid)
snapshots = Snapshot.list(apiclient, id=snapshot.id,
listall=True)
validationresult = validateList(snapshots)
assert validationresult[0] == PASS,\
"List snapshot should return a valid list"
except Exception as e:
return[FAIL, e]
return [PASS, snapshot]
def isVmExpunged(apiclient, vmid, projectid=None, timeout=600):
"""Verify if VM is expunged or not"""
vmExpunged= False
while timeout>=0:
try:
vms = VirtualMachine.list(apiclient, id=vmid, projectid=projectid)
if vms is None:
vmExpunged = True
break
timeout -= 60
time.sleep(60)
except Exception:
vmExpunged = True
break
#end while
return vmExpunged
def isDomainResourceCountEqualToExpectedCount(apiclient, domainid, expectedcount,
resourcetype):
"""Get the resource count of specific domain and match
it with the expected count
Return list [isExceptionOccured, reasonForException, isResourceCountEqual]"""
isResourceCountEqual = False
isExceptionOccured = False
reasonForException = None
try:
response = Resources.updateCount(apiclient, domainid=domainid,
resourcetype=resourcetype)
except Exception as e:
reasonForException = "Failed while updating resource count: %s" % e
isExceptionOccured = True
return [isExceptionOccured, reasonForException, isResourceCountEqual]
resourcecount = (response[0].resourcecount / (1024**3))
if resourcecount == expectedcount:
isResourceCountEqual = True
return [isExceptionOccured, reasonForException, isResourceCountEqual]
def isNetworkDeleted(apiclient, networkid, timeout=600):
""" List the network and check that the list is empty or not"""
networkDeleted = False
while timeout >= 0:
networks = Network.list(apiclient, id=networkid)
if networks is None:
networkDeleted = True
break
timeout -= 60
time.sleep(60)
#end while
return networkDeleted
def createChecksum(service=None,
virtual_machine=None,
disk=None,
disk_type=None):
""" Calculate the MD5 checksum of the disk by writing \
data on the disk where disk_type is either root disk or data disk
@return: returns the calculated checksum"""
random_data_0 = random_gen(size=100)
# creating checksum(MD5)
m = hashlib.md5()
m.update(random_data_0)
    checksum_random_data_0 = m.hexdigest()
try:
ssh_client = SshClient(
virtual_machine.ssh_ip,
virtual_machine.ssh_port,
virtual_machine.username,
virtual_machine.password
)
except Exception:
raise Exception("SSH access failed for server with IP address: %s" %
virtual_machine.ssh_ip)
# Format partition using ext3
format_volume_to_ext3(
ssh_client,
service["volume_write_path"][
virtual_machine.hypervisor.lower()][disk_type]
)
cmds = ["fdisk -l",
"mkdir -p %s" % service["data_write_paths"]["mount_dir"],
"mount -t ext3 %s1 %s" % (
service["volume_write_path"][
virtual_machine.hypervisor.lower()][disk_type],
service["data_write_paths"]["mount_dir"]
),
"mkdir -p %s/%s/%s " % (
service["data_write_paths"]["mount_dir"],
service["data_write_paths"]["sub_dir"],
service["data_write_paths"]["sub_lvl_dir1"],
),
"echo %s > %s/%s/%s/%s" % (
random_data_0,
service["data_write_paths"]["mount_dir"],
service["data_write_paths"]["sub_dir"],
service["data_write_paths"]["sub_lvl_dir1"],
service["data_write_paths"]["random_data"]
),
"cat %s/%s/%s/%s" % (
service["data_write_paths"]["mount_dir"],
service["data_write_paths"]["sub_dir"],
service["data_write_paths"]["sub_lvl_dir1"],
service["data_write_paths"]["random_data"]
)
]
for c in cmds:
ssh_client.execute(c)
# Unmount the storage
cmds = [
"umount %s" % (service["data_write_paths"]["mount_dir"]),
]
for c in cmds:
ssh_client.execute(c)
    return checksum_random_data_0
def compareChecksum(
apiclient,
service=None,
original_checksum=None,
disk_type=None,
virt_machine=None
):
"""
Create md5 checksum of the data present on the disk and compare
it with the given checksum
"""
if virt_machine.state != "Running":
virt_machine.start(apiclient)
try:
# Login to VM to verify test directories and files
ssh = SshClient(
virt_machine.ssh_ip,
virt_machine.ssh_port,
virt_machine.username,
virt_machine.password
)
except Exception:
raise Exception("SSH access failed for server with IP address: %s" %
virt_machine.ssh_ip)
# Mount datadiskdevice_1 because this is the first data disk of the new
# virtual machine
cmds = ["blkid",
"fdisk -l",
"mkdir -p %s" % service["data_write_paths"]["mount_dir"],
"mount -t ext3 %s1 %s" % (
service["volume_write_path"][
virt_machine.hypervisor.lower()][disk_type],
service["data_write_paths"]["mount_dir"]
),
]
for c in cmds:
ssh.execute(c)
returned_data_0 = ssh.execute(
"cat %s/%s/%s/%s" % (
service["data_write_paths"]["mount_dir"],
service["data_write_paths"]["sub_dir"],
service["data_write_paths"]["sub_lvl_dir1"],
service["data_write_paths"]["random_data"]
))
n = hashlib.md5()
n.update(returned_data_0[0])
    checksum_returned_data_0 = n.hexdigest()
    # Verify returned data
    assert original_checksum == checksum_returned_data_0, \
        "Checksum does not match checksum of original data"
    # Unmount the storage
cmds = [
"umount %s" % (service["data_write_paths"]["mount_dir"]),
]
for c in cmds:
ssh.execute(c)
return
def verifyRouterState(apiclient, routerid, state, listall=True):
"""List router and check if the router state matches the given state"""
retriesCount = 10
isRouterInDesiredState = False
exceptionOccured = False
exceptionMessage = ""
try:
while retriesCount >= 0:
routers = Router.list(apiclient, id=routerid, listall=listall)
assert validateList(
routers)[0] == PASS, "Routers list validation failed"
if str(routers[0].state).lower() == state:
isRouterInDesiredState = True
break
retriesCount -= 1
time.sleep(60)
if not isRouterInDesiredState:
exceptionMessage = "Router state should be %s, it is %s" %\
(state, routers[0].state)
except Exception as e:
exceptionOccured = True
exceptionMessage = e
return [exceptionOccured, isRouterInDesiredState, exceptionMessage]
return [exceptionOccured, isRouterInDesiredState, exceptionMessage]
def isIpRangeInUse(api_client, publicIpRange):
    ''' Check whether any IP in the given IP range is currently in use
    '''
vmList = VirtualMachine.list(api_client,
zoneid=publicIpRange.zoneid,
listall=True)
if not vmList:
return False
for vm in vmList:
for nic in vm.nic:
publicIpAddresses = PublicIPAddress.list(api_client,
associatednetworkid=nic.networkid,
listall=True)
if validateList(publicIpAddresses)[0] == PASS:
for ipaddress in publicIpAddresses:
if IPAddress(publicIpRange.startip) <=\
IPAddress(ipaddress.ipaddress) <=\
IPAddress(publicIpRange.endip):
return True
return False
def verifyGuestTrafficPortGroups(api_client, config, setup_zone):
""" This function matches the given zone with
the zone in config file used to deploy the setup and
retrieves the corresponding vcenter details and forms
the vcenter connection object. It makes call to
verify the guest traffic for given zone """
try:
zoneDetailsInConfig = [zone for zone in config.zones
if zone.name == setup_zone.name][0]
vcenterusername = zoneDetailsInConfig.vmwaredc.username
vcenterpassword = zoneDetailsInConfig.vmwaredc.password
vcenterip = zoneDetailsInConfig.vmwaredc.vcenter
vcenterObj = Vcenter(
vcenterip,
vcenterusername,
vcenterpassword)
response = verifyVCenterPortGroups(
api_client,
vcenterObj,
traffic_types_to_validate=[
GUEST_TRAFFIC],
zoneid=setup_zone.id,
switchTypes=[VMWAREDVS])
assert response[0] == PASS, response[1]
except Exception as e:
return [FAIL, e]
return [PASS, None]
def analyzeTrafficType(trafficTypes, trafficTypeToFilter, switchTypes=None):
""" Analyze traffic types for given type and return
switch name and vlan Id from the
vmwarenetworklabel string of trafficTypeToFilter
"""
try:
filteredList = [trafficType for trafficType in trafficTypes
if trafficType.traffictype.lower() ==
trafficTypeToFilter]
if not filteredList:
return [PASS, filteredList, None, None]
# Split string with , so as to extract the switch Name and
# vlan ID
splitString = str(
filteredList[0].vmwarenetworklabel).split(",")
switchName = splitString[0]
vlanSpecified = splitString[1]
availableSwitchType = splitString[2]
if switchTypes and availableSwitchType.lower() not in switchTypes:
return [PASS, None, None, None]
return [PASS, filteredList, switchName, vlanSpecified]
except Exception as e:
return [FAIL, e, None, None]
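# Illustrative sketch (not part of the original library): the label parsed
# above is the comma-separated vmwarenetworklabel, e.g. the hypothetical
# "dvSwitch0,100,vmwaredvs" -> switch name, VLAN id, switch type.
def _example_analyze_guest_traffic(traffic_types):
    status, filtered, switch_name, vlan_id = analyzeTrafficType(
        traffic_types, GUEST_TRAFFIC, switchTypes=[VMWAREDVS])
    assert status == PASS
    return switch_name, vlan_id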
def getExpectedPortGroupNames(
api_client,
physical_network,
network_rate,
switch_name,
traffic_types,
switch_dict,
vcenter_conn,
specified_vlan,
traffic_type):
""" Return names of expected port groups that should be
present in vcenter
Parameters:
@physical_network: Physical Network of the @traffic_type
@network_rate: as defined by network.throttling.rate
@switch_name: Name of the switch used by the traffic in
vcenter
@traffic_types: List of all traffic types present in the physical
network
@switch_dict: Dictionary containing switch information in vcenter
@vcenter_conn: vcenter connection object used to fetch information
from vcenter
@specified_vlan: The vlan for @traffic_type
@traffic_type: Traffic type for which the port names are to be
returned
Return value:
[PASS/FAIL, exception object if FAIL else expected port group names
for @traffic_type]
"""
try:
expectedDVPortGroupNames = []
if traffic_type == PUBLIC_TRAFFIC:
publicIpRanges = PublicIpRange.list(
api_client,
physicalnetworkid=physical_network.id
)
if publicIpRanges is not None:
for publicIpRange in publicIpRanges:
                    vlanInIpRange = re.findall(
                        r'\d+',
                        str(publicIpRange.vlan))
vlanId = "untagged"
if len(vlanInIpRange) > 0:
vlanId = vlanInIpRange[0]
ipRangeInUse = isIpRangeInUse(api_client, publicIpRange)
if ipRangeInUse:
expectedDVPortGroupName = "cloud" + "." + \
PUBLIC_TRAFFIC + "." + vlanId + "." + \
network_rate + "." + "1" + "-" + \
switch_name
expectedDVPortGroupNames.append(
expectedDVPortGroupName)
expectedDVPortGroupName = "cloud" + "." + PUBLIC_TRAFFIC + "." + \
vlanId + "." + "0" + "." + "1" + "-" + switch_name
expectedDVPortGroupNames.append(expectedDVPortGroupName)
if traffic_type == GUEST_TRAFFIC:
networks = Network.list(
api_client,
physicalnetworkid=physical_network.id,
listall=True
)
if networks is not None:
for network in networks:
                    networkVlan = re.findall(
                        r'\d+', str(network.vlan))
if len(networkVlan) > 0:
vlanId = networkVlan[0]
expectedDVPortGroupName = "cloud" + "." + GUEST_TRAFFIC + "." + \
vlanId + "." + network_rate + "." + "1" + "-" + \
switch_name
expectedDVPortGroupNames.append(
expectedDVPortGroupName)
if traffic_type == STORAGE_TRAFFIC:
vlanId = ""
storageIpRanges = StorageNetworkIpRange.list(
api_client,
zoneid=physical_network.zoneid
)
if storageIpRanges is not None:
for storageIpRange in storageIpRanges:
                    vlanInIpRange = re.findall(
                        r'\d+',
                        str(storageIpRange.vlan))
if len(vlanInIpRange) > 0:
vlanId = vlanInIpRange[0]
else:
vlanId = "untagged"
expectedDVPortGroupName = "cloud" + "." + STORAGE_TRAFFIC + \
"." + vlanId + "." + "0" + "." + "1" + "-" + \
switch_name
expectedDVPortGroupNames.append(
expectedDVPortGroupName)
else:
response = analyzeTrafficType(
traffic_types, MANAGEMENT_TRAFFIC)
assert response[0] == PASS, response[1]
filteredList, switchName, vlanSpecified =\
response[1], response[2], response[3]
if not filteredList:
raise Exception("No Management traffic present and\
Storage traffic does not have any IP range,\
Invalid zone setting")
if switchName not in switch_dict:
dvswitches = vcenter_conn.get_dvswitches(
name=switchName)
switch_dict[switchName] = dvswitches[0][
'dvswitch']['portgroupNameList']
if vlanSpecified:
vlanId = vlanSpecified
else:
vlanId = "untagged"
expectedDVPortGroupName = "cloud" + "." + STORAGE_TRAFFIC + \
"." + vlanId + "." + "0" + "." + "1" + "-" + switchName
expectedDVPortGroupNames.append(expectedDVPortGroupName)
if traffic_type == MANAGEMENT_TRAFFIC:
vlanId = "untagged"
if specified_vlan:
vlanId = specified_vlan
expectedDVPortGroupName = "cloud" + "." + "private" + "." + \
vlanId + "." + "0" + "." + "1" + "-" + switch_name
expectedDVPortGroupNames.append(expectedDVPortGroupName)
except Exception as e:
return [FAIL, e]
return [PASS, expectedDVPortGroupNames]
def verifyVCenterPortGroups(
api_client,
vcenter_conn,
zoneid,
traffic_types_to_validate,
switchTypes):
""" Generate expected port groups for given traffic types and
verify they are present in the vcenter
Parameters:
@api_client: API client of root admin account
@vcenter_conn: connection object for vcenter used to fetch data
using vcenterAPI
@zone_id: Zone for which port groups are to be verified
@traffic_types_to_validate:
Traffic types (public, guest, management, storage) for
which verification is to be done
@switchTypes: The switch types for which port groups
are to be verified e.g vmwaredvs
Return value:
[PASS/FAIL, exception message if FAIL else None]
"""
try:
expectedDVPortGroupNames = []
vcenterPortGroups = []
config = Configurations.list(
api_client,
name="network.throttling.rate"
)
networkRate = config[0].value
switchDict = {}
physicalNetworks = PhysicalNetwork.list(
api_client,
zoneid=zoneid
)
# If there are no physical networks in zone, return as PASS
# as there are no validations to make
if validateList(physicalNetworks)[0] != PASS:
return [PASS, None]
for physicalNetwork in physicalNetworks:
trafficTypes = TrafficType.list(
api_client,
physicalnetworkid=physicalNetwork.id)
for trafficType in traffic_types_to_validate:
response = analyzeTrafficType(
trafficTypes, trafficType, switchTypes)
assert response[0] == PASS, response[1]
filteredList, switchName, vlanSpecified=\
response[1], response[2], response[3]
if not filteredList:
continue
if switchName not in switchDict:
dvswitches = vcenter_conn.get_dvswitches(
name=switchName)
switchDict[switchName] = dvswitches[0][
'dvswitch']['portgroupNameList']
response = getExpectedPortGroupNames(
api_client,
physicalNetwork,
networkRate,
switchName,
trafficTypes,
switchDict,
vcenter_conn,
vlanSpecified,
trafficType)
assert response[0] == PASS, response[1]
dvPortGroups = response[1]
expectedDVPortGroupNames.extend(dvPortGroups)
vcenterPortGroups = list(itertools.chain(*(switchDict.values())))
for expectedDVPortGroupName in expectedDVPortGroupNames:
assert expectedDVPortGroupName in vcenterPortGroups,\
"Port group %s not present in VCenter DataCenter" %\
expectedDVPortGroupName
except Exception as e:
return [FAIL, e]
return [PASS, None]
def migrate_router(apiclient, router_id, host_id):
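    """Migrate the given router (a system VM) to the specified host."""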
cmd = migrateSystemVm.migrateSystemVmCmd()
cmd.hostid = host_id
cmd.virtualmachineid = router_id
apiclient.migrateSystemVm(cmd)
| ikoula/cloudstack | tools/marvin/marvin/lib/common.py | Python | gpl-2.0 | 67,873 | 0.002328 |
# Pre-compute the shortest path length in the stoichiometric matrix
# NB: check some of the shortest path calcs?
import settings
import networkx as nx
import pandas as pd
import numpy as np
import os
METS_TO_REMOVE = ['h', 'h2o', 'co2', 'o2', 'pi', 'atp', 'adp', 'amp',
'nad', 'nadh', 'nadp', 'nadph', 'coa', 'thf', '5mthf',
'5fthf', 'methf', 'mlthf', 'nh4', 'cmp', 'q8', 'q8h2',
'udp', 'udpg', 'fad', 'fadh2', 'ade', 'ctp', 'gtp', 'h2o2',
'mql8', 'mqn8', 'na1', 'ppi', 'acp']
def convert_to_bipartite(S):
"""
convert a standard stoichiometric matrix (in a Pandas DataFrame)
to a bipartite graph with an edge between every reactant and all its
reactions
"""
# convert the stoichiometric matrix to a sparse representation
S_sparse = pd.melt(S.reset_index(),
id_vars='bigg.metabolite', value_name='coeff')
S_sparse = S_sparse[S_sparse.coeff != 0]
# remove the high-degree metabolites that we want to ignore for graph
# distance
met_comp = S_sparse['bigg.metabolite'].str.rsplit('_', 1, expand=True)
S_sparse = S_sparse[(~met_comp[0].isin(METS_TO_REMOVE)) & (met_comp[1] == 'c')]
S_sparse['bigg.metabolite'] = met_comp[0].str.upper()
mets = set(S_sparse['bigg.metabolite'].unique())
rxns = set(S_sparse['bigg.reaction'].unique())
B = nx.Graph()
B.add_nodes_from(mets, bipartite=0)
B.add_nodes_from(rxns, bipartite=1)
B.add_weighted_edges_from(S_sparse.as_matrix())
return B, mets, rxns
def calculate_distances(smrn):
smrn['bigg.metabolite'] = smrn['bigg.metabolite'].str.upper()
# %% Read BIGG model
model, S = settings.get_ecoli_json()
B, mets, rxns = convert_to_bipartite(S)
spl = dict(nx.shortest_path_length(B))
spl_values = []
for met in mets:
r = rxns.intersection(spl[met].keys())
spl_values += list(map(spl[met].get, r))
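    # In the bipartite graph every metabolite-to-reaction path alternates
    # between the two node sets, so a raw path length of 2k+1 corresponds to
    # k intervening reactions; hence the (length - 1) / 2 conversion below.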
all_distances = (np.array(spl_values) - 1.0) / 2
smrn_dist = smrn[['bigg.metabolite', 'bigg.reaction']].drop_duplicates()
    smrn_dist['distance'] = np.nan
for i, row in smrn_dist.iterrows():
source = row['bigg.metabolite'] # remember we dropped it before
target = row['bigg.reaction']
if source.lower() in METS_TO_REMOVE:
continue
if target in spl[source]:
smrn_dist.at[i, 'distance'] = (spl[source][target] - 1.0) / 2.0
# %% Save data
smrn_dist = smrn_dist.dropna()
return smrn_dist, all_distances
if __name__ == '__main__':
# print out a list of all 0-distance interaction, i.e. substrate
# or product inhibition
smrn = pd.read_csv(os.path.join(settings.CACHE_DIR,
'iJO1366_SMRN.csv'), index_col=None)
smrn_dist, all_distances = calculate_distances(smrn)
smrn_merged = pd.merge(smrn, smrn_dist, on=['bigg.metabolite',
'bigg.reaction'])
    dist_mode_df = smrn_merged.groupby(['bigg.metabolite',
                                        'bigg.reaction', 'Mode']).first()
dist_mode_df = dist_mode_df[['distance']].reset_index()
react_inhibition = dist_mode_df[(dist_mode_df['Mode'] == '-') & (dist_mode_df['distance'] == 0)]
react_inhibition.to_excel(os.path.join(settings.RESULT_DIR, 'reactant_inhibition.xls'))
| eladnoor/small-molecule-regulation | python/topology.py | Python | mit | 3,400 | 0.000882 |
"""Farragone.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version."""
import gettext
from . import coreconf as _conf
gettext.install(_conf.IDENTIFIER, _conf.PATH_LOCALE, names=('ngettext',))
from . import util, conf, core, ui
| ikn/farragone | farragone/__init__.py | Python | gpl-3.0 | 418 | 0.002392 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceLoadBalancersOperations(object):
"""NetworkInterfaceLoadBalancersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkInterfaceLoadBalancerListResult"]
"""List all load balancers in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceLoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.NetworkInterfaceLoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceLoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceLoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'} # type: ignore
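    # Illustrative sketch (not part of the generated client): consuming the
    # pager returned by list(). Client construction is assumed to follow the
    # usual azure-mgmt-network pattern; resource names are hypothetical.
    #
    #   pager = client.network_interface_load_balancers.list(
    #       resource_group_name="rg1", network_interface_name="nic1")
    #   for load_balancer in pager:
    #       print(load_balancer.name)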
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_network_interface_load_balancers_operations.py | Python | mit | 5,808 | 0.004304 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 30 20:12:17 2017
@author: Mohtashim
"""
# Create a list of strings: spells
spells = ["protego", "accio", "expecto patronum", "legilimens"]
# Use map() to apply a lambda function over spells: shout_spells
shout_spells = map(lambda item: item + '!!!', spells)
# Convert shout_spells to a list: shout_spells_list
shout_spells_list = list(shout_spells)
# Convert shout_spells into a list and print it
print(shout_spells_list)
| Moshiasri/learning | Python_dataCamp/Map()LambdaFunction.py | Python | gpl-3.0 | 474 | 0 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/corellia/player_house_deed/shared_corellia_house_large_deed.iff"
result.attribute_template_id = 2
result.stfName("deed","corellia_house_large_deed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/tangible/deed/corellia/player_house_deed/shared_corellia_house_large_deed.py | Python | mit | 490 | 0.044898 |
import h2o, h2o_config
l = h2o_config.setup_test_config(test_config_json='test_config.json')
print "\nsetup_test_config returns list of test config objs:", l
# Here are some ways to reference the config state that the json created
print "\nHow to reference.."
for i, obj in enumerate(h2o_config.configs):
print "keys in config", i, ":", obj.__dict__.keys()
print h2o_config.configs[0].trees
for t in h2o_config.configs:
print "\nTest config_name:", t.config_name
print "trees:", t.trees
print "params:", t.params
print "params['timeoutSecs']:", t.params['timeoutSecs']
| 111t8e/h2o-2 | py/test_config_basic.py | Python | apache-2.0 | 597 | 0.005025 |
from robot.libraries.BuiltIn import BuiltIn
def fail_with_traceback(traceback_message):
BuiltIn().fail(traceback_message)
| allure-framework/allure-python | allure-robotframework/examples/status/status_library.py | Python | apache-2.0 | 129 | 0 |
# -*- coding: utf-8 *-*
# made for python3!
from tkinter import *
from tkinter.ttk import *
class TkWindow():
registers = {}
def __init__(self, parent, title, width=400, height=300):
self.parent = parent #Tk or toplevel
self.w = width
self.h = height
self.make_gui(title)
self.loaded()
def loaded(self):
pass # overload me
"""register another window to receive a signal"""
@classmethod
def register(cls, target, signame):
if not target in cls.registers:
cls.registers[target] = []
cls.registers[target].append(signame)
"""send a signal to all registered windows"""
def send(self, signame, data=None):
cls = self.__class__
for targ, sigs in cls.registers.items():
            if sigs is not None:
if signame in sigs:
targ.receive(self, signame, data)
"""receive a signame"""
def receive(self, sender, signame, data):
print("receive not overloaded but signal registered for <"
+ signame + "> from <"
+ str(sender) + "> with <" + str(data) +">")
# overload me in your receiving window for your application
def make_gui(self, title):
self.parent.title(title)
Style().configure("TFrame", padding=5)
self.frame = Frame(self.parent,
width=self.w,
height=self.h)
def makelabel(self, parent, lcol=0, lrow=0, caption='', **options):
entry = Label(parent, text=caption, **options).grid(row=lrow, column=lcol, sticky=NE)
return entry
"""create a multiline text entry field with a label"""
def maketext(self, parent, lcol=0, lrow=0, erow=0, ecol=1, caption='', width=None, **options):
if caption != '':
Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=NE)
entry = Text(parent, **options)
if width:
entry.config(width=width)
entry.grid(row=erow, column=ecol, sticky=W)
return entry
def makeentry(self, parent, lcol=0, lrow=0, erow=0, ecol=1, caption='', width=None, **options):
if caption!='':
Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=E)
entry = Entry(parent, **options)
if width:
entry.config(width=width)
entry.grid(row=erow, column=ecol, sticky=W)
return entry
def setentryvalue(self, entry, value):
entry.delete(0,END)
entry.insert(0, value)
def settextvalue(self, entry, value):
entry.delete(0.0,END);
entry.insert(0.0, value);
def setbuttontext(self, button, txt):
button['text'] = txt
def makecombo(self, parent, ccol=1, crow=0, lcol=0, lrow=0, caption='',
width=None, **options):
if caption!='':
Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=E)
cbox = Combobox(parent, **options)
if width:
cbox.config(width=width)
cbox.grid(row=crow, column=ccol)
return cbox
def makecheck(self, parent, ecol=0, erow=0, caption='', **options):
cb = Checkbutton(parent, text=caption, **options)
cb.grid(row=erow, column=ecol, sticky=W)
return cb
def makebutton(self, parent, bcol=0, brow=0, caption='Press me', sticky=W, **options):
bu = Button(parent, text=caption, **options)
bu.grid(row=brow, column=bcol, sticky=sticky)
return bu
"""create a list at the givne position"""
def makelist(self, parent, llcol=0, llrow=1, lcol=0, lrow=0,
caption='List', elements=[], mode='v',
lrowspan=1, lcolspan=1,
**options):
frame = Frame(parent)
frame.grid(row=lrow, column=lcol, rowspan=lrowspan, columnspan=lcolspan)
hscroll = vscroll = None
if caption!='':
Label(parent, text=caption).grid(row=llrow, column=llcol, sticky=W)
lb = Listbox(frame, **options)
if 'v' in mode:
vscroll = Scrollbar(frame, orient=VERTICAL)
lb.config(yscrollcommand = vscroll.set)
vscroll.config(command=lb.yview)
vscroll.pack(side=RIGHT, fill=Y)
if 'h' in mode:
            hscroll = Scrollbar(frame, orient=HORIZONTAL)
lb.configure(xscrollcommand = hscroll.set)
hscroll.config(command = lb.xview)
hscroll.pack(side=BOTTOM, fill=X)
lb.pack(side=LEFT, fill=BOTH, expand=1)
        if len(elements) > 0:
            self.setlistelements(lb, elements)
return lb
def setlistelements(self, lb, elements):
lb.delete(0, END)
for element in elements:
lb.insert(END, element)
| ManInAGarden/PiADCMeasure | tkwindow.py | Python | lgpl-3.0 | 4,958 | 0.010286 |
from pycp2k.inputsection import InputSection
from ._each112 import _each112
class _davidson2(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each112()
self._name = "DAVIDSON"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
| SINGROUP/pycp2k | pycp2k/classes/_davidson2.py | Python | lgpl-3.0 | 666 | 0.003003 |
#
# Copyright (C) 2016 by YOUR NAME HERE
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os, Ice
ROBOCOMP = ''
try:
ROBOCOMP = os.environ['ROBOCOMP']
except:
print '$ROBOCOMP environment variable not set, using the default value /opt/robocomp'
ROBOCOMP = '/opt/robocomp'
if len(ROBOCOMP)<1:
print 'ROBOCOMP environment variable not set! Exiting.'
sys.exit()
preStr = "-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/"
Ice.loadSlice(preStr+"TrajectoryRobot2D.ice")
from RoboCompTrajectoryRobot2D import *
class TrajectoryRobot2DI(TrajectoryRobot2D):
def __init__(self, worker):
self.worker = worker
def getState(self, c):
return self.worker.getState()
def goBackwards(self, target, c):
return self.worker.goBackwards(target)
def stop(self, c):
return self.worker.stop()
def goReferenced(self, target, xRef, zRef, threshold, c):
return self.worker.goReferenced(target, xRef, zRef, threshold)
def changeTarget(self, target, c):
return self.worker.changeTarget(target)
def go(self, target, c):
return self.worker.go(target)
def mapBasedTarget(self, parameters, c):
return self.worker.mapBasedTarget(parameters)
| robocomp/robocomp-robolab | experimental/dumbGlobalTrajectory/src/trajectoryrobot2dI.py | Python | gpl-3.0 | 1,815 | 0.019835 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from .. import core
__all__ = [
'start_gperf_profiler',
'stop_gperf_profiler',
]
def start_gperf_profiler():
core.start_imperative_gperf_profiler()
def stop_gperf_profiler():
core.stop_imperative_gperf_profiler()
| luotao1/Paddle | python/paddle/fluid/dygraph/profiler.py | Python | apache-2.0 | 886 | 0 |
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""In charge of collecting data from drivers and push it to the publisher."""
import os
import msgpack
import nanomsg
from oslo_log import log
from watcher_metering.agent.manager import MetricManager
LOG = log.getLogger(__name__)
class Agent(MetricManager):
def __init__(self, conf, driver_names, use_nanoconfig_service,
publisher_endpoint, nanoconfig_service_endpoint,
nanoconfig_update_endpoint, nanoconfig_profile):
"""
:param conf: Configuration obtained from a configuration file
:type conf: oslo_config.cfg.ConfigOpts instance
:param driver_names: The list of driver names to register
:type driver_names: list of str
:param use_nanoconfig_service: Indicates whether or not it should use a
nanoconfig service
:type use_nanoconfig_service: bool
:param publisher_endpoint: Publisher server URI
:type publisher_endpoint: str
:param nanoconfig_service_endpoint: Nanoconfig service URI
:type nanoconfig_service_endpoint: str
:param nanoconfig_update_endpoint: Nanoconfig update service URI
:type nanoconfig_update_endpoint: str
:param nanoconfig_profile: Nanoconfig profile URI
:type nanoconfig_profile: str
"""
super(Agent, self).__init__(conf, driver_names)
self.socket = nanomsg.Socket(nanomsg.PUSH)
self.use_nanoconfig_service = use_nanoconfig_service
self.publisher_endpoint = publisher_endpoint
self.nanoconfig_service_endpoint = nanoconfig_service_endpoint
self.nanoconfig_update_endpoint = nanoconfig_update_endpoint
self.nanoconfig_profile = nanoconfig_profile
@property
def namespace(self):
return "watcher_metering.drivers"
def start(self):
LOG.info("[Agent] Starting main thread...")
super(Agent, self).start()
def setup_socket(self):
if self.use_nanoconfig_service:
self.set_nanoconfig_endpoints()
self.socket.configure(self.nanoconfig_profile)
LOG.info("[Agent] Agent nanomsg's profile `%s`",
self.nanoconfig_profile)
else:
LOG.debug("[Agent] Agent connected to: `%s`",
self.publisher_endpoint)
self.socket.connect(self.publisher_endpoint)
LOG.info("[Agent] Ready for pushing to Publisher node")
def set_nanoconfig_endpoints(self):
"""This methods sets both the `NN_CONFIG_SERVICE` and
`NN_CONFIG_UPDATES` environment variable as nanoconfig uses it to
access the nanoconfig service
"""
# NN_CONFIG_SERVICE:
nn_config_service = os.environ.get("NN_CONFIG_SERVICE")
if not self.nanoconfig_service_endpoint and not nn_config_service:
raise ValueError(
"Invalid configuration! No NN_CONFIG_SERVICE set. You need to "
"configure your `nanoconfig_service_endpoint`.")
if self.nanoconfig_service_endpoint:
os.environ["NN_CONFIG_SERVICE"] = self.nanoconfig_service_endpoint
else:
self.nanoconfig_service_endpoint = nn_config_service
# NN_CONFIG_UPDATES
nn_config_updates = os.environ.get("NN_CONFIG_UPDATES")
if not self.nanoconfig_update_endpoint and not nn_config_updates:
raise ValueError(
"Invalid configuration! No NN_CONFIG_UPDATES set. You need to "
"configure your `nanoconfig_update_endpoint`.")
if self.nanoconfig_update_endpoint:
os.environ["NN_CONFIG_UPDATES"] = self.nanoconfig_update_endpoint
else:
self.nanoconfig_update_endpoint = nn_config_updates
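    # Illustrative endpoint values (assumptions, not taken from a real config):
    #   nanoconfig_service_endpoint -> "tcp://config-host:5555"
    #   nanoconfig_update_endpoint  -> "tcp://config-host:5556"
    # nanoconfig then resolves the profile URI (e.g. "nanoconfig://watcher-agent",
    # hypothetical) against NN_CONFIG_SERVICE and applies live updates taken
    # from NN_CONFIG_UPDATES.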
def run(self):
self.setup_socket()
super(Agent, self).run()
def stop(self):
self.socket.close()
super(Agent, self).stop()
LOG.debug("[Agent] Stopped")
def update(self, notifier, data):
LOG.debug("[Agent] Updated by: %s", notifier)
LOG.debug("[Agent] Preparing to send message %s", msgpack.loads(data))
try:
LOG.debug("[Agent] Sending message...")
# The agent will wait for the publisher server to be listening on
# the related publisher_endpoint before continuing
# In which case, you should start the publisher to make it work!
self.socket.send(data)
LOG.debug("[Agent] Message sent successfully!")
except nanomsg.NanoMsgError as exc:
LOG.error("Exception during sending the message to controller %s",
exc.args[0])
| b-com/watcher-metering | watcher_metering/agent/agent.py | Python | apache-2.0 | 5,245 | 0 |
def mangled_node_tree_name(b_material):
return "PH_" + b_material.name
| TzuChieh/Photon-v2 | BlenderAddon/PhotonBlend/bmodule/common/__init__.py | Python | mit | 73 | 0.013699 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Maximum path sum I" – Project Euler Problem No. 18
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=18
#
def get_triangular_list(text):
ret = []
tmp = []
i = j = 1
    for n in text.split():
tmp.append(int(n))
j = j + 1
if j > i:
ret.append(tmp)
tmp = []
j = 1
i = i + 1
return ret
def find_max_path(nums, row, col):
if row == len(nums):
return 0
n = nums[row][col]
a = n + find_max_path(nums, row + 1, col + 0)
b = n + find_max_path(nums, row + 1, col + 1)
return max(a, b)
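# Note: this top-down recursion revisits subproblems and is exponential in the
# number of rows; it is fine for the 15-row triangle here, but Project Euler
# Problem 67 (100 rows) needs memoization or a bottom-up dynamic-programming pass.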
# Testrun
pyramid = """
3
7 4
2 4 6
8 5 9 3
"""
tri = get_triangular_list(pyramid)
assert (23 == find_max_path(tri, 0, 0)), "Testcase failed"
# Solve
pyramid = """
75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23
"""
tri = get_triangular_list(pyramid)
print "Solution:", find_max_path(tri, 0, 0)
| fbcom/project-euler | 018_maximum_path_sum_1.py | Python | mit | 1,351 | 0.000741 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Product(Model):
_required = []
_attribute_map = {
'integer': {'key': 'integer', 'type': 'int'},
'string': {'key': 'string', 'type': 'str'},
}
def __init__(self, *args, **kwargs):
"""Product
:param int integer
:param str string
"""
self.integer = None
self.string = None
super(Product, self).__init__(*args, **kwargs)
| vulcansteel/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyArray/auto_rest_swagger_bat_array_service/models/product.py | Python | mit | 931 | 0 |
from __future__ import print_function, division
import cPickle
import gzip
import os
import sys
import timeit
import numpy
import theano
from theano import tensor
import mdn_one_ahead
# parameters
batch_size = 100
L1_reg=0.00
L2_reg=0.0001
n_epochs=200
learning_rate = 0.001
momentum = 0.9
sigma_in = 320
mixing_in = 320
n_components = 5
EPS = numpy.finfo(theano.config.floatX).eps
# load data
datasets = mdn_one_ahead.load_data()
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
X = train_set_x.get_value(borrow=True)[:20].copy()
Y = train_set_y.get_value(borrow=True)[:20].copy()
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
print( '... building the model')
# allocate symbolic variables for the data
index = tensor.lscalar() # index to a [mini]batch
x = tensor.matrix('x')  # the input features for a minibatch
y = tensor.vector('y')  # the targets are presented as a 1D vector of real values
rng = numpy.random.RandomState(1234)
classifier = mdn_one_ahead.MLP(
rng=rng,
input=x,
n_in=320,
n_hiddens=[300, 300, 300, 300]
)
cost = (
classifier.negative_log_likelihood(y)
+ L2_reg * classifier.L2_sqr
)
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
gparams = [tensor.grad(cost, param) for param in classifier.params]
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(classifier.params, gparams)
]
model_gradients = theano.function(
inputs = [x, y], outputs=gparams)
train_gradients = theano.function(
inputs=[index],
outputs=gparams,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
print('... training')
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.99995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
gs = train_gradients(minibatch_index)
if any(numpy.any(numpy.isnan(g)) for g in gs):
import pdb; pdb.set_trace()
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
# l = 7.752, tanh, 3 components, 20 hid, 1 hidlayer,
# l = 5.057, relu, 3 components, (100, 100) hid
# l = 4.865, relu, 5 components, (150, 150, 150) hid
| markstoehr/structured_gaussian_mixtures | structured_gaussian_mixtures/mdn_experiment_one_ahead.py | Python | apache-2.0 | 5,886 | 0.002039 |
from ubluepy import Scanner, constants
def bytes_to_str(bytes):
string = ""
for b in bytes:
string += chr(b)
return string
def get_device_names(scan_entries):
dev_names = []
for e in scan_entries:
scan = e.getScanData()
if scan:
for s in scan:
if s[0] == constants.ad_types.AD_TYPE_COMPLETE_LOCAL_NAME:
dev_names.append((e, bytes_to_str(s[2])))
return dev_names
def find_device_by_name(name):
s = Scanner()
scan_res = s.scan(100)
device_names = get_device_names(scan_res)
for dev in device_names:
if name == dev[1]:
return dev[0]
# >>> res = find_device_by_name("micr")
# >>> if res:
# ... print("address:", res.addr())
# ... print("address type:", res.addr_type())
# ... print("rssi:", res.rssi())
# ...
# ...
# ...
# address: c2:73:61:89:24:45
# address type: 1
# rssi: -26
| adafruit/micropython | ports/nrf/examples/ubluepy_scan.py | Python | mit | 923 | 0.005417 |
# -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
class MmddController(Controller):
def get(self):
self.model.tests.helper_test.datetime.switch_timezone('Asia/Seoul')
ts = 1451671445
ms = ts * 1000
dt = self.helper.datetime.convert(timestamp=ts)
args_dt = {'datetime': dt}
args_ms = {'timestamp': ms, 'ms': True}
args_ts = {'timestamp': ts}
args_dt_cc = {'datetime': dt, 'concat': ''}
args_ms_cc = {'timestamp': ms, 'ms': True, 'concat': '/'}
args_ts_cc = {'timestamp': ts, 'concat': '/'}
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_dt}) == '01.02')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ms}) == '01.02')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ts}) == '01.02')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_dt_cc}) == '0102')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ms_cc}) == '01/02')
assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ts_cc}) == '01/02')
| why2pac/dp-tornado | example/controller/tests/view/ui_methods/mmdd.py | Python | mit | 1,216 | 0.004934 |
# -*- coding: utf-8 -*-
#
# This file is part of JSONAlchemy.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# JSONAlchemy is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# JSONAlchemy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JSONAlchemy; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Function for tokenizing strings in models' files."""
def util_split(string, separator, index):
"""
Helper function to split safely a string and get the n-th element.
:param string: String to be split
    :param separator: the delimiter to split the string on
:param index: n-th part of the split string to return
:return: The n-th part of the string or empty string in case of error
"""
string_splitted = string.split(separator)
try:
return string_splitted[index].strip()
    except IndexError:
return ""
| tiborsimko/jsonalchemy | jsonalchemy/jsonext/functions/util_split.py | Python | gpl-2.0 | 1,313 | 0.000762 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api import http_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.iam.v1.logging import audit_data_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
from google.rpc import status_pb2
from google.api_core.protobuf_helpers import get_messages
from google.cloud.spanner_admin_database_v1.proto import (
spanner_database_admin_pb2)
_shared_modules = [
http_pb2,
iam_policy_pb2,
policy_pb2,
audit_data_pb2,
operations_pb2,
any_pb2,
descriptor_pb2,
empty_pb2,
timestamp_pb2,
status_pb2,
]
_local_modules = [
spanner_database_admin_pb2,
]
names = []
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = 'google.cloud.spanner_admin_database_v1.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
| jonparrott/google-cloud-python | spanner/google/cloud/spanner_admin_database_v1/types.py | Python | apache-2.0 | 1,907 | 0 |
import datetime
import os
import shutil
import tarfile
import tempfile
from django.conf import settings
from django.utils import timezone
from celery import shared_task
from celery.utils.log import get_task_logger
from grout.models import RecordType
from black_spots.tasks import (
forecast_segment_incidents,
load_blackspot_geoms,
load_road_network,
get_training_noprecip
)
from black_spots.tasks.get_segments import get_segments_shp, create_segments_tar
from black_spots.models import BlackSpotTrainingCsv, RoadSegmentsShapefile, BlackSpotConfig
from data.tasks.fetch_record_csv import export_records
logger = get_task_logger(__name__)
COMBINED_SEGMENTS_SHP_NAME = os.getenv('COMBINED_SEGMENTS_SHP_NAME', 'combined_segments.shp')
def get_latest_segments_tar_uuid(roads_srid, records_csv_obj_id):
cutoff = timezone.now() - datetime.timedelta(days=30)
segments_shp_obj = RoadSegmentsShapefile.objects.all().order_by('-created').first()
# Refresh road segments if the most recent one is more than 30 days out of date
if segments_shp_obj and segments_shp_obj.created > cutoff:
logger.info("Using existing RoadSegmentsShapefile")
return str(segments_shp_obj.uuid)
logger.info("Creating new RoadSegmentsShapefile")
logger.info("Loading road network")
lines_shp_path = load_road_network(output_srid='EPSG:{}'.format(roads_srid))
logger.info("Creating segments shape files")
shp_output_dir = get_segments_shp(lines_shp_path, records_csv_obj_id, roads_srid)
logger.info("Compressing shape files into tarball")
return create_segments_tar(shp_output_dir)
def get_forecast_csv_path(segments_shp_uuid, records_csv_obj_id, roads_srid):
# - Match events to segments shapefile
blackspots_output = get_training_noprecip(
segments_shp_uuid,
records_csv_obj_id,
roads_srid
)
# - Run Rscript to output CSV
segments_csv = BlackSpotTrainingCsv.objects.get(pk=blackspots_output).csv.path
return forecast_segment_incidents(segments_csv, '/var/www/media/forecasts.csv')
@shared_task
def calculate_black_spots(history_length=datetime.timedelta(days=5 * 365 + 1), roads_srid=3395):
"""Integrates all black spot tasks into a pipeline
Args:
history_length (timedelta): Length of time to use for querying for historic records.
Note: the R script will fail if it doesn't have a certain
amount of data, which is why this is set to 5 years.
TODO: make the R script more robust, so it can handle a
dynamic number of years without failure.
roads_srid (int): SRID in which to deal with the Roads data
"""
try:
severity_percentile_threshold = (
BlackSpotConfig.objects.all().order_by('pk').first().severity_percentile_threshold
)
except AttributeError:
logger.warn('BlackSpots are not fully configured; set a percentile cutoff first.')
return
# Get the parameters we'll use to filter down the records we want
# Note that this assumes that the RecordType with this label to be used will also be marked as
# `active`. The `load_incidents` script ensures only the most recent record type is set as such.
record_type_pk = RecordType.objects.filter(
label=settings.BLACKSPOT_RECORD_TYPE_LABEL,
active=True
).first().pk
# - Get events CSV. This is obtained before the road network segments are calculated
# as an optimization, so we can ignore roads that won't have any associated records.
now = timezone.now()
oldest = now - history_length
records_csv_obj_id = export_records(
oldest,
now,
record_type_pk
)
# Get the UUID, since that is what is used when passing to tasks in the chain
segments_shp_uuid = get_latest_segments_tar_uuid(
roads_srid,
records_csv_obj_id
)
forecasts_csv = get_forecast_csv_path(
segments_shp_uuid,
records_csv_obj_id,
roads_srid
)
# - Load blackspot geoms from shapefile and CSV
# The shapefile is stored as a gzipped tarfile so we need to extract it
tar_output_dir = tempfile.mkdtemp()
try:
shp_tar = RoadSegmentsShapefile.objects.get(uuid=segments_shp_uuid).shp_tgz.path
with tarfile.open(shp_tar, "r:gz") as tar:
# TODO: Extract only the combined segments file, not the entire tarball
tar.extractall(tar_output_dir)
logger.info("Performing blackspot calculations")
segments_path = os.path.join(tar_output_dir, 'segments', COMBINED_SEGMENTS_SHP_NAME)
load_blackspot_geoms(
segments_path,
forecasts_csv,
record_type_pk,
roads_srid,
output_percentile=severity_percentile_threshold
)
finally:
shutil.rmtree(tar_output_dir)
| WorldBank-Transport/DRIVER | app/black_spots/tasks/calculate_black_spots.py | Python | gpl-3.0 | 4,985 | 0.004814 |
import unittest
import numpy as np
from collections import OrderedDict
from gtrackcore.metadata import GenomeInfo
from gtrackcore.track.core.GenomeRegion import GenomeRegion
from gtrackcore.track.format.TrackFormat import TrackFormat
from gtrackcore.track_operations.operations.Flank import Flank
from gtrackcore.track_operations.TrackContents import TrackContents
from gtrackcore.test.track_operations.OperationTest import createTrackView
class FlankTest(unittest.TestCase):
def setUp(self):
self.chr1 = (GenomeRegion('hg19', 'chr1', 0,
GenomeInfo.GENOMES['hg19']['size']['chr1']))
self.chromosomes = (GenomeRegion('hg19', c, 0, l)
for c, l in
GenomeInfo.GENOMES['hg19']['size'].iteritems())
def _runFlankSegmentsTest(self, starts, ends, expStarts, expEnds,
nrBP, after=True, before=True):
"""
Run a test on the creation of a Flank track from a segmented track.
        The test expects segments to be present only in chr1;
        all other chromosomes need to be of size zero.
        :param starts: Array of segment starts in the track.
        :param ends: Array of segment ends in the track.
        :param expStarts: Expected starts of flanks.
        :param expEnds: Expected ends of flanks.
        :param nrBP: INT. Size of flank in base pairs.
        :param after: Boolean. Create flanks after the segment ends.
        :param before: Boolean. Create flanks before the segment starts.
:return:
"""
track = self._createTrackContent(starts, ends)
f = Flank(track)
# Result track type is Segments as default
f.setFlankSize(nrBP)
f.setAfter(after)
f.setBefore(before)
tc = f()
for (k, v) in tc.getTrackViews().items():
print expStarts
print v.startsAsNumpyArray()
print expEnds
print v.endsAsNumpyArray()
if cmp(k, self.chr1) == 0:
# All test tracks are in chr1
self.assertTrue(np.array_equal(v.startsAsNumpyArray(),
expStarts))
self.assertTrue(np.array_equal(v.endsAsNumpyArray(), expEnds))
else:
# Tests if all tracks no in chr1 have a size of 0.
self.assertEqual(v.startsAsNumpyArray().size, 0)
self.assertEqual(v.endsAsNumpyArray().size, 0)
def _createTrackContent(self, starts, ends):
"""
        Create a track view from a start/end list pair.
        Help method used in testing. This method will create an hg19 track with
data in chromosome 1 only.
:param starts: List of track start positions
:param ends: List of track end positions
:return: A TrackContent object
"""
starts = np.array(starts)
ends = np.array(ends)
tv = createTrackView(region=self.chr1, startList=starts, endList=ends,
allow_overlap=False)
d = OrderedDict()
d[self.chr1] = tv
return TrackContents('hg19', d)
# **** Points tests ****
# **** Segments tests ****
def testFlankSimpleBefore(self):
"""
Simple single segment before.
:return: None
"""
self._runFlankSegmentsTest(starts=[100], ends=[150], expStarts=[50],
expEnds=[100], nrBP=50, after=False,
before=True)
def testFlankSimpleAfter(self):
"""
Simple single segment after.
:return: None
"""
self._runFlankSegmentsTest(starts=[100], ends=[150], expStarts=[150],
expEnds=[200], nrBP=50, after=True,
before=False)
if __name__ == "__main__":
unittest.main()
| sivertkh/gtrackcore | gtrackcore/test/track_operations/FlankTest.py | Python | gpl-3.0 | 3,882 | 0.000258 |
__author__ = "alex"
__date__ = "$Jun 1, 2015 10:46:55 PM$"
from math import radians, sin, cos, sqrt, asin
from bs4 import BeautifulSoup
from types import SimpleNamespace
import urllib.request
import urllib.parse
import json
class Restaurant:
def __init__(self, name, address, last_inspection, category):
self.name = name
self.address = address
self.last_inspection = last_inspection
self.category = category
scheme_host = "http://www.healthspace.com"
vdh_detail_translate = {
'Phone Number:': 'phone_number',
'Facility Type:': 'facility_type',
'# of Priority Foundation Items on Last Inspection:': 'priority_foundation_items',
    '# of Priority Items on Last Inspection:': 'priority_items',
'# of Core Items on Last Inspection:': 'core_items',
'# of Critical Violations on Last Inspection:': 'critical_items',
'# of Non-Critical Violations on Last Inspection:': 'non_critical_items'
}
MI = 3959
NM = 3440
KM = 6371
def haversine(point1, point2, R=KM):
lat_1, lon_1 = point1
lat_2, lon_2 = point2
delta_lat = radians(lat_2 - lat_1)
delta_lon = radians(lon_2 - lon_1)
lat_1 = radians(lat_1)
lat_2 = radians(lat_2)
a = sin(delta_lat / 2) ** 2 + cos(lat_1) * cos(lat_2) * sin(delta_lon / 2) ** 2
c = 2 * asin(sqrt(a))
return R * c
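# Illustrative check: haversine((36.85, -76.29), (41.88, -87.63)) -- roughly
# Norfolk, VA to Chicago, IL -- is on the order of 1.1e3 km with the default
# R=KM (coordinates approximate).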
def get_food_list_by_name():  # get all restaurants
path = "/Clients/VDH/Norfolk/Norolk_Website.nsf/Food-List-ByName"
form = {
"OpenView": "",
"RestrictToCategory": "faa4e68b1bbbb48f008d02bf09dd656f",
"count": "400",
"start": "1",
}
query = urllib.parse.urlencode(form)
with urllib.request.urlopen(scheme_host + path + "?" + query) as data:
soup = BeautifulSoup(data.read())
return soup
def food_table_iter(soup):
"""Columns are 'Name', '' , 'Facility Location', 'Last Inspection',
Plus an unnamed column with a RestrictToCategory key"""
table = soup.html.body.table
for row in table.find_all("tr"):
columns = [td.text.strip() for td in row.find_all("td")]
for td in row.find_all("td"):
if td.a:
url = urllib.parse.urlparse(td.a["href"])
form = urllib.parse.parse_qs(url.query)
columns.append(form['RestrictToCategory'][0])
yield columns
def food_row_iter(table_iter):
heading = next(table_iter)
for row in table_iter:
        yield Restaurant(name=row[0], address=row[2], last_inspection=row[3], category=row[4])
def geocode_detail(business):
form = {
"address": business.address + ", Norfolk, VA",
"sensor": "false",
}
query = urllib.parse.urlencode(form, safe=",")
scheme_netloc_path = "http://maps.googleapis.com/maps/api/geocode/json"
with urllib.request.urlopen(scheme_netloc_path + "?" + query) as geocode:
response = json.loads(geocode.read().decode("UTF-8"))
lat_lon = response['results'][0]['geometry']['location']
business.latitude = lat_lon['lat']
business.longitude = lat_lon['lng']
return business
def get_food_facility_history(cat_key):
url_detail = "/Clients/VDH/Norfolk/Norolk_Website.nsf/Food-FacilityHistory"
form = {
"OpenView": "",
"RestrictToCategory": cat_key
}
query = urllib.parse.urlencode(form)
with urllib.request.urlopen(scheme_host + url_detail + "?" + query) as data:
soup = BeautifulSoup(data.read())
return soup
def inspection_detail(business):
soup = get_food_facility_history(business.category)
business.name2 = soup.body.h2.text.strip()
table = soup.body.table
for row in table.find_all("tr"):
column = list(row.find_all("td"))
name = column[0].text.strip()
value = column[1].text.strip()
setattr(business, vdh_detail_translate[name], value)
return business
def get_chicago_json():
form = {
"accessType": "DOWNLOAD",
"$where": "inspection_date>2015-01-01",
}
query = urllib.parse.urlencode(form)
scheme_netloc_path = "https://data.cityofgchicago.org/api/views/4ijn-s7e5/rows.json"
with urllib.request.urlopen(scheme_netloc_path + "?" + query) as data:
with open("chicago_data.json", "w") as output:
output.write(data.read())
def chicago_row_iter():  # create SimpleNamespace objects from the downloaded JSON
with open("chicago_data.json", encoding="UTF-8") as data_file:
inspections = json.load(data_file)
headings = [item['fieldName'] for item in inspections["meta"]["view"]["columns"]]
for row in inspections["data"]:
data = SimpleNamespace(**dict(zip(headings, row)))
yield data
def parse_details(business):
business.latitude = float(business.latitude)
business.longitude = float(business.longitude)
if business.violations is None:
business.details = []
else:
        business.details = [v.strip() for v in business.violations.split("|")]
return business
def choice_iter_norfolk():
n_base = SimpleNamespace(address='333 Waterside Drive')
geocode_detail(n_base)
print(n_base)
    soup = get_food_list_by_name()
    # parse rows into Restaurant objects and enrich each with geocode and
    # inspection data before yielding
    for row in food_row_iter(food_table_iter(soup)):
        geocode_detail(row)
        inspection_detail(row)
        row.distance = haversine((row.latitude, row.longitude), (n_base.latitude, n_base.longitude))
        yield row
def choice_iter_chicago():
c_base = SimpleNamespace(address='3420 W GRACE ST')
geocode_detail(c_base)
print(c_base)
    for row in chicago_row_iter():
try:
parse_details(row)
            row.distance = haversine((row.latitude, row.longitude),
                                     (c_base.latitude, c_base.longitude))
yield row
except TypeError:
pass
# main code of the app
for business in choice_iter_norfolk():
print('name ', business.name, ' address ', business.address, ' lat ', business.latitude, ' lon ',
business.longitude,
' phone ', business.phone_number, ' type ', business.facility_type)
get_chicago_json()
for business in choice_iter_chicago():
print('name ', business.dba_name, ' address ', business.address, ' lat ', business.latitude, ' lon ',
business.longitude,
' phone ', business.phone_number, ' type ', business.facility_type, ' results ', business.results)
| Dr762/PythonExamples3.4 | Python3/restraunt_finder.py | Python | gpl-2.0 | 6,651 | 0.001504 |
# -*- coding: utf-8 -*-
# Copyright 2021 El Nogal - Pedro Gómez <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import registry
from openerp.addons import jasper_reports
def parser(cr, uid, ids, data, context):
parameters = {}
name = 'report.invoice_report_jasper'
model = 'account.invoice'
data_source = 'model'
uom_obj = registry(cr.dbname).get('product.uom')
invoice_obj = registry(cr.dbname).get('account.invoice')
invoice_ids = invoice_obj.browse(cr, uid, ids, context)
language = list(set(invoice_ids.mapped('partner_id.lang')))
if len(language) == 1:
context['lang'] = language[0]
invoice_lines_ids = {}
for invoice_id in invoice_ids:
language = invoice_id.partner_id.lang or 'es_ES'
invoice_lines_ids[str(invoice_id.id)] = []
for line in invoice_id.invoice_line:
product_id = line.product_id.with_context(lang=language)
uom_id = product_id.uom_id
uos_id = line.uos_id.with_context(lang=language)
uos_qty = line.quantity
uom_qty = uom_obj._compute_qty(cr, uid, uos_id.id, uos_qty, uom_id.id)
price_unit = line.price_unit
if uos_id and uos_id != uom_id:
price_unit = line.price_unit * uos_qty / uom_qty
vals = {
'invoice_id': invoice_id.id,
'prod_code': product_id.default_code or '',
'prod_ean13': product_id.ean13 or '',
'prod_name': line.name or product_id.name or '',
'origin': line.origin or '',
'client_order_ref': line.stock_move_id.picking_id.client_order_ref or '',
'uom_qty': uom_qty,
'uos_qty': uos_qty,
'uom_name': uom_id.name or '',
'uos_name': uos_id.name or uom_id.name or '',
'price_unit': price_unit or 0.0,
'discount': line.discount or 0.0,
'price_subtotal': line.price_subtotal,
'taxes': line.tax_str or '',
}
invoice_lines_ids[str(invoice_id.id)].append(vals)
parameters['invoice_lines_ids'] = invoice_lines_ids
return {
'ids': ids,
'name': name,
'model': model,
'records': [],
'data_source': data_source,
'parameters': parameters,
}
jasper_reports.report_jasper('report.invoice_report_jasper', 'account.invoice', parser)
| Comunitea/CMNT_00040_2016_ELN_addons | eln_reports/report/invoice/invoice_report_parser.py | Python | agpl-3.0 | 2,501 | 0.0012 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0018_auto_20170418_1220'),
]
operations = [
migrations.AlterField(
model_name='case',
name='type',
field=models.ForeignKey(related_name='cases', to='cases.CaseType'),
),
]
| HelloLily/hellolily | lily/cases/migrations/0019_auto_20170418_1243.py | Python | agpl-3.0 | 425 | 0 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
try:
from crowdin.connection import Connection, Configuration
except ImportError:
from connection import Connection, Configuration
import six
import logging
import json
import zipfile
import shutil
import io
import os
logger = logging.getLogger('crowdin')
class Methods:
def __init__(self, any_options, options_config):
# Get options arguments from console input
self.any_options = any_options
# Get parsed config file
self.options_config = options_config
self.project_info = {}
self.languages_list = []
# Main connection method to interact with connection.py
def true_connection(self, url, params, api_files=None, additional_parameters=None):
return Connection(self.options_config, url, params, api_files, self.any_options,
additional_parameters).connect()
def get_info(self):
# POST https://api.crowdin.com/api/project/{project-identifier}/info?key={project-key}
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/info', 'url_par4': True}
params = {'json': 'json'}
self.project_info = json.loads(self.true_connection(url, params).decode())
def get_info_files(self):
if not self.project_info:
self.get_info()
return self.project_info['files']
def get_info_lang(self):
if not self.project_info:
self.get_info()
return self.project_info['languages']
def get_info_branches(self):
if not self.project_info:
self.get_info()
branches = set()
for item in self.project_info['files']:
if item['node_type'] == 'branch':
branches.add(item['name'])
return branches
def lang(self):
if not self.languages_list:
data = json.loads(self.supported_languages().decode())
my_lang = self.get_info_lang()
for i in data:
for l in my_lang:
if i['crowdin_code'] == l['code']:
self.languages_list.append(i)
return self.languages_list
def parse(self, data, parent='', branch=False):
if data is None or not len(data):
yield parent + ('/' if data is not None and not len(data) else '')
else:
if branch:
for node in data:
if node.get('node_type') == 'branch' and node.get('name') == branch:
# remove branch name from files hierarchy
for result in self.parse(node.get('files'), parent, branch=False):
yield result
else:
for node in data:
if node.get('node_type') != 'branch':
for result in self.parse(node.get('files'), parent + '/' + node.get('name')):
yield result
def create_directory(self, name, is_branch=False):
# POST https://api.crowdin.net/api/project/{project-identifier}/add-directory?key={project-key}
logger.info("Creating remote {type} {name}".format(name=name, type='directory' if not is_branch else 'branch'))
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/add-directory', 'url_par4': True}
params = {'name': name, 'json': 'json'}
if is_branch:
params['is_branch'] = 1
if self.any_options.branch and not is_branch:
params['branch'] = self.any_options.branch
return self.true_connection(url, params)
def upload_files(self, files, export_patterns, parameters, item):
# POST https://api.crowdin.com/api/project/{project-identifier}/add-file?key={project-key}
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/add-file', 'url_par4': True}
if item[0] == '/':
sources = item[1:]
else:
sources = item
params = {'json': 'json', 'export_patterns[{0}]'.format(sources): export_patterns,
'titles[{0}]'.format(sources): parameters.get('titles'),
'type': parameters.get('type'),
'first_line_contains_header': parameters.get('first_line_contains_header'),
'scheme': parameters.get('scheme'), 'translate_content': parameters.get('translate_content'),
'translate_attributes': parameters.get('translate_attributes'),
'content_segmentation': parameters.get('content_segmentation'),
'translatable_elements': parameters.get('translatable_elements'),
'escape_quotes': parameters.get('escape_quotes', '3')}
if self.any_options.branch:
params['branch'] = self.any_options.branch
additional_parameters = {'file_name': sources, 'action_type': "Uploading"}
try:
with open(files, 'rb') as f:
api_files = {'files[{0}]'.format(sources): f}
return self.true_connection(url, params, api_files, additional_parameters)
except(OSError, IOError) as e:
print(e, "\n Skipped")
def update_files(self, files, export_patterns, parameters, item):
# POST https://api.crowdin.com/api/project/{project-identifier}/update-file?key={project-key}
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/update-file', 'url_par4': True}
if item[0] == '/':
sources = item[1:]
else:
sources = item
params = {'json': 'json', 'export_patterns[{0}]'.format(sources): export_patterns,
'titles[{0}]'.format(sources): parameters.get('titles'),
'first_line_contains_header': parameters.get('first_line_contains_header'),
'scheme': parameters.get('scheme'),
'update_option': parameters.get('update_option'),
'escape_quotes': parameters.get('escape_quotes', '3')}
if self.any_options.branch:
params['branch'] = self.any_options.branch
additional_parameters = {'file_name': sources, 'action_type': "Updating"}
try:
with open(files, 'rb') as f:
api_files = {'files[{0}]'.format(sources): f}
# print files
return self.true_connection(url, params, api_files, additional_parameters)
except(OSError, IOError) as e:
print(e, "\n Skipped")
def upload_translations_files(self, translations, language, source_file):
# POST https://api.crowdin.com/api/project/{project-identifier}/upload-translation?key={project-key
url = dict(post='POST', url_par1='/api/project/', url_par2=True, url_par3='/upload-translation', url_par4=True)
options_dict = vars(self.any_options)
params = {'json': 'json', 'language': language,
'auto_approve_imported': options_dict.get('imported', '0'),
'import_eq_suggestions': options_dict.get('suggestions', '0'),
'import_duplicates': options_dict.get('duplicates', '0')}
if self.any_options.branch:
params['branch'] = self.any_options.branch
additional_parameters = {'file_name': source_file, 't_l': language, 'action_type': "translations"}
try:
with open(translations, 'rb') as f:
api_files = {'files[{0}]'.format(source_file): f}
# print files
return self.true_connection(url, params, api_files, additional_parameters)
except(OSError, IOError) as e:
print(e, "\n Skipped")
def preserve_hierarchy(self, common_path):
common_path = [i[1:] if i[:1] == '/' and i.count('/') == 1 else i for i in common_path]
preserve_hierarchy = Configuration(self.options_config).preserve_hierarchy
if preserve_hierarchy is False:
for i in common_path:
if i.count('/') >= 2 and i.count('//') == 0:
check_list = []
for x in common_path:
new = x[:x.rfind("/")]
check_list.append(new[new.rfind("/"):])
if check_list.count(check_list[0]) == len(check_list):
sorted_list = [x[:x.rfind("/")] + '/' for x in common_path]
else:
sorted_list = []
for x in common_path:
g = x[:x.rfind("/")]
sorted_list.append(g[:g.rfind("/")])
common_path = [s.replace(os.path.commonprefix(sorted_list), '', 1) for s in common_path]
break
return common_path
def upload_sources(self, dirss=False):
dirs = []
files = []
project_files = self.parse(self.get_info_files(), branch=self.any_options.branch)
for item in project_files:
p = "/"
f = item[:item.rfind("/")]
l = f[1:].split("/")
i = 0
while i < len(l):
p = p + l[i] + "/"
i += 1
if p not in dirs:
dirs.append(p)
if not item.endswith("/"):
files.append(item)
all_info = Configuration(self.options_config).get_files_source()
base_path = os.path.normpath(Configuration(self.options_config).get_base_path()) + os.sep
common_path = self.preserve_hierarchy(all_info[::3])
# sources_path = common_path
translations_path = all_info[1::3]
sources_parameters = all_info[2::3]
# Creating branch if needed
if self.any_options.branch and self.any_options.branch not in self.get_info_branches():
self.create_directory(self.any_options.branch, is_branch=True)
# Creating directories
for item in common_path:
if '/' in item and not item[:item.rfind("/")] in dirs:
items = item[:item.rfind("/")]
# print items
p = "/"
if items[0] == '/':
items = items[1:]
l = items.split("/")
i = 0
while i < len(l):
p = p + l[i] + "/"
i += 1
if p not in dirs and not p == '//':
dirs.append(p)
self.create_directory(p)
# Uploading/updating files
for item, export_patterns, true_path, parameters in zip(common_path, translations_path,
all_info[::3], sources_parameters):
if parameters.get('dest'):
if '/' in item:
items = item[item.rfind("/"):]
item = parameters.get('dest').join(item.rsplit(items, 1))
else:
item = parameters.get('dest')
if item[0] != '/':
ite = "/" + item
else:
ite = item
full_path = base_path.replace('\\', '/') + true_path
print(full_path)
if ite not in files:
self.upload_files(full_path, export_patterns, parameters, item)
else:
self.update_files(full_path, export_patterns, parameters, item)
if dirss:
return dirs
def upload_translations(self):
info2 = Configuration(self.options_config).export_pattern_to_path(self.lang())
base_path = os.path.normpath(Configuration(self.options_config).get_base_path()) + os.sep
translations_language = info2[1::3]
translations_path = self.preserve_hierarchy(info2[::3])
translations_parameters = info2[2::3]
# Creating branch if needed
if self.any_options.branch and self.any_options.branch not in self.get_info_branches():
self.create_directory(self.any_options.branch, is_branch=True)
for i, source_file, params in zip(translations_language, translations_path, translations_parameters):
for language, item in six.iteritems(i):
if params.get('dest'):
if '/' in item:
items = source_file[source_file.rfind("/"):]
source_file = params.get('dest').join(source_file.rsplit(items, 1))
else:
source_file = params.get('dest')
full_path = base_path.replace('\\', '/') + item
check_l_option = self.any_options.language
if check_l_option:
if language == check_l_option:
self.upload_translations_files(full_path, language, source_file)
else:
self.upload_translations_files(full_path, language, source_file)
# print item, language, source_file, params
def supported_languages(self):
# GET https://api.crowdin.com/api/supported-languages
# POST https://api.crowdin.com/api/project/{project-identifier}/supported-languages?key={project-key}
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/supported-languages', 'url_par4': True}
params = {'json': 'json'}
return self.true_connection(url, params)
def download_project(self):
# GET https://api.crowdin.com/api/project/{project-identifier}/download/{package}.zip?key={project-key}
self.build_project()
base_path = os.path.normpath(Configuration(self.options_config).get_base_path()) + os.sep
if self.any_options.dlanguage:
lang = self.any_options.dlanguage
else:
lang = "all"
url = {'post': 'GET', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/download/{0}.zip'.format(lang), 'url_par4': True}
params = {'json': 'json'}
if self.any_options.branch:
params['branch'] = self.any_options.branch
# files that exists in archive and doesn't match current project configuration
unmatched_files = []
with zipfile.ZipFile(io.BytesIO(self.true_connection(url, params))) as z:
# for i in self.exists(Configuration().get_files_source()):
unzip_dict = {}
lang = self.lang()
translations_file = Configuration(self.options_config).export_pattern_to_path(lang, download=True)
trans_file_no_mapping = Configuration(self.options_config).export_pattern_to_path(lang)
for i, y in zip(translations_file[1::3], trans_file_no_mapping[1::3]):
for k, v in six.iteritems(y):
for key, value in six.iteritems(i):
if k == key:
unzip_dict[value] = v
if self.any_options.branch:
unzip_dict[self.any_options.branch + '/' + value] = v
initial_files = unzip_dict.keys()
for target_lang in lang:
for source_file in list(initial_files):
# change only for target_lang files
for lang_key in target_lang:
if target_lang[lang_key] in source_file:
if source_file == unzip_dict[source_file]:
f = os.path.basename(source_file)
else:
r_source = list(reversed(source_file.split('/')))
r_target = list(reversed(unzip_dict[source_file].split('/')))
f = ''
# print(r_source)
# print(r_target)
for i in range(len(r_target)-1):
if r_target[i] == r_source[i]:
f = '/' + r_target[i] + f
if not self.any_options.branch:
k = target_lang[lang_key] + '/' + f
else:
k = self.any_options.branch + '/' + target_lang[lang_key] + '/' + f
k = k.replace('//', '/')
unzip_dict[k] = unzip_dict[source_file]
matched_files = []
for structure in z.namelist():
if not structure.endswith("/"):
for key, value in six.iteritems(unzip_dict):
if structure == key:
matched_files.append(structure)
source = z.open(structure)
target_path = os.path.join(base_path, value)
target_dir = os.path.dirname(target_path)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
target = open(target_path, "wb")
logger.info("Download: {0} to {1}".format(key, target_path))
with source, target:
shutil.copyfileobj(source, target)
# z.extract(structure, base_path)
if structure not in unmatched_files and structure not in matched_files:
unmatched_files.append(structure)
if unmatched_files:
logger.warning(
"\n Warning: Downloaded translations do not match current project configuration. "
"Some of the resulted files will be omitted."
)
for i in unmatched_files:
print(i)
def build_project(self):
# GET https://api.crowdin.com/api/project/{project-identifier}/export?key={project-key}
url = {'post': 'POST', 'url_par1': '/api/project/', 'url_par2': True,
'url_par3': '/export', 'url_par4': True}
params = {'json': 'json'}
if self.any_options.branch:
params['branch'] = self.any_options.branch
data = json.loads(self.true_connection(url, params).decode())
logger.info("Building ZIP archive with the latest translations - {0}".format(data["success"]["status"]))
if data["success"]["status"] == 'skipped':
print("Warning: Export was skipped. Please note that this method can be invoked only once per 30 minutes.")
def list_project_files(self):
# print self.any_options
listing = []
if self.any_options.sources == 'project':
project_files = self.parse(self.get_info_files())
for i in project_files:
print(i)
listing.append(i)
if self.any_options.sources == 'sources':
sources_files = Configuration(self.options_config).get_files_source()
for i in sources_files[::3]:
print(i)
listing.append(i)
if self.any_options.sources == 'translations':
translations_file = Configuration(self.options_config).export_pattern_to_path(self.lang())
for i in translations_file[1::3]:
for key, value in six.iteritems(i):
print(value)
listing.append(value)
return listing
def test(self):
print(Configuration(self.options_config).get_files_source())
| PaulGregor/crowdin-cli | crowdin/methods.py | Python | mit | 19,747 | 0.003393 |
class UndirectedGraphNode:
def __init__(self, x):
self.label = x
self.neighbors = []
# iterative clone using an explicit stack, i.e. a depth-first traversal
class Solution:
# @param node, a undirected graph node
# @return a undirected graph node
def cloneGraph(self, node):
seen={}
visited=[]
seen[None] = None
head = UndirectedGraphNode(node.label)
seen[node] = head
visited.append(node)
while len(visited) != 0:
refNode = visited.pop()
for n in refNode.neighbors:
if n not in seen:
neighBorNode = UndirectedGraphNode(n.label)
seen[refNode].neighbors.append(neighBorNode)
seen[n] = neighBorNode
visited.append(n)
else:
seen[refNode].neighbors.append(seen[n])
return head
A=UndirectedGraphNode(2)
B=UndirectedGraphNode(3)
C=UndirectedGraphNode(4)
A.neighbors.append(B)
A.neighbors.append(C)
B.neighbors.append(C)
N=Solution()
for i in N.cloneGraph(A).neighbors:
print i.label
| bourneagain/pythonBytes | cloneGraph_BFS.py | Python | mit | 1,080 | 0.012037 |
"""Query utils."""
class LuaScript:
def __init__(self, script):
self.script = script
class LuaStoredQuery:
def __init__(self, name, query_args):
self.name = name
self.args = query_args
class LogicalOperator:
def __init__(self, *args):
self.clauses = args
def __str__(self):
return " {} ".format(self.OPERATOR).join(
[str(clause) for clause in self.clauses]
)
class Or(LogicalOperator):
OPERATOR = "or"
class And(LogicalOperator):
OPERATOR = "and"
class Not:
def __init__(self, clause):
self.clause = clause
def __str__(self):
return "not ({})".format(str(self.clause))
def _lua_repr(value):
if isinstance(value, bytes):
return repr(value.decode("utf-8"))
elif isinstance(value, bool):
if value:
return "true"
return "false"
elif isinstance(value, str):
return repr(value)
elif isinstance(value, (float, int)):
return value
elif isinstance(value, type(None)):
return "nil"
# XXX(tsileo): should `dict`/`list` be supported?
else:
raise ValueError("unsupported data type: {}".format(type(value)))
class LuaShortQuery:
def __init__(self, key, value, operator):
self.key = key
self.value = value
self.operator = operator
def query(self):
return "match(doc, '{}', '{}', {})".format(self.key, self.operator, self.value)
def __str__(self):
return self.query()
class LuaShortQueryComplex:
def __init__(self, query):
self.query = query
def __str__(self):
return self.query
class _MetaQuery(type):
def __getitem__(cls, key):
if isinstance(key, int):
return cls("[{}]".format(key + 1))
return cls(".{}".format(key))
class Q(metaclass=_MetaQuery):
"""Allow for query:
>>> Q['persons_count'] > 5
>>> Q['persons'][0]['name'] == 'thomas'
>>> Q['l'].contains(10)
>>> Q['persons'].contains(Q['name'] == 'thomas')
"""
def __init__(self, path=None):
self._path = path or ""
def __getitem__(self, key):
if isinstance(key, int):
self._path = self._path + "[{}]".format(key + 1)
return self
self._path = self._path + ".{}".format(key)
return self
def path(self):
return self._path[1:]
def __repr__(self):
return "Q(path={})".format(self._path)
def any(self, values):
return LuaShortQueryComplex(
" or ".join(
[
"get_path(doc, '{}') == {}".format(self.path(), _lua_repr(value))
for value in values
]
)
)
def not_any(self, values):
return LuaShortQueryComplex(
" or ".join(
[
"get_path(doc, '{}') ~= {}".format(self.path(), _lua_repr(value))
for value in values
]
)
)
def contains(self, q):
if isinstance(q, LuaShortQuery):
if q.operator != "EQ":
raise ValueError("contains only support pure equality query")
return LuaShortQueryComplex(
"in_list(doc, '{}', {}, '{}')".format(
self.path(), _lua_repr(q.value), q.key
)
)
elif isinstance(q, LuaShortQueryComplex):
raise ValueError("query too complex to use in contains")
return LuaShortQueryComplex(
"in_list(doc, '{}', {})".format(self.path(), _lua_repr(q))
)
def __eq__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "EQ")
def __ne__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "NE")
def __lt__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "LT")
def __le__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "LE")
def __ge__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "GE")
def __gt__(self, other):
return LuaShortQuery(self.path(), _lua_repr(other), "GT")
| tsileo/blobstash-python-docstore | blobstash/docstore/query.py | Python | mit | 4,218 | 0.000711 |
# -*- coding: UTF-8 -*-
import responses
from helpers import TestCase
from wptranslate.mediawiki import query
class TestMediaWiki(TestCase):
def setUp(self):
self.lang = 'foo'
self.url = 'https://%s.wikipedia.org/w/api.php' % self.lang
@responses.activate
def test_query_return_none_on_error(self):
responses.add(responses.GET, self.url, body='{}', status=404)
self.assertNone(query({}, lang=self.lang))
self.assertEquals(1, len(responses.calls))
@responses.activate
def test_query_return_none_on_wrong_resp(self):
responses.add(responses.GET, self.url, body='{}', status=200)
self.assertNone(query({}, lang=self.lang))
self.assertEquals(1, len(responses.calls))
@responses.activate
def test_query_return_query_param(self):
responses.add(responses.GET, self.url, body='{"query": 42}', status=200)
self.assertEquals(42, query({}, lang=self.lang))
self.assertEquals(1, len(responses.calls))
| bfontaine/wptranslate | tests/test_mediawiki.py | Python | mit | 1,013 | 0.002962 |
from __future__ import print_function
from argparse import ArgumentParser
import logging
from nptdms import tdms
def main():
parser = ArgumentParser(
description="List the contents of a LabView TDMS file.")
parser.add_argument(
'-p', '--properties', action="store_true",
help="Include channel properties.")
parser.add_argument(
'-d', '--debug', action="store_true",
help="Print debugging information to stderr.")
parser.add_argument(
'tdms_file',
help="TDMS file to read.")
args = parser.parse_args()
if args.debug:
logging.getLogger(tdms.__name__).setLevel(logging.DEBUG)
tdmsfile = tdms.TdmsFile(args.tdms_file)
level = 0
root = tdmsfile.object()
display('/', level)
if args.properties:
display_properties(root, level)
for group in tdmsfile.groups():
level = 1
try:
group_obj = tdmsfile.object(group)
display("%s" % group_obj.path, level)
if args.properties:
display_properties(group_obj, level)
except KeyError:
# It is possible to have a group without an object
display("/'%s'" % group, level)
for channel in tdmsfile.group_channels(group):
level = 2
display("%s" % channel.path, level)
if args.properties:
level = 3
if channel.data_type is not None:
display("data type: %s" % channel.data_type.name, level)
display_properties(channel, level)
def display_properties(tdms_object, level):
if tdms_object.properties:
display("properties:", level)
for prop, val in tdms_object.properties.items():
display("%s: %s" % (prop, val), level)
def display(s, level):
print("%s%s" % (" " * 2 * level, s))
| nmgeek/npTDMS | nptdms/tdmsinfo.py | Python | lgpl-3.0 | 1,875 | 0 |
# -*- coding: utf-8 -*-
"""
Wind vector calculations in finite differences
"""
from . import iris_api
from . import standard
from . import tools
from . import utils
# List to define the behaviour of imports of the form:
# from pyveccalc import *
__all__ = []
# Package version number.
__version__ = '0.2.9'
| dennissergeev/pyveccalc | pyveccalc/__init__.py | Python | mit | 314 | 0 |
from gym import core
class ArgumentEnv(core.Env):
calls = 0
def __init__(self, arg):
self.calls += 1
self.arg = arg
def test_env_instantiation():
# This looks like a pretty trivial, but given our usage of
# __new__, it's worth having.
env = ArgumentEnv('arg')
assert env.arg == 'arg'
assert env.calls == 1
| xpharry/Udacity-DLFoudation | tutorials/reinforcement/gym/gym/tests/test_core.py | Python | mit | 353 | 0.005666 |
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("core", "doctype", "sms_parameter")
sms_sender_name = frappe.db.get_single_value("SMS Settings", "sms_sender_name")
if sms_sender_name:
frappe.reload_doc("core", "doctype", "sms_settings")
sms_settings = frappe.get_doc("SMS Settings")
sms_settings.append("parameters", {
"parameter": "sender_name",
"value": sms_sender_name
})
sms_settings.flags.ignore_mandatory = True
sms_settings.flags.ignore_permissions = True
sms_settings.save()
| vjFaLk/frappe | frappe/patches/v9_1/add_sms_sender_name_as_parameters.py | Python | mit | 651 | 0.021505 |
from bisect import bisect
from uhashring.ring_ketama import KetamaRing
from uhashring.ring_meta import MetaRing
class HashRing:
"""Implement a consistent hashing ring."""
def __init__(self, nodes=[], **kwargs):
"""Create a new HashRing given the implementation.
:param nodes: nodes used to create the continuum (see doc for format).
:param hash_fn: use this callable function to hash keys, can be set to
'ketama' to use the ketama compatible implementation.
:param vnodes: default number of vnodes per node.
:param weight_fn: use this function to calculate the node's weight.
"""
hash_fn = kwargs.get("hash_fn", None)
vnodes = kwargs.get("vnodes", None)
weight_fn = kwargs.get("weight_fn", None)
if hash_fn == "ketama":
ketama_args = {k: v for k, v in kwargs.items() if k in ("replicas",)}
if vnodes is None:
vnodes = 40
self.runtime = KetamaRing(**ketama_args)
else:
if vnodes is None:
vnodes = 160
self.runtime = MetaRing(hash_fn)
self._default_vnodes = vnodes
self.hashi = self.runtime.hashi
if weight_fn and not hasattr(weight_fn, "__call__"):
raise TypeError("weight_fn should be a callable function")
self._weight_fn = weight_fn
if self._configure_nodes(nodes):
self.runtime._create_ring(self.runtime._nodes.items())
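    # Minimal usage sketch (illustrative):
    #   ring = HashRing(nodes=["node1", "node2", "node3"])
    #   ring.get_node("my-key")   # -> name of the owning node, e.g. "node2"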
def _configure_nodes(self, nodes):
"""Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
"""
if isinstance(nodes, str):
nodes = [nodes]
elif not isinstance(nodes, (dict, list)):
raise ValueError(
"nodes configuration should be a list or a dict,"
" got {}".format(type(nodes))
)
conf_changed = False
for node in nodes:
conf = {
"hostname": node,
"instance": None,
"nodename": node,
"port": None,
"vnodes": self._default_vnodes,
"weight": 1,
}
current_conf = self.runtime._nodes.get(node, {})
nodename = node
# new node, trigger a ring update
if not current_conf:
conf_changed = True
# complex config
if isinstance(nodes, dict):
node_conf = nodes[node]
if isinstance(node_conf, int):
conf["weight"] = node_conf
elif isinstance(node_conf, dict):
for k, v in node_conf.items():
if k in conf:
conf[k] = v
# changing those config trigger a ring update
if k in ["nodename", "vnodes", "weight"]:
if current_conf.get(k) != v:
conf_changed = True
else:
raise ValueError(
"node configuration should be a dict or an int,"
" got {}".format(type(node_conf))
)
if self._weight_fn:
conf["weight"] = self._weight_fn(**conf)
# changing the weight of a node trigger a ring update
if current_conf.get("weight") != conf["weight"]:
conf_changed = True
self.runtime._nodes[nodename] = conf
return conf_changed
def __delitem__(self, nodename):
"""Remove the given node.
:param nodename: the node name.
"""
self.runtime._remove_node(nodename)
remove_node = __delitem__
def __getitem__(self, key):
"""Returns the instance of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "instance")
get_node_instance = __getitem__
def __setitem__(self, nodename, conf={"weight": 1}):
"""Add the given node with its associated configuration.
:param nodename: the node name.
:param conf: the node configuration.
"""
if self._configure_nodes({nodename: conf}):
self.runtime._create_ring([(nodename, self._nodes[nodename])])
add_node = __setitem__
def _get_pos(self, key):
"""Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
"""
p = bisect(self.runtime._keys, self.hashi(key))
if p == len(self.runtime._keys):
return 0
else:
return p
def _get(self, key, what):
"""Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight
"""
if not self.runtime._ring:
return None
pos = self._get_pos(key)
if what == "pos":
return pos
nodename = self.runtime._ring[self.runtime._keys[pos]]
if what in ["hostname", "instance", "port", "weight"]:
return self.runtime._nodes[nodename][what]
elif what == "dict":
return self.runtime._nodes[nodename]
elif what == "nodename":
return nodename
elif what == "tuple":
return (self.runtime._keys[pos], nodename)
def get(self, key):
"""Returns the node object dict matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "dict")
def get_instances(self):
"""Returns a list of the instances of all the configured nodes."""
return [
c.get("instance") for c in self.runtime._nodes.values() if c.get("instance")
]
def get_key(self, key):
"""Alias of ketama hashi method, returns the hash of the given key.
This method is present for hash_ring compatibility.
:param key: the key to look for.
"""
return self.hashi(key)
def get_node(self, key):
"""Returns the node name of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "nodename")
def get_node_hostname(self, key):
"""Returns the hostname of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "hostname")
def get_node_port(self, key):
"""Returns the port of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "port")
def get_node_pos(self, key):
"""Returns the index position of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "pos")
def get_node_weight(self, key):
"""Returns the weight of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "weight")
def get_nodes(self):
"""Returns a list of the names of all the configured nodes."""
return self.runtime._nodes.keys()
def get_points(self):
"""Returns a ketama compatible list of (position, nodename) tuples."""
return [(k, self.runtime._ring[k]) for k in self.runtime._keys]
def get_server(self, key):
"""Returns a ketama compatible (position, nodename) tuple.
:param key: the key to look for.
"""
return self._get(key, "tuple")
def iterate_nodes(self, key, distinct=True):
"""hash_ring compatibility implementation.
Given a string key it returns the nodes as a generator that
can hold the key.
The generator iterates one time through the ring
starting at the correct position.
if `distinct` is set, then the nodes returned will be unique,
i.e. no virtual copies will be returned.
"""
if not self.runtime._ring:
yield None
else:
for node in self.range(key, unique=distinct):
yield node["nodename"]
def print_continuum(self):
"""Prints a ketama compatible continuum report."""
numpoints = len(self.runtime._keys)
if numpoints:
print(f"Numpoints in continuum: {numpoints}")
else:
print("Continuum empty")
for p in self.get_points():
point, node = p
print(f"{node} ({point})")
def range(self, key, size=None, unique=True):
"""Returns a generator of nodes' configuration available
in the continuum/ring.
:param key: the key to look for.
:param size: limit the list to at most this number of nodes.
:param unique: a node may only appear once in the list (default True).
"""
all_nodes = set()
if unique:
size = size or len(self.runtime._nodes)
else:
all_nodes = []
pos = self._get_pos(key)
for key in self.runtime._keys[pos:]:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
else:
for i, key in enumerate(self.runtime._keys):
if i < pos:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
def regenerate(self):
self.runtime._create_ring(self.runtime._nodes.items())
@property
def conf(self):
return self.runtime._nodes
nodes = conf
@property
def distribution(self):
return self.runtime._distribution
@property
def ring(self):
return self.runtime._ring
continuum = ring
@property
def size(self):
return len(self.runtime._ring)
@property
def _ring(self):
return self.runtime._ring
@property
def _nodes(self):
return self.runtime._nodes
@property
def _keys(self):
return self.runtime._keys
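

# Minimal usage sketch (illustrative only: the node names, host and port below
# are assumptions, not part of this module):
if __name__ == "__main__":
    ring = HashRing(nodes={
        "node1": {"hostname": "10.0.0.1", "port": 11211, "weight": 2},
        "node2": 1,  # a bare int is shorthand for the node's weight
        "node3": {"vnodes": 80},
    })
    # the same key maps to the same node for as long as the ring is unchanged
    assert ring.get_node("some-key") == ring.get_node("some-key")
    print(ring.get_node("some-key"))         # owning node name
    print(ring.get_node_weight("some-key"))  # that node's weight
    # walk fallback nodes in ring order, e.g. for replica placement
    for conf in ring.range("some-key", size=2):
        print(conf["nodename"])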
| ultrabug/uhashring | uhashring/ring.py | Python | bsd-3-clause | 11,224 | 0.000178 |
# -*- coding: UTF-8 -*-
#
# Copyright © 2003 - 2018 Michal Čihař <[email protected]>
#
# This file is part of Wammu <https://wammu.eu/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
'''
Wammu - Phone manager
Main Wammu application
'''
from __future__ import unicode_literals
from __future__ import print_function
import wx
import sys
import Wammu.Main
import Wammu.Error
from Wammu.Locales import StrConv
from Wammu.Locales import ugettext as _
class WammuApp(wx.App):
'''
    Wammu application class; it initializes wx and creates the main Wammu window.
'''
def OnInit(self):
'''
wxWindows call this method to initialize the application.
'''
self.locale = wx.Locale(wx.LANGUAGE_DEFAULT)
self.SetAppName('Wammu')
vendor = StrConv('Michal Čihař')
if vendor.find('?') != -1:
            vendor = 'Michal Cihar'  # plain-ASCII fallback if the conversion mangled the name
self.SetVendorName(vendor)
frame = Wammu.Main.WammuFrame(None, -1)
Wammu.Error.HANDLER_PARENT = frame
frame.Show(True)
frame.PostInit(self)
self.SetTopWindow(frame)
# Return a success flag
return True
def Run():
'''
Wrapper to execute Wammu. Installs graphical error handler and launches
WammuApp.
'''
try:
sys.excepthook = Wammu.Error.Handler
except:
print(_('Failed to set exception handler.'))
app = WammuApp()
app.MainLoop()
| gammu/wammu | Wammu/App.py | Python | gpl-3.0 | 2,021 | 0.000993 |
#!/usr/bin/python
## CABS_Server.py
# This is the webserver that is at the center of the CABS system.
# It is asynchronous, and as such the callbacks and function flow can be a bit confusing
# The basic idea is that the HandleAgentFactory and HandleClientFactory make new HandleAgents and HandleClients
# There is one per connection, and it processes all communication on that connection, without blocking
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import ssl, reactor, endpoints, defer, task
from twisted.protocols.basic import LineOnlyReceiver
from twisted.protocols.policies import TimeoutMixin
from twisted.enterprise import adbapi
from twisted.names import client
from twisted.python import log
import ldap
import sys
import logging
import random
import os
from time import sleep
#global settings dictionary
settings = {}
#global database pool
dbpool = adbapi.ConnectionPool
#global blacklist set
blacklist = set()
#make a logger
logger=logging.getLogger()
random.seed()
## Handles each Agent connection
class HandleAgent(LineOnlyReceiver, TimeoutMixin):
def __init__(self, factory):
self.factory = factory
#timeout after 9 seconds
self.setTimeout(9)
def connectionMade(self):
self.agentAddr = self.transport.getPeer()
logger.debug('Connection made with {0}'.format(self.agentAddr))
self.factory.numConnections = self.factory.numConnections + 1
logger.debug('There are {0} Agent connections'.format(self.factory.numConnections))
def connectionLost(self, reason):
logger.debug('Connection lost with {0} due to {1}'.format(self.agentAddr,reason))
self.factory.numConnections = self.factory.numConnections - 1
logger.debug('There are {0} Agent connections'.format(self.factory.numConnections))
def lineLengthExceeded(self, line):
logger.error('Agent at {0} exceeded the Line Length'.format(self.agentAddr))
self.transport.abortConnection()
def lineReceived(self, line):
#types of reports = status report (sr) and status process report (spr)
report = line.split(':')
if report[0] == 'sr' or report[0] == 'spr':
status = None
if report[0] == 'spr':
status = report.pop(1)
if status.endswith('-1'):
status = status.rstrip('-1') + ' : Unknown'
elif status.endswith('0'):
status = status.rstrip('0') + ' : Not Found'
elif status.endswith('1'):
status = status.rstrip('1') + ' : Not Running'
elif status.endswith('2'):
status = status.rstrip('2') + ' : Not Connected'
elif status.endswith('3'):
status = status.rstrip('3') + ' : Okay'
logger.debug("The process on {0} is {1}".format(report[1], status))
logger.debug('There are {0} users on {1}'.format(len(report)-2, report[1]))
#Mark the machine as active, and update timestamp
querystring = "UPDATE machines SET active = True, last_heartbeat = NOW(), status = %s WHERE machine = %s"
r1 = dbpool.runQuery(querystring, (status, report[1]))
#confirm any users that reserved the machine if they are there, or unconfirm them if they are not
#For now we don't support assigning multiple users per machine, so only one should be on at a time
#but, if we do have multiple, let it be so
#Try to write an entry under the first listed users name, if duplicate machine update the old entry to confirmed
users = ''
if len(report) > 2:
for item in range(2, len(report)):
users += report[item] + ', '
users = users[0:-2]
logger.info("Machine {0} reports user {1}".format(report[1],users))
regexstr = ''
for item in range(2, len(report)):
regexstr += '(^'
regexstr += report[item]
regexstr += '$)|'
regexstr = regexstr[0:-1]
if settings.get("One_Connection") == 'True' or settings.get("One_Connection") == True:
querystring = "INSERT INTO current VALUES (%s, NULL, %s, True, NOW()) ON DUPLICATE KEY UPDATE confirmed = True, connecttime = Now(), user = %s"
r2 = dbpool.runQuery(querystring,(report[2],report[1],users))
querystring = "UPDATE current SET confirmed = True, connecttime = Now() WHERE (machine = %s AND user REGEXP %s)"
r2 = dbpool.runQuery(querystring,(report[1],regexstr))
else:
querystring = "DELETE FROM current WHERE machine = %s"
r2 = dbpool.runQuery(querystring, (report[1],))
## Creates a HandleAgent for each connection
class HandleAgentFactory(Factory):
def __init__(self):
self.numConnections = 0
def buildProtocol(self, addr):
#Blacklist check here
if addr.host in blacklist:
logger.debug("Blacklisted address {0} tried to connect".format(addr.host))
protocol = DoNothing()
protocol.factory = self
return protocol
#limit connection number here
if (settings.get("Max_Agents") is not None and settings.get("Max_Agents") != 'None') and (int(self.numConnections) >= int(settings.get("Max_Agents"))):
logger.warning("Reached maximum Agent connections")
protocol = DoNothing()
protocol.factory = self
return protocol
return HandleAgent(self)
## Handles each Client Connection
class HandleClient(LineOnlyReceiver, TimeoutMixin):
def __init__(self, factory):
self.factory = factory
self.setTimeout(9)#set timeout of 9 seconds
def connectionMade(self):
#if auto then add to blacklist
self.clientAddr = self.transport.getPeer()
logger.debug('Connection made with {0}'.format(self.clientAddr))
self.factory.numConnections = self.factory.numConnections + 1
logger.debug('There are {0} Client connections'.format(self.factory.numConnections))
def connectionLost(self, reason):
logger.debug('Connection lost with {0} due to {1}'.format(self.clientAddr,reason))
self.factory.numConnections = self.factory.numConnections - 1
logger.debug('There are {0} Client connections'.format(self.factory.numConnections))
def lineLengthExceeded(self, line):
logger.error('Client at {0} exceeded the Line Length'.format(self.clientAddr))
self.transport.abortConnection()
def lineReceived(self, line):
        #We can receive 2 types of lines from a client: pool request (pr) and machine request (mr)
request = line.split(':')
if request[0].startswith('pr'):
if request[0].endswith('v') and settings.get('RGS_Ver_Min') != 'False':
#check version
print "###################################" + settings.get('RGS_Ver_Min')
logger.debug('User {0} at {1} is using RGS {2}'.format(request[1], self.clientAddr, request[-1]))
if request[-1] < settings.get('RGS_Ver_Min'):
self.transport.write("Err:Sorry, your RGS reciever is out of date, it should be at least {0}".format(settings.get('RGS_Ver_Min')))
self.transport.loseConnection()
logger.info('User {0} requested pool info from {1}'.format(request[1],self.clientAddr))
#authenticate_user
#get pools for user
try:
self.getAuthLDAP(request[1],request[2]).addCallback(self.writePools)
except:
logger.debug("Could not get Pools")
self.transport.write("Err:Could not authenticate to authentication server")
self.transport.loseConnection()
elif request[0] == 'mr':
logger.info('User {0} requested a machine in pool {1} from {2}'.format(request[1],request[3],self.clientAddr))
if (request[3] is not None) and (request[3] != ''):
#check authentication and get machine for the user
try:
deferredtouple = self.getAuthLDAP(request[1],request[2],request[3])
deferredtouple[0].addCallback(self.checkSeat,deferredtouple[1],request[1],request[3])
except:
logger.debug("Could not get a machine")
self.transport.abortConnection()
## called after getAuthLDAP
# Checks the return to see if the user had previously had a machine
# then gives the user a new machine (or their old one) through writeMachine
def checkSeat(self, previousmachine, deferredmachine, user, pool):
#Give user machine
if len(previousmachine) == 0:
deferredmachine.addBoth(self.writeMachine, user, pool, False)
else:
self.writeMachine(previousmachine, user, pool, True)
## Writes a machine to the user, if none are availible, calls getSecondary
def writeMachine(self, machines, user, pool, restored, secondary=False):
if restored:
stringmachine = random.choice(machines)[0]
logger.info("Restored machine {0} in pool {1} to {2}".format(stringmachine, pool, user))
self.transport.write(stringmachine)
self.transport.loseConnection()
elif len(machines) == 0:
#check secondary pools here
if not secondary:
self.getSecondary(pool).addBoth(self.getSecondaryMachines, user, pool)
else:
logger.info("Could not find an open machine in {0} or its secondaries".format(pool))
self.transport.write("Err:Sorry, There are no open machines in {0}.".format(pool))
self.transport.loseConnection()
else:
stringmachine = random.choice(machines)[0]
#write to database to reserve machine
self.reserveMachine(user, pool, stringmachine).addBoth(self.verifyReserve, user, pool, stringmachine)
## Makes sure we can assign that machine, if all went well, we give them the machine
# This is needed so we make sure we have set aside the machine before we give it to the Client
def verifyReserve(self, error, user, pool, machine):
#if we get an error, then we had a collision, so give them another machine
if error:
#don't send anything, client will try again a few times
logger.warning("Tried to reserve machine {0} but was unable".format(machine))
self.transport.write("Err:RETRY")
self.transport.loseConnection()
else:
logger.info("Gave machine {0} in pool {1} to {2}".format(machine, pool, user))
self.transport.write(machine)
self.transport.loseConnection()
## Sends the SQL request to reserve the machine for the user
def reserveMachine(self, user, pool, machine):
opstring = "INSERT INTO current VALUES (%s, %s, %s, False, CURRENT_TIMESTAMP)"
logger.debug("Reserving {0} in pool {1} for {2}".format(machine, pool, user))
return dbpool.runQuery(opstring, (user, pool, machine))
## Sends the list of pools accesable to the user
def writePools(self, listpools):
logger.debug("Sending {0} to {1}".format(listpools, self.clientAddr))
for item in listpools:
self.transport.write(str(item))
self.transport.write("\n")
self.transport.loseConnection()
## Attempts to authenticate the user to the LDAP server via their username and password
# Returns a touple of deffereds to getPreviousSession and getMachine
def getAuthLDAP(self, user, password, requestedpool = None):
auth = True
groups = []
pools = {}
if (settings.get("Auth_Server") is None) or (settings.get("Auth_Server") == 'None'):
#Don't try to authenticate, just give back the list of pools
auth = False
else:
Server = settings.get("Auth_Server")
if Server.startswith("AUTO"):
logger.warning("A user tried to Authenticate before a LDAP server could be found")
raise ReferenceError("No AUTO Authentication Server yet")
if not Server.startswith("ldap"):
Server = "ldap://" + Server
DN = settings.get("Auth_Prefix") + user + settings.get("Auth_Postfix")
Base = settings.get("Auth_Base")
Scope = ldap.SCOPE_SUBTREE
Attrs = [ settings.get("Auth_Grp_Attr") ]
UsrAttr = settings.get("Auth_Usr_Attr")
logger.debug("Attempting to Autenticate to {0} as {1}".format(Server, DN))
try:
if settings.get("Auth_Secure") == 'True':
if settings.get("Auth_Cert") != 'None' and settings.get("Auth_Cert") is not None:
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, settings.get("Auth_Cert"))
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
else:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
l = ldap.initialize(Server)
l.set_option(ldap.OPT_REFERRALS,0)
l.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
if settings.get("Auth_Secure") == 'True':
try:
l.start_tls_s()
except Exception as e:
logger.error("Could not start a TLS connection to the server at {0} with the certificate {1}".format(Server, settings.get("Auth_Cert")))
logger.debug("error = {0}".format(e))
return
l.bind_s(DN, password, ldap.AUTH_SIMPLE)
r = l.search(Base, Scope, UsrAttr + '=' + user, Attrs)
result = l.result(r,9)
logger.debug("Sucessfully returned {0}".format(result))
try:
l.unbind()
except:
pass
except:
logger.warning("User {0} was unable to authenticate.".format(user))
return
#get user groups
AttrsDict = result[1][0][1]
for key in AttrsDict:
for x in AttrsDict[key]:
#take only the substring after the first =, and before the comma
groups.append(x[x.find('=')+1:x.find(',')])
logger.debug("User {0} belongs to {1}".format(user, groups))
if requestedpool == None:
#pool request, give list of user available
return self.getPools(groups, auth)
else:
#machine request
#returned touple of (deferred from getprevsession, deferred from getmachine)
return (self.getPreviousSession(user,requestedpool), self.getMachine(groups, auth, requestedpool))
## Runs the SQL to see if the user already has a machine checked out
def getPreviousSession(self, user, requestedpool):
#only find a previous machine if it had been in the same pool, and confirmed
querystring = "SELECT machine FROM current WHERE (user = %s AND name = %s AND confirmed = True)"
return dbpool.runQuery(querystring, (user, requestedpool))
## Runs the SQL to get machines the user could use
def getMachine(self, groups, auth, requestedpool):
r = defer.Deferred
if auth and (len(groups) > 0):
regexstring = ""
for group in groups:
regexstring += "(.*"
regexstring += group
regexstring += ".*)|"
regexstring = regexstring[0:-1]
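            # e.g. groups ['staff', 'lab'] (illustrative names) yield the MySQL
            # REGEXP "(.*staff.*)|(.*lab.*)", matching machines whose groups
            # column mentions any one of the user's groups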
querystring = "SELECT machines.machine FROM machines INNER JOIN pools ON pools.name = machines.name WHERE ((machines.machine NOT IN (SELECT machine FROM current)) AND (active = True) AND (status = 'Okay') AND (pools.name = %s) AND (groups IS NULL OR groups REGEXP %s) AND (machines.deactivated = False) AND (pools.deactivated = False))"
r = dbpool.runQuery(querystring, (requestedpool, regexstring))
else:
querystring = "SELECT machines.machine FROM machines INNER JOIN pools ON pools.name = machines.name WHERE ((machines.machine NOT IN (SELECT machine FROM current)) AND (active = True) AND (status = 'Okay') AND (pools.name = %s) AND (groups IS NULL) AND (machines.deactivated = False) AND (pools.deactivated = False))"
r = dbpool.runQuery(querystring, (requestedpool,))
return r
## finds a pools secondary pools
def getSecondary(self, requestedpool):
#get secondary pools for this pool
querystring = "SELECT secondary FROM pools WHERE name=%s"
return dbpool.runQuery(querystring, (requestedpool,))
## gets machines in a secondary pool
def getSecondaryMachines(self, pools, user, originalpool):
#parse secondary pools and do a machine request
if pools[0][0] is not None:
args = tuple(pools[0][0].split(','))
else:
args = None
querystring = "SELECT machines.machine FROM machines INNER JOIN pools ON pools.name = machines.name WHERE ((machines.machine NOT IN (SELECT machine FROM current)) AND (active = True) AND (machines.deactivated = False) AND (pools.deactivated = False) AND ((pools.name = %s)"
if args is not None:
for pool in args:
querystring += " OR (pools.name = %s)"
querystring += "))"
args = (originalpool,) + (args if args is not None else ())
r = dbpool.runQuery(querystring, args)
r.addBoth(self.writeMachine, user, originalpool, False, True)
## runs the SQL to see what pools the user can access
def getPools(self, groups, auth):
poolset = set()
r = defer.Deferred
if auth and (len(groups) > 0):
regexstring = ""
for group in groups:
regexstring += "(.*"
regexstring += group
regexstring += ".*)|"
regexstring = regexstring[0:-1]
r = dbpool.runQuery("SELECT name, description FROM pools WHERE (deactivated = False AND (groups IS NULL OR groups REGEXP %s))",(regexstring,))
else:
r = dbpool.runQuery("SELECT name, description FROM pools WHERE (groups IS NULL AND deactivated = False)")
return r
#class PauseAndStoreTransport(Protocol):
# def makeConnection(self, transport):
# transport.pauseProducting()
# self.factory.addPausedTransport(self, transport)
## A place to direct blacklisted addresses, or if we have too many connections at once
class DoNothing(Protocol):
def makeConnection(self, transport):
transport.abortConnection()
## creates a HandleClient for each Client connection
class HandleClientFactory(Factory):
def __init__(self):
self.numConnections = 0
self.transports = []
def buildProtocol(self, addr):
#Blacklist check here
if addr.host in blacklist:
logger.debug("Blacklisted address {0} tried to connect".format(addr.host))
protocol = DoNothing()
protocol.factory = self
return protocol
#limit connection number here
if (settings.get("Max_Clients") is not None and settings.get("Max_Clients") != 'None') and (int(self.numConnections) >= int(settings.get("Max_Clients"))):
logger.warning("Reached maximum Client connections")
protocol = DoNothing()
protocol.factory = self
return protocol
return HandleClient(self)
##This might not be needed in this case, I might implement it later if it helps speed.
##For now, let's just let Clients try to reconnect a few times after a few seconds
# def addPausedTransport(originalProtocol, transport):
# self.transports.append((originalProtocol,transport))
#
# def oneConnectionDisconnected(self):
# if (settings.get("Max_Clients") is not None and settings.get("Max_Clients") != 'None') and (int(self.numConnections) < int(settings.get("Max_Clients"))):
# originalProtocol, transport = self.transports.pop(0)
# newProtocol = self.buildProtocol(address)
#
# originalProtocol.dataReceived = newProtocol.dataReceived
# originalProtocol.connectionLost = newProtocol.connectionLost
#
# newProtocol.makeConnection(transport)
# transport.resumeProducing()
## Checks the machines table for inactive machines, and sets them as so
# Called periodically
def checkMachines():
logger.debug("Checking Machines")
#check for inactive machines
if (settings.get("Timeout_time") is not None) or (settings.get("Timeout_time") != 'None'):
querystring = "UPDATE machines SET active = False, status = NULL WHERE last_heartbeat < DATE_SUB(NOW(), INTERVAL %s SECOND)"
r1 = dbpool.runQuery(querystring, (settings.get("Timeout_Time"),))
#check for reserved machines without confirmation
#querystring = "DELETE FROM current WHERE (confirmed = False AND connecttime < DATE_SUB(NOW(), INTERVAL %s SECOND))"
querystring = "DELETE FROM current WHERE (connecttime < DATE_SUB(NOW(), INTERVAL %s SECOND))"
r2 = dbpool.runQuery(querystring, (settings.get("Reserve_Time"),))
## Gets the blacklist from the database, and updates is
def cacheBlacklist():
logger.debug("Cacheing the Blacklist")
querystring = "SELECT blacklist.address FROM blacklist LEFT JOIN whitelist ON blacklist.address = whitelist.address WHERE (banned = True AND whitelist.address IS NULL)"
r = dbpool.runQuery(querystring)
r.addBoth(setBlacklist)
## Sets the blacklist variable, given data from the Database
def setBlacklist(data):
global blacklist
blacklist = set()
logger.debug("Blacklist:")
for item in data:
blacklist.add(item[0])
logger.debug(item[0])
## Chooses a LDAP/Active Directory server
def setAuthServer(results):
results = results[0]
if not results[0].payload.target:
logger.error("Could not find LDAP server from AUTO")
else:
logger.debug("Found AUTO authentication servers : {0}".format(', '.join(str(x.payload.target) for x in results)))
logger.info("Using LDAP server {0}".format(str(results[0].payload.target)))
settings["Auth_Server"] = str(results[0].payload.target)
## Does a DNS service request for an LDAP service
def getAuthServer():
logger.debug("Getting LDAP server from AUTO")
domain = settings.get("Auth_Auto").replace('AUTO', '', 1)
domain = '_ldap._tcp' + domain
resolver = client.Resolver('/etc/resolv.conf')
d = resolver.lookupService(domain)
d.addCallback(setAuthServer)
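# CABS_server.conf holds one "Key:<TAB>Value" pair per line, for example
# (illustrative values only; see the defaults below for the full key list):
#   Client_Port:    18181
#   Auth_Server:    ldap.example.edu
#   Use_Agents:     True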
## Reads the configuration file
def readConfigFile():
    #open the .conf file and load its variables into the global settings dictionary
global settings
filelocation = os.path.dirname(os.path.abspath(__file__)) + "/CABS_server.conf"
with open(filelocation, 'r') as f:
for line in f:
line = line.strip()
if (not line.startswith('#')) and line:
try:
(key,val) = line.split(':\t',1)
except:
print "Warning : Check .conf syntax for {0}".format(line)
try:
(key,val) = line.split(None,1)
key = key[:-1]
except:
key = line
key = key.strip()
key = key[:-1]
val = ''
settings[key] = val
f.close()
#insert default settings for all not specified
if not settings.get("Max_Clients"):
settings["Max_Clients"] = '62'
if not settings.get("Max_Agents"):
settings["Max_Agents"] = '120'
if not settings.get("Client_Port"):
settings["Client_Port"] = '18181'
if not settings.get("Agent_Port"):
settings["Agent_Port"] = '18182'
if not settings.get("Use_Agents"):
settings["User_Agents"] = 'True'
if not settings.get("Database_Addr"):
settings["Database_Addr"] = "127.0.0.1"
if not settings.get("Database_Port"):
settings["Database_Port"] = 3306
if not settings.get("Database_Usr"):
settings["Database_Usr"] = "CABS"
if not settings.get("Database_Pass"):
settings["Database_Pass"] = "BACS"
if not settings.get("Database_Name"):
settings["Database_Name"] = "test"
if not settings.get("Reserve_Time"):
settings["Reserve_Time"] = '360'
if not settings.get("Timeout_Time"):
settings["Timeout_Time"] = '540'
if not settings.get("Use_Blacklist"):
settings["Use_Blacklist"] = 'False'
if not settings.get("Auto_Blacklist"):
settings["Auto_Blacklist"] = 'False'
if not settings.get("Auto_Max"):
settings["Auto_Max"] = '300'
if not settings.get("Auth_Server"):
settings["Auth_Server"] = 'None'
if not settings.get("Auth_Prefix"):
settings["Auth_Prefix"] = ''
if not settings.get("Auth_Postfix"):
settings["Auth_Postfix"] = ''
if not settings.get("Auth_Base"):
settings["Auth_Base"] = 'None'
if not settings.get("Auth_Usr_Attr"):
settings["Auth_Usr_Attr"] = 'None'
if not settings.get("Auth_Grp_Attr"):
settings["Auth_Grp_Attr"] = 'None'
if not settings.get("Auth_Secure"):
settings["Auth_Secure"] = 'False'
if not settings.get("Auth_Cert"):
settings["Auth_Cert"] = 'None'
if not settings.get("SSL_Priv_Key"):
settings["SSL_Priv_Key"] = 'None'
if not settings.get("SSL_Cert"):
settings["SSL_Cert"] = 'None'
if not settings.get("RGS_Ver_Min"):
settings["RGS_Ver_Min"] = 'False'
if not settings.get("Verbose_Out"):
settings["Verbose_Out"] = 'False'
if not settings.get("Log_Amount"):
settings["Log_Amount"] = '4'
if not settings.get("Log_Keep"):
settings["Log_Keep"] = '500'
if not settings.get("Log_Time"):
settings["Log_Time"] = '1800'
if not settings.get("One_Connection"):
settings["One_Connection"] = 'True'
## gets additional settings overrides from the Database
def readDatabaseSettings():
#This needs to be a "blocked" call to ensure order, it can't be asynchronous.
querystring = "SELECT * FROM settings"
con = dbpool.connect()
cursor = con.cursor()
cursor.execute(querystring)
data = cursor.fetchall()
global settings
for rule in data:
settings[str(rule[0])] = rule[1]
cursor.close()
dbpool.disconnect(con)
con = dbpool.connect()
cursor = con.cursor()
querystring = "UPDATE settings SET applied = True"
cursor.execute(querystring)
data = cursor.fetchall()
cursor.close()
con.commit()
dbpool.disconnect(con)
## See if we need to restart because Database settings have changed
def checkSettingsChanged():
querystring = "select COUNT(*) FROM settings WHERE (applied = False OR applied IS NULL)"
r = dbpool.runQuery(querystring)
r.addBoth(getSettingsChanged)
## Sees the result from checkSettingsChanged, and acts accordingly
def getSettingsChanged(data):
if int(data[0][0]) > 0:
logger.error("Interface settings had changed... Restarting the Server")
reactor.stop()
#should probably sleep here for a few seconds?
os.execv(__file__, sys.argv)
## A logging.Handler class for writing out log to the Database
class MySQLHandler(logging.Handler):
    #This is our logger to the Database; it will handle our logger calls
def __init__(self):
logging.Handler.__init__(self)
def emit(self, record):
querystring = "INSERT INTO log VALUES(NOW(), %s, %s, DEFAULT)"
r = dbpool.runQuery(querystring, (str(record.getMessage()), record.levelname))
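# (setLogging() below attaches this handler via logger.addHandler(MySQLHandler()),
# so each emitted log record becomes a row in the database's log table.)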
## Makes sure the log doesn't grow too big, removes execess
def pruneLog():
querystring = "DELETE FROM log WHERE id <= (SELECT id FROM (SELECT id FROM log ORDER BY id DESC LIMIT 1 OFFSET %s)foo )"
r = dbpool.runQuery(querystring, (int(settings.get("Log_Keep")),))
## Starts the logging
def setLogging():
global logger
loglevel = int(settings.get("Log_Amount"))
if loglevel <= 0:
loglevel = logging.CRITICAL
#For our purposes, CRITICAL means no logging
elif loglevel == 1:
loglevel = logging.ERROR
elif loglevel == 2:
loglevel = logging.WARNING
elif loglevel == 3:
loglevel = logging.INFO
elif loglevel >= 4:
loglevel = logging.DEBUG
logger.setLevel(loglevel)
if settings.get("Verbose_Out") == 'True':
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.addHandler(MySQLHandler())
logger.info("Server Settings:")
for key in settings:
if not key.endswith("Pass"):
logger.info("{0} = {1}".format(key, settings.get(key)))
def main():
#Read the settings
readConfigFile()
#create database pool
global dbpool
dbpool = adbapi.ConnectionPool(
"MySQLdb",
db=settings.get("Database_Name"),
port=int(settings.get("Database_Port")),
user=settings.get("Database_Usr"),
passwd=settings.get("Database_Pass"),
host=settings.get("Database_Addr"),
cp_reconnect=True
)
#get override settings from the database, then start logger
readDatabaseSettings()
#SetLogging
setLogging()
#Create Client Listening Server
if (settings.get("SSL_Priv_Key") is None) or (settings.get("SSL_Priv_Key") == 'None') or (settings.get("SSL_Cert") is None) or (settings.get("SSL_Cert") == 'None'):
serverstring = "tcp:" + str(settings.get("Client_Port"))
endpoints.serverFromString(reactor, serverstring).listen(HandleClientFactory())
else:
serverstring = "ssl:" + str(settings.get("Client_Port")) + ":privateKey=" + settings.get("SSL_Priv_Key") + ":certKey=" + settings.get("SSL_Cert")
endpoints.serverFromString(reactor, serverstring).listen(HandleClientFactory())
logger.warning("Starting Client Server {0}".format(serverstring))
#Set up Agents listening
if (settings.get("Use_Agents") == 'True'):
#Use Agents, so start the listening server
if (settings.get("SSL_Priv_Key") is None) or (settings.get("SSL_Priv_Key") == 'None') or (settings.get("SSL_Cert") is None) or (settings.get("SSL_Cert") == 'None'):
agentserverstring = "tcp:" + str(settings.get("Agent_Port"))
endpoints.serverFromString(reactor, agentserverstring).listen(HandleAgentFactory())
else:
agentserverstring = "ssl:" + str(settings.get("Agent_Port")) + ":privateKey=" + settings.get("SSL_Priv_Key") + ":certKey=" + settings.get("SSL_Cert")
endpoints.serverFromString(reactor, agentserverstring).listen(HandleAgentFactory())
logger.warning("Starting Agent Server {0}".format(agentserverstring))
#Check Machine status every 1/2 Reserve_Time
checkup = task.LoopingCall(checkMachines)
checkup.start( int(settings.get("Reserve_Time"))/2 )
else:
        #do this so things still (mostly) work without agents
querystring = "UPDATE machines SET active = True, status = 'Okay'"
r1 = dbpool.runQuery(querystring)
#resolve LDAP server
if settings.get("Auth_Server").startswith("AUTO"):
settings["Auth_Auto"] = settings.get("Auth_Server")
resolveldap = task.LoopingCall(getAuthServer)
#Get the LDAP server every 2 hours
resolveldap.start(7200)
#Start Blacklist cacheing
if settings.get("Use_Blacklist") == 'True':
getblacklist = task.LoopingCall(cacheBlacklist)
#refresh blacklist every 15 minutes
getblacklist.start(900)
#Start Pruning log
if int(settings.get("Log_Amount")) != 0:
prune = task.LoopingCall(pruneLog)
prune.start(int(settings.get("Log_Time")))
#Check the online settings every 9 minutes, and restart if they changed
checksettingschange = task.LoopingCall(checkSettingsChanged)
checksettingschange.start(540)
#Start Everything
reactor.run()
if __name__ == "__main__":
main()
| jordan9001/CABS | Source/Broker/CABS_server.py | Python | apache-2.0 | 32,754 | 0.009007 |
import PyQtExtras
from PyQt5.QtWidgets import QFrame, QApplication
import sys
def main(args):
app = QApplication([])
main_frame = QFrame()
list_view = PyQtExtras.ListScrollArea(main_frame)
list_view.add_item_by_string('Item 1')
list_view.add_item_by_string('Item 2')
list_view.add_item_by_string('Item 3')
list_view.remove_item_by_string('Item 1')
main_frame.show()
app.exec_()
if __name__ == '__main__':
main(sys.argv) | jhavstad/model_runner | src/ScrollListViewTest.py | Python | gpl-2.0 | 469 | 0.006397 |
# Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets
# in the array that give a sum of zero.
#
# Note: The solution set must not contain duplicate triplets.
#
# For example, given array S = [-1, 0, 1, 2, -1, -4],
#
# A solution set is:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
nums.sort()
for i in xrange(len(nums) - 2):
if i > 0 and nums[i] == nums[i - 1]:
continue
l, r = i + 1, len(nums) - 1
while l < r:
s = nums[i] + nums[l] + nums[r]
if s < 0:
l += 1
elif s > 0:
r -= 1
else:
res.append((nums[i], nums[l], nums[r]))
while l < r and nums[l] == nums[l + 1]:
l += 1
while l < r and nums[r] == nums[r - 1]:
r -= 1
l += 1
r -= 1
return res
# Note:
# Iterate through the list with pointer i, then look for two more numbers that sum to -nums[i].
# Since the list is sorted, the right pointer always sits above the left pointer.
# If the sum is too large, move the right pointer back one; if the sum is too small
# (below 0), move the left pointer up one.
#
# To avoid duplicate triplets, we skip index i when nums[i] equals nums[i - 1]
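
# Quick local sanity check (hypothetical driver, not part of the LeetCode harness):
if __name__ == '__main__':
    print Solution().threeSum([-1, 0, 1, 2, -1, -4])  # [(-1, -1, 2), (-1, 0, 1)]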
| jigarkb/CTCI | LeetCode/015-M-3Sum.py | Python | mit | 1,646 | 0.00486 |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from devtools_testutils.aio import recorded_by_proxy_async
from azure.ai.formrecognizer._generated.models import AnalyzeResultOperation
from azure.ai.formrecognizer.aio import DocumentAnalysisClient
from azure.ai.formrecognizer import AnalyzeResult
from preparers import FormRecognizerPreparer
from asynctestcase import AsyncFormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
DocumentAnalysisClientPreparer = functools.partial(_GlobalClientPreparer, DocumentAnalysisClient)
class TestDACAnalyzeDocumentAsync(AsyncFormRecognizerTest):
def teardown(self):
self.sleep(4)
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
async def test_document_stream_transform_pdf(self, client):
with open(self.invoice_pdf, "rb") as fd:
document = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)
extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)
responses.append(analyze_result)
responses.append(extracted_document)
async with client:
poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback)
result = await poller.result()
raw_analyze_result = responses[0].analyze_result
returned_model = responses[1]
# Check AnalyzeResult
assert returned_model.model_id == raw_analyze_result.model_id
assert returned_model.api_version == raw_analyze_result.api_version
assert returned_model.content == raw_analyze_result.content
self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)
# check page range
assert len(raw_analyze_result.pages) == len(returned_model.pages)
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
async def test_document_stream_transform_jpg(self, client):
with open(self.form_jpg, "rb") as fd:
document = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)
extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)
responses.append(analyze_result)
responses.append(extracted_document)
async with client:
poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback)
result = await poller.result()
raw_analyze_result = responses[0].analyze_result
returned_model = responses[1]
# Check AnalyzeResult
assert returned_model.model_id == raw_analyze_result.model_id
assert returned_model.api_version == raw_analyze_result.api_version
assert returned_model.content == raw_analyze_result.content
self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)
# check page range
assert len(raw_analyze_result.pages) == len(returned_model.pages)
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
async def test_document_multipage_transform(self, client):
with open(self.multipage_invoice_pdf, "rb") as fd:
document = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)
extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)
responses.append(analyze_result)
responses.append(extracted_document)
async with client:
poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback)
result = await poller.result()
raw_analyze_result = responses[0].analyze_result
returned_model = responses[1]
# Check AnalyzeResult
assert returned_model.model_id == raw_analyze_result.model_id
assert returned_model.api_version == raw_analyze_result.api_version
assert returned_model.content == raw_analyze_result.content
self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)
# check page range
assert len(raw_analyze_result.pages) == len(returned_model.pages)
@pytest.mark.live_test_only
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
async def test_document_multipage_table_span_pdf(self, client, **kwargs):
with open(self.multipage_table_pdf, "rb") as fd:
my_file = fd.read()
async with client:
poller = await client.begin_analyze_document("prebuilt-document", my_file)
document = await poller.result()
assert len(document.tables) == 3
assert document.tables[0].row_count == 30
assert document.tables[0].column_count == 5
assert document.tables[1].row_count == 6
assert document.tables[1].column_count == 5
assert document.tables[2].row_count == 23
assert document.tables[2].column_count == 5
@FormRecognizerPreparer()
@DocumentAnalysisClientPreparer()
@recorded_by_proxy_async
async def test_document_specify_pages(self, client):
with open(self.multipage_invoice_pdf, "rb") as fd:
document = fd.read()
async with client:
poller = await client.begin_analyze_document("prebuilt-document", document, pages="1")
result = await poller.result()
assert len(result.pages) == 1
poller = await client.begin_analyze_document("prebuilt-document", document, pages="1, 3")
result = await poller.result()
assert len(result.pages) == 2
poller = await client.begin_analyze_document("prebuilt-document", document, pages="1-2")
result = await poller.result()
assert len(result.pages) == 2
poller = await client.begin_analyze_document("prebuilt-document", document, pages="1-2, 3")
result = await poller.result()
assert len(result.pages) == 3
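

# Minimal non-test usage sketch (endpoint and key are placeholders, not values
# taken from this repo):
#
#     from azure.core.credentials import AzureKeyCredential
#     from azure.ai.formrecognizer.aio import DocumentAnalysisClient
#
#     async def analyze(path):
#         client = DocumentAnalysisClient(
#             "https://<resource>.cognitiveservices.azure.com/",
#             AzureKeyCredential("<api-key>"))
#         async with client:
#             with open(path, "rb") as fd:
#                 poller = await client.begin_analyze_document("prebuilt-document", fd.read())
#             return await poller.result()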
| Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_general_document_async.py | Python | mit | 8,184 | 0.004399 |
#
# $Id: sphinxapi.py 2970 2011-09-23 16:50:22Z klirichek $
#
# Python version of Sphinx searchd client (Python API)
#
# Copyright (c) 2006, Mike Osadnik
# Copyright (c) 2006-2011, Andrew Aksyonoff
# Copyright (c) 2008-2011, Sphinx Technologies Inc
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License. You should have
# received a copy of the GPL license along with this program; if you
# did not, you can find it at http://www.gnu.org/
#
import sys
import select
import socket
import re
from struct import *
# known searchd commands
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS = 3
SEARCHD_COMMAND_PERSIST = 4
SEARCHD_COMMAND_STATUS = 5
SEARCHD_COMMAND_FLUSHATTRS = 7
# current client-side command implementation versions
VER_COMMAND_SEARCH = 0x119
VER_COMMAND_EXCERPT = 0x104
VER_COMMAND_UPDATE = 0x102
VER_COMMAND_KEYWORDS = 0x100
VER_COMMAND_STATUS = 0x100
VER_COMMAND_FLUSHATTRS = 0x100
# known searchd status codes
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
# known match modes
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
# known ranking modes (extended2 mode only)
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
SPH_RANK_PROXIMITY = 4
SPH_RANK_MATCHANY = 5
SPH_RANK_FIELDMASK = 6
SPH_RANK_SPH04 = 7
SPH_RANK_EXPR = 8
SPH_RANK_TOTAL = 9
# known sort modes
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
# known filter types
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
# known attribute types
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_BIGINT = 6
SPH_ATTR_STRING = 7
SPH_ATTR_MULTI = 0X40000001L
SPH_ATTR_MULTI64 = 0X40000002L
SPH_ATTR_TYPES = (SPH_ATTR_NONE,
SPH_ATTR_INTEGER,
SPH_ATTR_TIMESTAMP,
SPH_ATTR_ORDINAL,
SPH_ATTR_BOOL,
SPH_ATTR_FLOAT,
SPH_ATTR_BIGINT,
SPH_ATTR_STRING,
SPH_ATTR_MULTI,
SPH_ATTR_MULTI64)
# known grouping functions
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
SPH_GROUPBY_ATTRPAIR = 5
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
self._port = 9312 # searchd port (default is 9312)
self._path = None # searchd unix-domain socket path
self._socket = None
self._offset = 0 # how much records to seek from result-set start (default is 0)
self._limit = 20 # how much records to return from result-set starting at offset (default is 20)
self._mode = SPH_MATCH_ALL # query matching mode (default is SPH_MATCH_ALL)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
		self._sortby = ''							# attribute to sort by (default is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0 # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._rankexpr = '' # ranking expression for SPH_RANK_EXPR
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._timeout = 1.0 # connection timeout
self._fieldweights = {} # per-field-name weights
self._overrides = {} # per-query attribute values overrides
self._select = '*' # select-list (attributes or expressions, with optional aliases)
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
def __del__ (self):
if self._socket:
self._socket.close()
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port = None):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
if host.startswith('/'):
self._path = host
return
elif host.startswith('unix://'):
self._path = host[7:]
return
assert(isinstance(port, int))
self._host = host
self._port = port
self._path = None
def SetConnectTimeout ( self, timeout ):
"""
Set connection timeout ( float second )
"""
assert (isinstance(timeout, float))
		# a timeout of 0 would make the connection non-blocking, which is wrong, so clip the timeout to a reasonable minimum
self._timeout = max ( 0.001, timeout )
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
if self._socket:
# we have a socket, but is it still alive?
sr, sw, _ = select.select ( [self._socket], [self._socket], [], 0 )
# this is how alive socket should look
if len(sr)==0 and len(sw)==1:
return self._socket
# oops, looks like it was closed, lets reopen
self._socket.close()
self._socket = None
		sock = None
		try:
if self._path:
af = socket.AF_UNIX
addr = self._path
desc = self._path
else:
af = socket.AF_INET
addr = ( self._host, self._port )
desc = '%s;%s' % addr
sock = socket.socket ( af, socket.SOCK_STREAM )
sock.settimeout ( self._timeout )
sock.connect ( addr )
except socket.error, msg:
if sock:
sock.close()
self._error = 'connection to %s failed (%s)' % ( desc, msg )
return
		v = unpack('>L', sock.recv(4))[0]
if v<1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = ''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
if not self._socket:
sock.close()
# check response
read = len(response)
if not response or read!=length:
if length:
self._error = 'failed to read searchd response (status=%s, ver=%s, len=%s, read=%s)' \
% (status, ver, length, read)
else:
self._error = 'received zero-sized searchd response'
return None
# check status
if status==SEARCHD_WARNING:
wend = 4 + unpack ( '>L', response[0:4] )[0]
self._warning = response[4:wend]
return response[wend:]
if status==SEARCHD_ERROR:
self._error = 'searchd error: '+response[4:]
return None
if status==SEARCHD_RETRY:
self._error = 'temporary searchd error: '+response[4:]
return None
if status!=SEARCHD_OK:
self._error = 'unknown status code %d' % status
return None
# check version
if ver<client_ver:
self._warning = 'searchd command v.%d.%d older than client\'s v.%d.%d, some options might not work' \
% (ver>>8, ver&0xff, client_ver>>8, client_ver&0xff)
return response
def SetLimits (self, offset, limit, maxmatches=0, cutoff=0):
"""
Set offset and count into result set, and optionally set max-matches and cutoff limits.
"""
assert ( type(offset) in [int,long] and 0<=offset<16777216 )
assert ( type(limit) in [int,long] and 0<limit<16777216 )
assert(maxmatches>=0)
self._offset = offset
self._limit = limit
if maxmatches>0:
self._maxmatches = maxmatches
if cutoff>=0:
self._cutoff = cutoff
def SetMaxQueryTime (self, maxquerytime):
"""
Set maximum query time, in milliseconds, per-index. 0 means 'do not limit'.
"""
assert(isinstance(maxquerytime,int) and maxquerytime>0)
self._maxquerytime = maxquerytime
def SetMatchMode (self, mode):
"""
Set matching mode.
"""
assert(mode in [SPH_MATCH_ALL, SPH_MATCH_ANY, SPH_MATCH_PHRASE, SPH_MATCH_BOOLEAN, SPH_MATCH_EXTENDED, SPH_MATCH_FULLSCAN, SPH_MATCH_EXTENDED2])
self._mode = mode
def SetRankingMode ( self, ranker, rankexpr='' ):
"""
Set ranking mode.
"""
assert(ranker>=0 and ranker<SPH_RANK_TOTAL)
self._ranker = ranker
self._rankexpr = rankexpr
def SetSortMode ( self, mode, clause='' ):
"""
Set sorting mode.
"""
assert ( mode in [SPH_SORT_RELEVANCE, SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC, SPH_SORT_TIME_SEGMENTS, SPH_SORT_EXTENDED, SPH_SORT_EXPR] )
assert ( isinstance ( clause, str ) )
self._sort = mode
self._sortby = clause
def SetWeights (self, weights):
"""
Set per-field weights.
WARNING, DEPRECATED; do not use it! use SetFieldWeights() instead
"""
assert(isinstance(weights, list))
for w in weights:
AssertUInt32 ( w )
self._weights = weights
def SetFieldWeights (self, weights):
"""
Bind per-field weights by name; expects (name,field_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32 ( val )
self._fieldweights = weights
def SetIndexWeights (self, weights):
"""
Bind per-index weights by name; expects (name,index_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32(val)
self._indexweights = weights
def SetIDRange (self, minid, maxid):
"""
Set IDs range to match.
		Only match records if document ID is between minid and maxid (inclusive).
"""
assert(isinstance(minid, (int, long)))
assert(isinstance(maxid, (int, long)))
assert(minid<=maxid)
self._min_id = minid
self._max_id = maxid
def SetFilter ( self, attribute, values, exclude=0 ):
"""
Set values set filter.
Only match records where 'attribute' value is in given 'values' set.
"""
assert(isinstance(attribute, str))
assert iter(values)
for value in values:
AssertInt32 ( value )
self._filters.append ( { 'type':SPH_FILTER_VALUES, 'attr':attribute, 'exclude':exclude, 'values':values } )
def SetFilterRange (self, attribute, min_, max_, exclude=0 ):
"""
Set range filter.
		Only match records if 'attribute' value is between 'min_' and 'max_' (inclusive).
"""
assert(isinstance(attribute, str))
AssertInt32(min_)
AssertInt32(max_)
assert(min_<=max_)
self._filters.append ( { 'type':SPH_FILTER_RANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_ } )
def SetFilterFloatRange (self, attribute, min_, max_, exclude=0 ):
assert(isinstance(attribute,str))
assert(isinstance(min_,float))
assert(isinstance(max_,float))
assert(min_ <= max_)
self._filters.append ( {'type':SPH_FILTER_FLOATRANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_} )
def SetGeoAnchor (self, attrlat, attrlong, latitude, longitude):
assert(isinstance(attrlat,str))
assert(isinstance(attrlong,str))
assert(isinstance(latitude,float))
assert(isinstance(longitude,float))
self._anchor['attrlat'] = attrlat
self._anchor['attrlong'] = attrlong
self._anchor['lat'] = latitude
self._anchor['long'] = longitude
def SetGroupBy ( self, attribute, func, groupsort='@group desc' ):
"""
Set grouping attribute and function.
"""
assert(isinstance(attribute, str))
assert(func in [SPH_GROUPBY_DAY, SPH_GROUPBY_WEEK, SPH_GROUPBY_MONTH, SPH_GROUPBY_YEAR, SPH_GROUPBY_ATTR, SPH_GROUPBY_ATTRPAIR] )
assert(isinstance(groupsort, str))
self._groupby = attribute
self._groupfunc = func
self._groupsort = groupsort
def SetGroupDistinct (self, attribute):
assert(isinstance(attribute,str))
self._groupdistinct = attribute
def SetRetries (self, count, delay=0):
assert(isinstance(count,int) and count>=0)
assert(isinstance(delay,int) and delay>=0)
self._retrycount = count
self._retrydelay = delay
def SetOverride (self, name, type, values):
assert(isinstance(name, str))
assert(type in SPH_ATTR_TYPES)
assert(isinstance(values, dict))
self._overrides[name] = {'name': name, 'type': type, 'values': values}
def SetSelect (self, select):
assert(isinstance(select, str))
self._select = select
def ResetOverrides (self):
self._overrides = {}
def ResetFilters (self):
"""
Clear all filters (for multi-queries).
"""
self._filters = []
self._anchor = {}
def ResetGroupBy (self):
"""
Clear groupby settings (for multi-queries).
"""
self._groupby = ''
self._groupfunc = SPH_GROUPBY_DAY
self._groupsort = '@group desc'
self._groupdistinct = ''
def Query (self, query, index='*', comment=''):
"""
Connect to searchd server and run given search query.
Returns None on failure; result set hash on success (see documentation for details).
"""
assert(len(self._reqs)==0)
self.AddQuery(query,index,comment)
results = self.RunQueries()
self._reqs = [] # we won't re-run erroneous batch
if not results or len(results)==0:
return None
self._error = results[0]['error']
self._warning = results[0]['warning']
if results[0]['status'] == SEARCHD_ERROR:
return None
return results[0]
def AddQuery (self, query, index='*', comment=''):
"""
Add query to batch.
"""
# build request
req = []
req.append(pack('>4L', self._offset, self._limit, self._mode, self._ranker))
if self._ranker==SPH_RANK_EXPR:
req.append(pack('>L', len(self._rankexpr)))
req.append(self._rankexpr)
req.append(pack('>L', self._sort))
req.append(pack('>L', len(self._sortby)))
req.append(self._sortby)
if isinstance(query,unicode):
query = query.encode('utf-8')
assert(isinstance(query,str))
req.append(pack('>L', len(query)))
req.append(query)
req.append(pack('>L', len(self._weights)))
for w in self._weights:
req.append(pack('>L', w))
req.append(pack('>L', len(index)))
req.append(index)
req.append(pack('>L',1)) # id64 range marker
req.append(pack('>Q', self._min_id))
req.append(pack('>Q', self._max_id))
# filters
req.append ( pack ( '>L', len(self._filters) ) )
for f in self._filters:
req.append ( pack ( '>L', len(f['attr'])) + f['attr'])
filtertype = f['type']
req.append ( pack ( '>L', filtertype))
if filtertype == SPH_FILTER_VALUES:
req.append ( pack ('>L', len(f['values'])))
for val in f['values']:
req.append ( pack ('>q', val))
elif filtertype == SPH_FILTER_RANGE:
req.append ( pack ('>2q', f['min'], f['max']))
elif filtertype == SPH_FILTER_FLOATRANGE:
req.append ( pack ('>2f', f['min'], f['max']))
req.append ( pack ( '>L', f['exclude'] ) )
# group-by, max-matches, group-sort
req.append ( pack ( '>2L', self._groupfunc, len(self._groupby) ) )
req.append ( self._groupby )
req.append ( pack ( '>2L', self._maxmatches, len(self._groupsort) ) )
req.append ( self._groupsort )
req.append ( pack ( '>LLL', self._cutoff, self._retrycount, self._retrydelay))
req.append ( pack ( '>L', len(self._groupdistinct)))
req.append ( self._groupdistinct)
# anchor point
if len(self._anchor) == 0:
req.append ( pack ('>L', 0))
else:
attrlat, attrlong = self._anchor['attrlat'], self._anchor['attrlong']
latitude, longitude = self._anchor['lat'], self._anchor['long']
req.append ( pack ('>L', 1))
req.append ( pack ('>L', len(attrlat)) + attrlat)
req.append ( pack ('>L', len(attrlong)) + attrlong)
req.append ( pack ('>f', latitude) + pack ('>f', longitude))
# per-index weights
req.append ( pack ('>L',len(self._indexweights)))
for indx,weight in self._indexweights.items():
req.append ( pack ('>L',len(indx)) + indx + pack ('>L',weight))
# max query time
req.append ( pack ('>L', self._maxquerytime) )
# per-field weights
req.append ( pack ('>L',len(self._fieldweights) ) )
for field,weight in self._fieldweights.items():
req.append ( pack ('>L',len(field)) + field + pack ('>L',weight) )
# comment
req.append ( pack('>L',len(comment)) + comment )
# attribute overrides
req.append ( pack('>L', len(self._overrides)) )
for v in self._overrides.values():
req.extend ( ( pack('>L', len(v['name'])), v['name'] ) )
req.append ( pack('>LL', v['type'], len(v['values'])) )
for id, value in v['values'].iteritems():
req.append ( pack('>Q', id) )
if v['type'] == SPH_ATTR_FLOAT:
req.append ( pack('>f', value) )
elif v['type'] == SPH_ATTR_BIGINT:
req.append ( pack('>q', value) )
else:
req.append ( pack('>l', value) )
# select-list
req.append ( pack('>L', len(self._select)) )
req.append ( self._select )
# send query, get response
req = ''.join(req)
self._reqs.append(req)
return
def RunQueries (self):
"""
Run queries batch.
Returns None on network IO failure; or an array of result set hashes on success.
"""
if len(self._reqs)==0:
self._error = 'no queries defined, issue AddQuery() first'
return None
sock = self._Connect()
if not sock:
return None
req = ''.join(self._reqs)
length = len(req)+8
req = pack('>HHLLL', SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, length, 0, len(self._reqs))+req
sock.send(req)
response = self._GetResponse(sock, VER_COMMAND_SEARCH)
if not response:
return None
nreqs = len(self._reqs)
# parse response
max_ = len(response)
p = 0
results = []
for i in range(0,nreqs,1):
result = {}
results.append(result)
result['error'] = ''
result['warning'] = ''
status = unpack('>L', response[p:p+4])[0]
p += 4
result['status'] = status
if status != SEARCHD_OK:
length = unpack('>L', response[p:p+4])[0]
p += 4
message = response[p:p+length]
p += length
if status == SEARCHD_WARNING:
result['warning'] = message
else:
result['error'] = message
continue
# read schema
fields = []
attrs = []
nfields = unpack('>L', response[p:p+4])[0]
p += 4
while nfields>0 and p<max_:
nfields -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
fields.append(response[p:p+length])
p += length
result['fields'] = fields
nattrs = unpack('>L', response[p:p+4])[0]
p += 4
while nattrs>0 and p<max_:
nattrs -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
attr = response[p:p+length]
p += length
type_ = unpack('>L', response[p:p+4])[0]
p += 4
attrs.append([attr,type_])
result['attrs'] = attrs
# read match count
count = unpack('>L', response[p:p+4])[0]
p += 4
id64 = unpack('>L', response[p:p+4])[0]
p += 4
# read matches
result['matches'] = []
while count>0 and p<max_:
count -= 1
if id64:
doc, weight = unpack('>QL', response[p:p+12])
p += 12
else:
doc, weight = unpack('>2L', response[p:p+8])
p += 8
match = { 'id':doc, 'weight':weight, 'attrs':{} }
for i in range(len(attrs)):
if attrs[i][1] == SPH_ATTR_FLOAT:
match['attrs'][attrs[i][0]] = unpack('>f', response[p:p+4])[0]
elif attrs[i][1] == SPH_ATTR_BIGINT:
match['attrs'][attrs[i][0]] = unpack('>q', response[p:p+8])[0]
p += 4
elif attrs[i][1] == SPH_ATTR_STRING:
slen = unpack('>L', response[p:p+4])[0]
p += 4
match['attrs'][attrs[i][0]] = ''
if slen>0:
match['attrs'][attrs[i][0]] = response[p:p+slen]
p += slen-4
elif attrs[i][1] == SPH_ATTR_MULTI:
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>L', response[p:p+4])[0])
p += 4
p -= 4
elif attrs[i][1] == SPH_ATTR_MULTI64:
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
nvals = nvals/2
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>q', response[p:p+8])[0])
p += 8
p -= 4
else:
match['attrs'][attrs[i][0]] = unpack('>L', response[p:p+4])[0]
p += 4
result['matches'].append ( match )
result['total'], result['total_found'], result['time'], words = unpack('>4L', response[p:p+16])
result['time'] = '%.3f' % (result['time']/1000.0)
p += 16
result['words'] = []
while words>0:
words -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
word = response[p:p+length]
p += length
docs, hits = unpack('>2L', response[p:p+8])
p += 8
result['words'].append({'word':word, 'docs':docs, 'hits':hits})
self._reqs = []
return results
def BuildExcerpts (self, docs, index, words, opts=None):
"""
        Connect to searchd server and generate excerpts from given documents.
"""
if not opts:
opts = {}
if isinstance(words,unicode):
words = words.encode('utf-8')
assert(isinstance(docs, list))
assert(isinstance(index, str))
assert(isinstance(words, str))
assert(isinstance(opts, dict))
sock = self._Connect()
if not sock:
return None
# fixup options
opts.setdefault('before_match', '<b>')
opts.setdefault('after_match', '</b>')
opts.setdefault('chunk_separator', ' ... ')
opts.setdefault('html_strip_mode', 'index')
opts.setdefault('limit', 256)
opts.setdefault('limit_passages', 0)
opts.setdefault('limit_words', 0)
opts.setdefault('around', 5)
opts.setdefault('start_passage_id', 1)
opts.setdefault('passage_boundary', 'none')
# build request
# v.1.0 req
flags = 1 # (remove spaces)
if opts.get('exact_phrase'): flags |= 2
if opts.get('single_passage'): flags |= 4
if opts.get('use_boundaries'): flags |= 8
if opts.get('weight_order'): flags |= 16
if opts.get('query_mode'): flags |= 32
if opts.get('force_all_words'): flags |= 64
if opts.get('load_files'): flags |= 128
if opts.get('allow_empty'): flags |= 256
if opts.get('emit_zones'): flags |= 512
if opts.get('load_files_scattered'): flags |= 1024
# mode=0, flags
req = [pack('>2L', 0, flags)]
# req index
req.append(pack('>L', len(index)))
req.append(index)
# req words
req.append(pack('>L', len(words)))
req.append(words)
# options
req.append(pack('>L', len(opts['before_match'])))
req.append(opts['before_match'])
req.append(pack('>L', len(opts['after_match'])))
req.append(opts['after_match'])
req.append(pack('>L', len(opts['chunk_separator'])))
req.append(opts['chunk_separator'])
req.append(pack('>L', int(opts['limit'])))
req.append(pack('>L', int(opts['around'])))
req.append(pack('>L', int(opts['limit_passages'])))
req.append(pack('>L', int(opts['limit_words'])))
req.append(pack('>L', int(opts['start_passage_id'])))
req.append(pack('>L', len(opts['html_strip_mode'])))
req.append((opts['html_strip_mode']))
req.append(pack('>L', len(opts['passage_boundary'])))
req.append((opts['passage_boundary']))
# documents
req.append(pack('>L', len(docs)))
for doc in docs:
if isinstance(doc,unicode):
doc = doc.encode('utf-8')
assert(isinstance(doc, str))
req.append(pack('>L', len(doc)))
req.append(doc)
req = ''.join(req)
# send query, get response
length = len(req)
# add header
req = pack('>2HL', SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, length)+req
wrote = sock.send(req)
response = self._GetResponse(sock, VER_COMMAND_EXCERPT )
if not response:
return []
# parse response
pos = 0
res = []
rlen = len(response)
for i in range(len(docs)):
length = unpack('>L', response[pos:pos+4])[0]
pos += 4
if pos+length > rlen:
self._error = 'incomplete reply'
return []
res.append(response[pos:pos+length])
pos += length
return res
def UpdateAttributes ( self, index, attrs, values, mva=False ):
"""
Update given attribute values on given documents in given indexes.
Returns amount of updated documents (0 or more) on success, or -1 on failure.
'attrs' must be a list of strings.
'values' must be a dict with int key (document ID) and list of int values (new attribute values).
optional boolean parameter 'mva' points that there is update of MVA attributes.
In this case the 'values' must be a dict with int key (document ID) and list of lists of int values
(new MVA attribute values).
Example:
res = cl.UpdateAttributes ( 'test1', [ 'group_id', 'date_added' ], { 2:[123,1000000000], 4:[456,1234567890] } )
"""
assert ( isinstance ( index, str ) )
assert ( isinstance ( attrs, list ) )
assert ( isinstance ( values, dict ) )
for attr in attrs:
assert ( isinstance ( attr, str ) )
for docid, entry in values.items():
AssertUInt32(docid)
assert ( isinstance ( entry, list ) )
assert ( len(attrs)==len(entry) )
for val in entry:
if mva:
assert ( isinstance ( val, list ) )
for vals in val:
AssertInt32(vals)
else:
AssertInt32(val)
# build request
req = [ pack('>L',len(index)), index ]
req.append ( pack('>L',len(attrs)) )
mva_attr = 0
if mva: mva_attr = 1
for attr in attrs:
req.append ( pack('>L',len(attr)) + attr )
req.append ( pack('>L', mva_attr ) )
req.append ( pack('>L',len(values)) )
for docid, entry in values.items():
req.append ( pack('>Q',docid) )
for val in entry:
val_len = val
if mva: val_len = len ( val )
req.append ( pack('>L',val_len ) )
if mva:
for vals in val:
req.append ( pack ('>L',vals) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, length ) + req
wrote = sock.send ( req )
response = self._GetResponse ( sock, VER_COMMAND_UPDATE )
if not response:
return -1
# parse response
updated = unpack ( '>L', response[0:4] )[0]
return updated
def BuildKeywords ( self, query, index, hits ):
"""
Connect to searchd server, and generate keywords list for a given query.
Returns None on failure, or a list of keywords on success.
"""
assert ( isinstance ( query, str ) )
assert ( isinstance ( index, str ) )
assert ( isinstance ( hits, int ) )
# build request
req = [ pack ( '>L', len(query) ) + query ]
req.append ( pack ( '>L', len(index) ) + index )
req.append ( pack ( '>L', hits ) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, length ) + req
wrote = sock.send ( req )
response = self._GetResponse ( sock, VER_COMMAND_KEYWORDS )
if not response:
return None
# parse response
res = []
nwords = unpack ( '>L', response[0:4] )[0]
p = 4
max_ = len(response)
while nwords>0 and p<max_:
nwords -= 1
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
tokenized = response[p:p+length]
p += length
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
normalized = response[p:p+length]
p += length
entry = { 'tokenized':tokenized, 'normalized':normalized }
if hits:
entry['docs'], entry['hits'] = unpack ( '>2L', response[p:p+8] )
p += 8
res.append ( entry )
if nwords>0 or p>max_:
self._error = 'incomplete reply'
return None
return res
def Status ( self ):
"""
Get the status
"""
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = pack ( '>2HLL', SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS, 4, 1 )
wrote = sock.send ( req )
response = self._GetResponse ( sock, VER_COMMAND_STATUS )
if not response:
return None
# parse response
res = []
p = 8
max_ = len(response)
while p<max_:
length = unpack ( '>L', response[p:p+4] )[0]
k = response[p+4:p+length+4]
p += 4+length
length = unpack ( '>L', response[p:p+4] )[0]
v = response[p+4:p+length+4]
p += 4+length
res += [[k, v]]
return res
### persistent connections
def Open(self):
if self._socket:
self._error = 'already connected'
return None
server = self._Connect()
if not server:
return None
# command, command version = 0, body length = 4, body = 1
request = pack ( '>hhII', SEARCHD_COMMAND_PERSIST, 0, 4, 1 )
server.send ( request )
self._socket = server
return True
def Close(self):
if not self._socket:
self._error = 'not connected'
return
self._socket.close()
self._socket = None
def EscapeString(self, string):
return re.sub(r"([=\(\)|\-!@~\"&/\\\^\$\=])", r"\\\1", string)
def FlushAttributes(self):
sock = self._Connect()
if not sock:
return -1
request = pack ( '>hhI', SEARCHD_COMMAND_FLUSHATTRS, VER_COMMAND_FLUSHATTRS, 0 ) # cmd, ver, bodylen
sock.send ( request )
response = self._GetResponse ( sock, VER_COMMAND_FLUSHATTRS )
if not response or len(response)!=4:
self._error = 'unexpected response length'
return -1
tag = unpack ( '>L', response[0:4] )[0]
return tag
def AssertInt32 ( value ):
assert(isinstance(value, (int, long)))
assert(value>=-2**32-1 and value<=2**32-1)
def AssertUInt32 ( value ):
assert(isinstance(value, (int, long)))
assert(value>=0 and value<=2**32-1)
#
# $Id: sphinxapi.py 2970 2011-09-23 16:50:22Z klirichek $
#
| apahomov/django-sphinx | djangosphinx/apis/api281/__init__.py | Python | bsd-3-clause | 30,427 | 0.052059 |
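A minimal usage sketch for the SphinxClient API above; the server address, index name, query, and weights are placeholder assumptions, not values from the source:
# Python 2, matching the module above; assumes the module's SphinxClient class.
cl = SphinxClient()
cl.SetServer('localhost', 9312)                  # hypothetical searchd host/port
cl.SetFieldWeights({'title': 10, 'body': 1})     # boost matches in the title field
cl.SetFilterRange('group_id', 1, 100)            # keep only group_id in [1, 100]
res = cl.Query('hello world', index='test1')     # returns None on failure
if res:
    for match in res['matches']:
        print('%s %s' % (match['id'], match['weight']))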
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^register/$', views.register, name='register'),
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', views.user_logout, name='logout'),
url(r'^groups/$', views.groups, name='groups'),
url(r'^sitemanager/$', views.sitemanager, name='sitemanager'),
url(r'^(?P<user_id>[0-9]+)/user_view/$', views.user_view, name='user_view'),
url(r'^(?P<user_id>[0-9]+)/deactivate/$', views.deactivate, name='deactivate'),
url(r'^(?P<user_id>[0-9]+)/activate/$', views.activate, name='activate'),
url(r'^(?P<user_id>[0-9]+)/makeSiteManager/$', views.makeSiteManager, name='makeSiteManager'),
url(r'^(?P<user_id>[0-9]+)/unmakeSiteManager/$', views.unmakeSiteManager, name='unmakeSiteManager'),
url(r'^groups/sitemanager/$', views.groupsSM, name='groupsSM'),
]
| j-windsor/cs3240-f15-team21-v2 | accounts/urls.py | Python | mit | 876 | 0.004566 |
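A short sketch of resolving the named routes above; the user id is an arbitrary example:
from django.core.urlresolvers import reverse  # Django 1.x, matching conf.urls above
reverse('login')                 # '/login/' relative to wherever these urlpatterns are included
reverse('user_view', args=[42])  # '/42/user_view/' for a hypothetical user id 42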
from flask import render_template, jsonify, request
from jsonrpclib import jsonrpc
import base64
import urllib
from maraschino import app, logger
from maraschino.tools import *
def nzbget_http():
if get_setting_value('nzbget_https') == '1':
return 'https://'
else:
return 'http://'
def nzbget_auth():
return 'nzbget:%s@' % (get_setting_value('nzbget_password'))
def nzbget_url():
return '%s%s%s:%s' % (nzbget_http(), \
nzbget_auth(), \
get_setting_value('nzbget_host'), \
get_setting_value('nzbget_port'))
def nzbget_exception(e):
logger.log('NZBGet :: EXCEPTION -- %s' % e, 'DEBUG')
@app.route('/xhr/nzbget/')
@requires_auth
def xhr_nzbget():
downloads = status = nzbget = None
logger.log('NZBGet :: Getting download list', 'INFO')
try:
nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
status = nzbget.status()
downloads = nzbget.listgroups()
except Exception as e:
nzbget_exception(e)
logger.log('NZBGet :: Getting download list (DONE)', 'INFO')
return render_template('nzbget/queue.html',
nzbget=status,
downloads=downloads,
)
@app.route('/xhr/nzbget/queue/<action>/')
@requires_auth
def queue_action_nzbget(action):
status = False
logger.log('NZBGet :: Queue action: %s' % action, 'INFO')
try:
nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
if 'resume' in action:
status = nzbget.resume()
elif 'pause' in action:
status = nzbget.pause()
except Exception as e:
nzbget_exception(e)
return jsonify({'success': status})
@app.route('/xhr/nzbget/queue/add/', methods=['POST'])
@requires_auth
def queue_add_nzbget():
status = False
    nzb_url = request.form.get('url', '')
    if len(nzb_url):
        try:
            nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
            nzb = urllib.urlopen(nzb_url).read()
            # NZBGet's append() expects the NZB file content base64-encoded.
            status = nzbget.append('test', '', False, base64.standard_b64encode(nzb))
except Exception as e:
nzbget_exception(e)
return jsonify({'success': status})
@app.route('/xhr/nzbget/individual/<int:id>/<action>/')
@requires_auth
def individual_action_nzbget(id, action):
status = False
logger.log('NZBGet :: Item %s action: %s' % (id, action), 'INFO')
if 'resume' in action:
action = 'GroupResume'
elif 'pause' in action:
action = 'GroupPause'
elif 'delete' in action:
action = 'GroupDelete'
try:
nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
status = nzbget.editqueue(action, 0, '', id)
except Exception as e:
nzbget_exception(e)
return jsonify({'success': status, 'id': id, 'action': action})
@app.route('/xhr/nzbget/set_speed/<int:speed>/')
@requires_auth
def set_speed_nzbget(speed):
logger.log('NZBGet :: Setting speed limit: %s' % speed, 'INFO')
    status = False
    try:
        nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
        status = nzbget.rate(speed)
except Exception as e:
nzbget_exception(e)
return jsonify({'success': status})
| hephaestus9/Ironworks | modules/plugins/nzbget.py | Python | mit | 3,148 | 0.002224 |
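A hedged example of driving the queue-add endpoint above from a client; the Maraschino base URL and port are assumptions:
import requests
resp = requests.post('http://localhost:7000/xhr/nzbget/queue/add/',
                     data={'url': 'http://example.com/some.nzb'})  # placeholder NZB URL
print(resp.json())  # {'success': True} when NZBGet accepted the file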
import bottle
from cork import Cork
from utils import skeleton
aaa = Cork('users', email_sender='[email protected]',
smtp_url='smtp://smtp.magnet.ie')
authorize = aaa.make_auth_decorator(fail_redirect='/login', role="user")
def postd():
return bottle.request.forms
def post_get(name, default=''):
return bottle.request.POST.get(name, default).strip()
@bottle.post('/login')
def login():
"""Authenticate users"""
username = post_get('username')
password = post_get('password')
aaa.login(username, password, success_redirect='/', fail_redirect='/login')
@bottle.route('/logout')
def logout():
aaa.logout(success_redirect='/login')
@bottle.post('/register')
def register():
"""Send out registration email"""
aaa.register(post_get('username'), post_get('password'),
post_get('email_address'))
return 'Please check your mailbox.'
@bottle.route('/validate_registration/:registration_code')
def validate_registration(registration_code):
"""Validate registration, create user account"""
aaa.validate_registration(registration_code)
return 'Thanks. <a href="/login">Go to login</a>'
@bottle.post('/change_password')
def change_password():
"""Change password"""
aaa.reset_password(post_get('reset_code'), post_get('password'))
return 'Thanks. <a href="/login">Go to login</a>'
@bottle.post('/create_user')
def create_user():
try:
aaa.create_user(postd().username, postd().role, postd().password)
return dict(ok=True, msg='')
except Exception as e:
return dict(ok=False, msg=e.message)
@bottle.post('/delete_user')
def delete_user():
try:
aaa.delete_user(post_get('username'))
return dict(ok=True, msg='')
except Exception as e:
print(repr(e))
return dict(ok=False, msg=e.message)
@bottle.post('/create_role')
def create_role():
try:
aaa.create_role(post_get('role'), post_get('level'))
return dict(ok=True, msg='')
except Exception as e:
return dict(ok=False, msg=e.message)
@bottle.post('/delete_role')
def delete_role():
try:
aaa.delete_role(post_get('role'))
return dict(ok=True, msg='')
except Exception as e:
return dict(ok=False, msg=e.message)
# Static pages
@bottle.route('/login')
def login_form():
"""Serve login form"""
return skeleton(bottle.template('login_form'))
@bottle.route('/sorry_page')
def sorry_page():
"""Serve sorry page"""
return '<p>Sorry, you are not authorized to perform this action</p>'
| cmry/ebacs | corks.py | Python | bsd-3-clause | 2,583 | 0 |
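A minimal sketch of actually serving the routes above; host, port, and debug settings are assumptions:
import bottle
if __name__ == '__main__':
    bottle.debug(True)
    bottle.run(host='localhost', port=8080)  # hypothetical dev settings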
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import torch.nn as nn
class SyncBatchNorm:
"""
This mixin converts the BatchNorm modules to SyncBatchNorm modules when utilizing
distributed training on GPUs.
Example config:
config=dict(
use_sync_batchnorm=True
)
"""
def create_model(self, config, device):
model = super().create_model(config, device)
use_sync_batchnorm = config.get("use_sync_batchnorm", True)
distributed = config.get("distributed", False)
if use_sync_batchnorm and distributed and next(model.parameters()).is_cuda:
# Convert batch norm to sync batch norms
model = nn.modules.SyncBatchNorm.convert_sync_batchnorm(module=model)
return model
@classmethod
def get_execution_order(cls):
eo = super().get_execution_order()
eo["setup_experiment"].insert(0, "Sync Batchnorm begin")
eo["setup_experiment"].append("Sync Batchnorm end")
return eo
| mrcslws/nupic.research | src/nupic/research/frameworks/vernon/mixins/sync_batchnorm.py | Python | agpl-3.0 | 1,943 | 0.001544 |
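A sketch of composing the mixin with an experiment class; treat the import paths and the base-class name as assumptions about the surrounding framework:
from nupic.research.frameworks.vernon import SupervisedExperiment  # assumed location
from nupic.research.frameworks.vernon.mixins import SyncBatchNorm
# Mixins go left of the base class so their create_model() wraps the base one.
class SyncBNExperiment(SyncBatchNorm, SupervisedExperiment):
    pass
config = dict(
    use_sync_batchnorm=True,  # enable BatchNorm -> SyncBatchNorm conversion
    distributed=True,         # conversion only happens for distributed GPU runs
)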
#!/usr/bin/env python
import os
import sys
import glob
import argparse
import logging
import rpy2.robjects as robjects
import utils
rscript = ''
R = robjects.r
def run_rscript(command=None):
"""Run R command, log it, append to rscript"""
global rscript
if not command:
return
logging.debug(command)
rscript += '{}\n'.format(command)
msg = R(command)
def plot_transcript(rdata_load='Metagene.rda', transcript_name='',
transcript_length='27', transcript_cap='',
html_file='Plot-ribosome-profile.html',
output_path=os.getcwd()):
"""Plot ribosome profile for a given transcript. """
options = {}
for key, value, rtype, rmode in (
('transcript_name', transcript_name, 'str', None),
('transcript_length', transcript_length, 'int', 'charvector'),
('transcript_cap', transcript_cap, 'int', None)):
options[key] = utils.process_args(value, ret_type=rtype, ret_mode=rmode)
run_rscript('suppressMessages(library(riboSeqR))')
run_rscript('load("{}")'.format(rdata_load))
html = """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2//EN">
<html>
<head>
<title>Ribosome Profile Plot - Report</title>
</head>
<body>
"""
html += '<h2>Plot ribosome profile - results</h2>\n<hr>\n'
if len(transcript_name):
cmd_args = (
'"{transcript_name}", main="{transcript_name}",'
'coordinates=ffCs@CDS, riboData=riboDat,'
'length={transcript_length}'.format(**options))
if transcript_cap:
cmd_args += ', cap={transcript_cap}'.format(**options)
plot_file = os.path.join(output_path, 'Ribosome-profile-plot')
for fmat in ('pdf', 'png'):
if fmat == 'png':
cmd = 'png(file="{}_%1d.png", type="cairo")'.format(plot_file)
else:
cmd = 'pdf(file="{}.pdf")'.format(plot_file)
run_rscript(cmd)
cmd = 'plotTranscript({})'.format(cmd_args)
run_rscript(cmd)
run_rscript('dev.off()')
html += ('<p>Selected ribosome footprint length: '
'<strong>{0}</strong>\n'.format(transcript_length))
for image in sorted(glob.glob('{}_*.png'.format(plot_file))):
html += '<p><img border="1" src="{0}" alt="{0}"></p>\n'.format(
os.path.basename(image))
html += '<p><a href="Ribosome-profile-plot.pdf">PDF version</a></p>\n'
else:
msg = 'No transcript name was provided. Did not generate plot.'
html += '<p>{}</p>'.format(msg)
logging.debug(msg)
logging.debug('\n{:#^80}\n{}\n{:#^80}\n'.format(
' R script for this session ', rscript, ' End R script '))
with open(os.path.join(output_path, 'ribosome-profile.R'), 'w') as r:
r.write(rscript)
html += ('<h4>R script for this session</h4>\n'
'<p><a href="ribosome-profile.R">ribosome-profile.R</a></p>\n'
'</body>\n</html>\n')
with open(html_file, 'w') as f:
f.write(html)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot Ribosome profile')
# required arguments
flags = parser.add_argument_group('required arguments')
flags.add_argument('--rdata_load', required=True,
help='Saved riboSeqR data from Step 2')
flags.add_argument('--transcript_name', required=True,
help='Name of the transcript to be plotted')
flags.add_argument(
'--transcript_length', required=True,
help='Size class of ribosome footprint data to be plotted',
default='27')
flags.add_argument(
'--transcript_cap', required=True,
help=('Cap on the largest value that will be plotted as an abundance '
'of the ribosome footprint data'))
parser.add_argument('--html_file', help='HTML file with reports')
parser.add_argument('--output_path', help='Directory to save output files')
parser.add_argument('--debug', help='Produce debug output',
action='store_true')
args = parser.parse_args()
if args.debug:
logging.basicConfig(format='%(levelname)s - %(message)s',
level=logging.DEBUG, stream=sys.stdout)
logging.debug('Supplied Arguments\n{}\n'.format(vars(args)))
if not os.path.exists(args.output_path):
os.mkdir(args.output_path)
plot_transcript(rdata_load=args.rdata_load,
transcript_name=args.transcript_name,
transcript_length=args.transcript_length,
transcript_cap=args.transcript_cap,
html_file=args.html_file, output_path=args.output_path)
logging.debug('Done!')
| vimalkvn/riboseqr_wrapper | riboseqr/ribosome_profile.py | Python | gpl-2.0 | 4,816 | 0.000208 |
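An example invocation of the script above; the transcript name and file paths are placeholders:
# python ribosome_profile.py --rdata_load Metagene.rda \
#     --transcript_name 'CUFF.37930.1' --transcript_length 27 --transcript_cap 200 \
#     --html_file Plot-ribosome-profile.html --output_path ./output --debug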
from __future__ import absolute_import
from .nihts_xcam import XenicsCamera
| henryroe/xenics_pluto | nihts_xcam/__init__.py | Python | mit | 77 | 0 |
from mock import patch, Mock
from nose.tools import istest
from unittest import TestCase
from structominer import Document, Field
class DocumentTests(TestCase):
@istest
def creating_document_object_with_string_should_automatically_parse(self):
html = '<html></html>'
with patch('structominer.Document.parse') as mocked_parse:
doc = Document(html)
mocked_parse.assert_called_with(html)
@istest
def document_should_store_fields_in_order(self):
class Doc(Document):
three = Mock(Field, _field_counter=3)
two = Mock(Field, _field_counter=2)
one = Mock(Field, _field_counter=1)
doc = Doc()
self.assertEquals([field._field_counter for field in doc._fields.values()], [1, 2, 3])
@istest
def document_should_only_parse_fields_with_auto_parse_attributes(self):
html = '<html></html>'
class Doc(Document):
one = Mock(Field, _field_counter=1, auto_parse=True)
two = Mock(Field, _field_counter=2, auto_parse=False)
doc = Doc(html)
self.assertTrue(doc.one.parse.called)
self.assertFalse(doc.two.parse.called)
| aGHz/structominer | tests/test_document.py | Python | mit | 1,199 | 0.003336 |
#===- disassembler.py - Python LLVM Bindings -----------------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
from ctypes import CFUNCTYPE
from ctypes import POINTER
from ctypes import addressof
from ctypes import c_byte
from ctypes import c_char_p
from ctypes import c_int
from ctypes import c_size_t
from ctypes import c_ubyte
from ctypes import c_uint64
from ctypes import c_void_p
from ctypes import cast
from .common import LLVMObject
from .common import c_object_p
from .common import get_library
__all__ = [
'Disassembler',
]
lib = get_library()
callbacks = {}
# Constants for set_options
Option_UseMarkup = 1
_initialized = False
_targets = ['AArch64', 'ARM', 'Hexagon', 'MSP430', 'Mips', 'NVPTX', 'PowerPC', 'R600', 'Sparc', 'SystemZ', 'X86', 'XCore']
def _ensure_initialized():
global _initialized
if not _initialized:
# Here one would want to call the functions
# LLVMInitializeAll{TargetInfo,TargetMC,Disassembler}s, but
# unfortunately they are only defined as static inline
# functions in the header files of llvm-c, so they don't exist
# as symbols in the shared library.
# So until that is fixed use this hack to initialize them all
for tgt in _targets:
for initializer in ("TargetInfo", "TargetMC", "Disassembler"):
try:
f = getattr(lib, "LLVMInitialize" + tgt + initializer)
except AttributeError:
continue
f()
_initialized = True
class Disassembler(LLVMObject):
"""Represents a disassembler instance.
Disassembler instances are tied to specific "triple," which must be defined
at creation time.
Disassembler instances can disassemble instructions from multiple sources.
"""
def __init__(self, triple):
"""Create a new disassembler instance.
The triple argument is the triple to create the disassembler for. This
is something like 'i386-apple-darwin9'.
"""
_ensure_initialized()
ptr = lib.LLVMCreateDisasm(c_char_p(triple), c_void_p(None), c_int(0),
callbacks['op_info'](0), callbacks['symbol_lookup'](0))
if not ptr:
raise Exception('Could not obtain disassembler for triple: %s' %
triple)
LLVMObject.__init__(self, ptr, disposer=lib.LLVMDisasmDispose)
def get_instruction(self, source, pc=0):
"""Obtain the next instruction from an input source.
The input source should be a str or bytearray or something that
represents a sequence of bytes.
This function will start reading bytes from the beginning of the
source.
The pc argument specifies the address that the first byte is at.
This returns a 2-tuple of:
long number of bytes read. 0 if no instruction was read.
str representation of instruction. This will be the assembly that
represents the instruction.
"""
buf = cast(c_char_p(source), POINTER(c_ubyte))
out_str = cast((c_byte * 255)(), c_char_p)
result = lib.LLVMDisasmInstruction(self, buf, c_uint64(len(source)),
c_uint64(pc), out_str, 255)
return (result, out_str.value)
def get_instructions(self, source, pc=0):
"""Obtain multiple instructions from an input source.
This is like get_instruction() except it is a generator for all
instructions within the source. It starts at the beginning of the
source and reads instructions until no more can be read.
This generator returns 3-tuple of:
long address of instruction.
long size of instruction, in bytes.
str representation of instruction.
"""
source_bytes = c_char_p(source)
out_str = cast((c_byte * 255)(), c_char_p)
# This could probably be written cleaner. But, it does work.
buf = cast(source_bytes, POINTER(c_ubyte * len(source))).contents
offset = 0
address = pc
end_address = pc + len(source)
while address < end_address:
b = cast(addressof(buf) + offset, POINTER(c_ubyte))
result = lib.LLVMDisasmInstruction(self, b,
c_uint64(len(source) - offset), c_uint64(address),
out_str, 255)
if result == 0:
break
yield (address, result, out_str.value)
address += result
offset += result
def set_options(self, options):
if not lib.LLVMSetDisasmOptions(self, options):
raise Exception('Unable to set all disassembler options in %i' % options)
def register_library(library):
library.LLVMCreateDisasm.argtypes = [c_char_p, c_void_p, c_int,
callbacks['op_info'], callbacks['symbol_lookup']]
library.LLVMCreateDisasm.restype = c_object_p
library.LLVMDisasmDispose.argtypes = [Disassembler]
library.LLVMDisasmInstruction.argtypes = [Disassembler, POINTER(c_ubyte),
c_uint64, c_uint64, c_char_p, c_size_t]
library.LLVMDisasmInstruction.restype = c_size_t
library.LLVMSetDisasmOptions.argtypes = [Disassembler, c_uint64]
library.LLVMSetDisasmOptions.restype = c_int
callbacks['op_info'] = CFUNCTYPE(c_int, c_void_p, c_uint64, c_uint64, c_uint64,
c_int, c_void_p)
callbacks['symbol_lookup'] = CFUNCTYPE(c_char_p, c_void_p, c_uint64,
POINTER(c_uint64), c_uint64,
POINTER(c_char_p))
register_library(lib)
| endlessm/chromium-browser | third_party/llvm/llvm/bindings/python/llvm/disassembler.py | Python | bsd-3-clause | 5,918 | 0.002028 |
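A short usage sketch for the Disassembler class above; the triple comes from the class docstring and the byte string is illustrative:
from llvm.disassembler import Disassembler
dis = Disassembler('i386-apple-darwin9')
# Two x86 NOP instructions (0x90), starting at a hypothetical pc.
for address, size, text in dis.get_instructions(b'\x90\x90', pc=0x1000):
    print('0x%x %d %s' % (address, size, text))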
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from isis_powder.routines import common, yaml_parser
import os
def create_run_details_object(run_number_string, inst_settings, is_vanadium_run, empty_run_number,
grouping_file_name, vanadium_string, splined_name_list=None, van_abs_file_name=None):
"""
Creates and returns a run details object which holds various
properties about the current run.
:param run_number_string: The user string for the current run
:param inst_settings: The current instrument object
:param is_vanadium_run: Boolean of if the current run is a vanadium run
:param empty_run_number: Empty run number(s) from mapping file
:param grouping_file_name: Filename of the grouping file found in the calibration folder
:param vanadium_string: Vanadium run number(s) from mapping file
:param splined_name_list: (Optional) List of unique properties to generate a splined vanadium name from
:param van_abs_file_name: (Optional) The name of the vanadium absorption file
:return: RunDetails object with attributes set to applicable values
"""
cal_map_dict = get_cal_mapping_dict(run_number_string=run_number_string,
cal_mapping_path=inst_settings.cal_mapping_path)
run_number = common.get_first_run_number(run_number_string=run_number_string)
# Get names of files we will be using
calibration_dir = os.path.normpath(os.path.expanduser(inst_settings.calibration_dir))
label = common.cal_map_dictionary_key_helper(dictionary=cal_map_dict, key="label")
offset_file_name = common.cal_map_dictionary_key_helper(dictionary=cal_map_dict, key="offset_file_name")
# Prepend the properties used for creating a van spline so we can fingerprint the file
new_splined_list = splined_name_list if splined_name_list else []
new_splined_list.append(os.path.basename(offset_file_name))
splined_van_name = common.generate_splined_name(vanadium_string, new_splined_list)
unsplined_van_name = common.generate_unsplined_name(vanadium_string, new_splined_list)
if is_vanadium_run:
# The run number should be the vanadium number in this case
run_number = vanadium_string
output_run_string = vanadium_string if is_vanadium_run else run_number_string
# Get the file extension if set
file_extension = getattr(inst_settings, "file_extension")
if file_extension:
# Prefix dot if user has forgotten to
file_extension = file_extension if file_extension.startswith('.') else '.' + file_extension
# Get the output name suffix if set
suffix = getattr(inst_settings, "suffix", None)
# Sample empty if there is one as this is instrument specific
sample_empty = getattr(inst_settings, "sample_empty", None)
# By default, offset file sits in the calibration folder, but it can also be given as an absolute path
if os.path.exists(offset_file_name):
offset_file_path = offset_file_name
else:
offset_file_path = os.path.join(calibration_dir, label, offset_file_name)
# Generate the paths
grouping_file_path = os.path.join(calibration_dir, grouping_file_name)
splined_van_path = os.path.join(calibration_dir, label, splined_van_name)
unsplined_van_path = os.path.join(calibration_dir, label, unsplined_van_name)
van_absorb_path = os.path.join(calibration_dir, van_abs_file_name) if van_abs_file_name else None
return _RunDetails(empty_run_number=empty_run_number, file_extension=file_extension,
run_number=run_number, output_run_string=output_run_string, label=label,
offset_file_path=offset_file_path, grouping_file_path=grouping_file_path,
splined_vanadium_path=splined_van_path, vanadium_run_number=vanadium_string,
sample_empty=sample_empty, vanadium_abs_path=van_absorb_path,
unsplined_vanadium_path=unsplined_van_path, output_suffix=suffix)
def get_cal_mapping_dict(run_number_string, cal_mapping_path):
# Get the python dictionary from the YAML mapping
run_number = common.get_first_run_number(run_number_string=run_number_string)
cal_mapping_dict = yaml_parser.get_run_dictionary(run_number_string=run_number,
file_path=cal_mapping_path)
return cal_mapping_dict
class _RunDetails(object):
"""
This class holds the full file paths associated with each run and various other useful attributes
"""
def __init__(self, empty_run_number, file_extension, run_number, output_run_string, label,
offset_file_path, grouping_file_path, splined_vanadium_path, vanadium_run_number,
sample_empty, vanadium_abs_path, unsplined_vanadium_path, output_suffix):
        # Essential attributes
self.empty_runs = empty_run_number
self.run_number = run_number
self.output_run_string = output_run_string
self.label = label
self.offset_file_path = offset_file_path
self.grouping_file_path = grouping_file_path
self.splined_vanadium_file_path = splined_vanadium_path
self.unsplined_vanadium_file_path = unsplined_vanadium_path
self.vanadium_run_numbers = vanadium_run_number
# Optional
self.file_extension = str(file_extension) if file_extension else None
self.sample_empty = sample_empty
self.vanadium_absorption_path = vanadium_abs_path
self.output_suffix = output_suffix
| mganeva/mantid | scripts/Diffraction/isis_powder/routines/run_details.py | Python | gpl-3.0 | 5,875 | 0.005447 |
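A hedged sketch of calling the factory above; every argument value is a placeholder, and inst_settings stands for an instrument settings object carrying the attributes read above:
run_details = create_run_details_object(
    run_number_string='12345',
    inst_settings=inst_settings,
    is_vanadium_run=False,
    empty_run_number='12300',
    grouping_file_name='grouping.cal',
    vanadium_string='12200')
print(run_details.output_run_string, run_details.offset_file_path)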
import logging
import vcf
from typing import List, Tuple
_logger = logging.getLogger(__name__)
# TODO: for now I'm going to do the lazy thing and just traverse the VCF each time for each sample
def read_sample_segments_and_calls(intervals_vcf: str,
clustered_vcf: str,
sample_name: str,
contig: str) -> List[Tuple[int, int, int]]:
"""
Get the segmentation "path" to use for calculating qualities based on the VCF with clustered breakpoints
:param intervals_vcf:
:param clustered_vcf:
:param sample_name:
:param contig:
:return: {copy number, start index, stop index (inclusive)}
"""
intervals = vcf.Reader(filename=intervals_vcf)
intervals2 = vcf.Reader(filename=intervals_vcf)
segments = vcf.Reader(filename=clustered_vcf)
path: List[Tuple[int, int, int]] = []
segment_start_index = 0
segment_end_index = 0
# A record corresponds to [CHROM,POS,REF,ALT]
try:
interval_start_iter = iter(intervals.fetch(contig))
interval_end_iter = iter(intervals2.fetch(contig))
except ValueError:
print('ERROR: could not fetch intervals')
raise
else:
start_interval = next(interval_start_iter)
end_interval = next(interval_end_iter)
intervals_copy_number = try_getting_format_attribute(end_interval, sample_name, 'CN')
try:
segments_iter = iter(segments.fetch(contig))
except ValueError:
return path
else:
segments_rec = next(segments_iter)
segment_copy_number = try_getting_format_attribute(segments_rec, sample_name, 'CN')
# we assume segments are sorted by start, but may be overlapping
while segments_rec is not None and start_interval is not None:
# make sure interval start matches
while start_interval is not None and start_interval.POS < segments_rec.POS:
try:
start_interval = next(interval_start_iter)
segment_start_index += 1
end_interval = next(interval_end_iter)
segment_end_index += 1
except StopIteration:
print('ERROR: ran out of intervals with unmatched segments remaining')
raise
# once start matches, move the interval end
while end_interval is not None and try_getting_info_attribute(segments_rec, 'END') > \
try_getting_info_attribute(end_interval, 'END'):
try:
end_interval = next(interval_end_iter)
segment_end_index += 1
intervals_copy_number = try_getting_format_attribute(end_interval, sample_name, 'CN')
except StopIteration:
print('WARN: ran out of intervals with segment end unmatched')
end_interval = None
# add the segment
if segment_end_index < segment_start_index:
            print('Sample {0} contains segment at {1}:{2} with end index less than start index'.format(sample_name, contig, segments_rec.POS))
path.append((segment_copy_number, segment_start_index, segment_end_index))
# do this the dumb way because each reader gets the same iterator
segment_end_index = 0
interval_end_iter = iter(intervals2.fetch(contig))
end_interval = next(interval_end_iter)
# get the next segment
try:
segments_rec = next(segments_iter)
segment_copy_number = try_getting_format_attribute(segments_rec, sample_name, 'CN')
except StopIteration:
segments_rec = None
segments_iter = None
return path
def try_getting_info_attribute(record,
attribute: str) -> int:
try:
value = record.INFO[attribute]
    except KeyError:
print('No {} field for record at position:{}'.format(attribute, record.POS))
else:
return value
def try_getting_format_attribute(record,
sample_name: str,
attribute: str) -> int:
try:
value = record.genotype(sample_name)[attribute]
except AttributeError:
print('No {} field for {} intervals at position:{}'.format(attribute, sample_name, record.POS))
else:
return value
| broadinstitute/hellbender | src/main/python/org/broadinstitute/hellbender/gcnvkernel/io/io_vcf_parsing.py | Python | bsd-3-clause | 4,367 | 0.002977 |
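A hedged example of calling the parser above; the import path, VCF paths, sample name, and contig are placeholders:
from gcnvkernel.io.io_vcf_parsing import read_sample_segments_and_calls  # assumed import path
# Each tuple is (copy_number, start_interval_index, end_interval_index).
path = read_sample_segments_and_calls(
    intervals_vcf='sample.intervals.vcf.gz',
    clustered_vcf='cohort.clustered.vcf.gz',
    sample_name='SAMPLE_001',
    contig='1')
for copy_number, start_idx, end_idx in path:
    print('CN=%d over intervals [%d, %d]' % (copy_number, start_idx, end_idx))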
# ARP Suite - Run ARP Commands From Command Line
import sys
import arp_mitm as mitm
import arp_sslstrip as sslstrip
import arp_listen as listen
import arp_request as request
import arp_cache as cache
import arp_reconnaissance as recon
import arp_interactive as interactive
if __name__ == "__main__":
arguments = sys.argv[1:]
if '-h' in arguments or '--help' in arguments:
print '[INFO]\tARP Suite\n'
        print '[USAGE] arp.py -c/i/L/m/r\n'
        print '[FUNCTIONS]'
        print '  -c --cache = Work with ARP Cache.'
        print '  -i --interactive = Runs Interactive ARP Suite.'
        print '  -L --listen = Runs an arpclient in Listen Mode.'
        print '  -m --mitm = Launch an ARP Poisoning Man-in-the-Middle Attack.'
        print '  -r --request = Generate an ARP Request Message.'
        print '  --sslstrip = Launch an SSL Strip Attack.'
        print '  --recon = Discover Addresses on the Network.'
        print '\n\t* Use --h with any of these functions to learn more about them.'
print '\t\tex. arp.py -c --h'
print ''
sys.exit(1)
if '-i' in arguments or '--interactive' in arguments:
interactive.run()
sys.exit(1)
if '-L' in arguments or'--listen' in arguments:
if '--h' in arguments:
print '[INFO]\tCreates an instance of arpclient in listen mode.'
print '\tHandles ARP Messages and ARP Table.'
print ''
            print '[USAGE] arp.py -L\n'
print '[ARGUMENTS]'
print '\tNONE'
sys.exit(1)
listen.listen()
sys.exit(1)
if '-r' in arguments or '--request' in arguments:
if '--h' in arguments:
print '[INFO]\tCreate an ARP Request message to given IP Address.'
print '\tMake sure there is an instance of arpclient in listen mode'
            print '\tto handle ARP messages and manipulate ARP table ("arp.py -L").'
print ''
print '[USAGE] arp.py -r --ip [ip]\n'
print '[ARGUMENTS]'
print '\t"--ip" = IP Address You Wish To Resolve'
print ''
sys.exit(1)
if '--ip' in arguments:
option_index = arguments.index('--ip')
ip = arguments[option_index+1]
else:
print 'Missing Argument!'
            print 'See help for request by typing "python arp.py -r --h"'
sys.exit(0)
request.send(ip)
sys.exit(1)
if '-c' in arguments or '--cache' in arguments:
if '--h' in arguments:
print '[INFO]\tWork with the ARP Cache\n'
print '[USAGE] arp.py -c --d/l/a/r --i [ip] --m [mac]\n'
print '[ARGUMENTS]'
print '"--d" = Display ARP Cache.'
print '"--l" = Look Up ARP Cache. Must Specify Either Address'
print '"--a" = Add ARP Cache Entry. Must Specify Both Addresses'
print '"--r" = Remove ARP Cache Entry. Must Specify Both Addresses'
print '"--i" = An IP Address'
print '"--m" = A MAC Address'
            print ''
            sys.exit(1)
# Display
if '--d' in arguments:
cache.cache(1)
# Look Up
if '--l' in arguments:
if '--i' in arguments:
option_index = arguments.index('--i')
ipoption = arguments[option_index+1]
cache.cache(2,ip=ipoption)
sys.exit(1)
elif '--m' in arguments:
option_index = arguments.index('--m')
macoption = arguments[option_index+1]
cache.cache(2,mac=macoption)
sys.exit(1)
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
# ADD an Entry
if '--a' in arguments:
if '--i' in arguments: # use --i to indicate you are giving an ip address
option_index = arguments.index('--i')
ipoption = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
if '--m' in arguments: # use --m to indicate you are giving a mac address
option_index = arguments.index('--m')
macoption = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
cache.cache(3,ip=ipoption,mac=macoption)
sys.exit(1)
# REMOVE an Entry
if '--r' in arguments:
if '--i' in arguments: # use --i to indicate you are giving an ip address
option_index = arguments.index('--i')
ipoption = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
if '--m' in arguments: # use --m to indicate you are giving a mac address
option_index = arguments.index('--m')
macoption = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for cache by typing "python arp.py -c --h"'
sys.exit(0)
cache.cache(4,ip=ipoption,mac=macoption)
sys.exit(1)
if '-m' in arguments or '--mitm' in arguments:
if '--h' in arguments:
print '[Info]\tLaunch an ARP Poisoning Man in the Middle Attack.\n'
print '[Usage] arp.py -m --aI [ip] --aM [mac] --bI [ip] --bM [mac]\n'
print '[Arguments]'
print '\t"--aI" = target A\'s IP Address'
print '\t"--aM" = target A\'s MAC Address'
print '\t"--bI" = target B\'s IP Address'
print '\t"--bM" = target B\'s MAC Address'
print ''
sys.exit(1)
if '--aI' in arguments:
option_index = arguments.index('--aI')
aIP = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
if '--aM' in arguments:
option_index = arguments.index('--aM')
aMAC = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
if '--bI' in arguments:
option_index = arguments.index('--bI')
bIP = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
if '--bM' in arguments:
option_index = arguments.index('--bM')
bMAC = arguments[option_index+1]
else:
print 'Missing Argument!'
print 'See help for mitm by typing "python arp.py -m --h"'
sys.exit(0)
mitm.mitm(aIP,aMAC,bIP,bMAC)
sys.exit(1)
if '--sslstrip' in arguments:
if '--h' in arguments:
print '[Info]\tLaunch a SSL Strip Attack.\n'
print '[Usage] arp.py --sslstrip --gI [ip] --gM [mac] --tI [ip] --tM [mac]\n'
print '[Arguments]'
print '\t"--gI" = gateway\'s IP Address'
print '\t"--gM" = gateway\'s MAC Address'
print '\t"--tI" = target\'s IP Address'
print '\t"--tM" = target\'s MAC Address'
print ''
sys.exit(1)
if '--gI' in arguments:
option_index = arguments.index('--gI')
gIP = arguments[option_index+1]
else:
print 'Missing Argument!'
            print 'See help for sslstrip by typing "python arp.py --sslstrip --h"'
sys.exit(0)
if '--gM' in arguments:
option_index = arguments.index('--gM')
gMAC = arguments[option_index+1]
else:
print 'Missing Argument!'
            print 'See help for sslstrip by typing "python arp.py --sslstrip --h"'
sys.exit(0)
if '--tI' in arguments:
option_index = arguments.index('--tI')
tIP = arguments[option_index+1]
else:
print 'Missing Argument!'
            print 'See help for sslstrip by typing "python arp.py --sslstrip --h"'
sys.exit(0)
if '--tM' in arguments:
option_index = arguments.index('--tM')
tMAC = arguments[option_index+1]
else:
print 'Missing Argument!'
            print 'See help for sslstrip by typing "python arp.py --sslstrip --h"'
sys.exit(0)
sslstrip.sslstrip(gIP,gMAC,tIP,tMAC)
sys.exit(1)
if '--recon' in arguments:
if '--h' in arguments:
            print '[Info]\tLearn Addresses of Those on the Network.\n'
            print '[Usage] arp.py --recon --ip [iprange], wildcards * allowed\n'
            print '[Arguments]'
            print '\t"--ip" = A Range of IP Addresses to Scan'
            sys.exit(1)
if '--ip' in arguments:
option_index = arguments.index('--ip')
iprange = arguments[option_index+1]
recon.run(str(iprange))
sys.exit(1)
else:
print 'Missing Argument!'
            print 'See help for recon by typing "python arp.py --recon --h"'
sys.exit(0)
| monkeesuit/school | Network Security/ARP/arp suite/py/arp.py | Python | mit | 7,562 | 0.031605 |
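Example invocations of the suite above; all addresses are placeholders:
# python arp.py -L                                             # start a listener first
# python arp.py -r --ip 192.168.1.10                           # send an ARP request
# python arp.py -c --a --i 192.168.1.10 --m aa:bb:cc:dd:ee:ff  # add a cache entry
# python arp.py --recon --ip 192.168.1.*                       # scan a range (wildcards allowed)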
import sys
reload(sys)
sys.setdefaultencoding("utf8")
from urllib import urlencode
from flask import jsonify, redirect, url_for, abort, request, render_template
from syzoj import oj, controller
from syzoj.models import User, Problem, File, FileParser
from syzoj.controller import Paginate, Tools
from .common import need_login, not_have_permission, show_error
@oj.route("/problem")
def problem_set():
query = Problem.query
problem_title = request.args.get("problem_title")
if request.args.get("problem_title"):
query = query.filter(Problem.title.like((u"%" + problem_title + u"%")))
else:
problem_title = ''
def make_url(page, other):
other["page"] = page
return url_for("problem_set") + "?" + urlencode(other)
sorter = Paginate(query, make_url=make_url, other={"problem_title": problem_title},
cur_page=request.args.get("page"), edge_display_num=50, per_page=50)
return render_template("problem_set.html", tool=Tools, tab="problem_set", sorter=sorter, problems=sorter.get())
@oj.route("/problem/<int:problem_id>")
def problem(problem_id):
user = User.get_cur_user()
problem = Problem.query.filter_by(id=problem_id).first()
if not problem:
abort(404)
if problem.is_allowed_use(user) == False:
return not_have_permission()
return render_template("problem.html", tool=Tools, tab="problem_set", problem=problem)
@oj.route("/problem/<int:problem_id>/edit", methods=["GET", "POST"])
def edit_problem(problem_id):
user = User.get_cur_user()
if not user:
return need_login()
problem = Problem.query.filter_by(id=problem_id).first()
if problem and problem.is_allowed_edit(user) == False:
return not_have_permission()
if request.method == "POST":
if not problem:
problem_id = controller.create_problem(user=user, title=request.form.get("title"))
problem = Problem.query.filter_by(id=problem_id).first()
problem.update(title=request.form.get("title"),
description=request.form.get("description"),
input_format=request.form.get("input_format"),
output_format=request.form.get("output_format"),
example=request.form.get("example"),
limit_and_hint=request.form.get("limit_and_hint"))
problem.save()
return redirect(url_for("problem", problem_id=problem.id))
else:
return render_template("edit_problem.html", tool=Tools, problem=problem)
@oj.route("/problem/<int:problem_id>/upload", methods=["GET", "POST"])
def upload_testdata(problem_id):
user = User.get_cur_user()
if not user:
return need_login()
problem = Problem.query.filter_by(id=problem_id).first()
if not problem:
abort(404)
if problem.is_allowed_edit(user) == False:
return not_have_permission()
if request.method == "POST":
file = request.files.get("testdata")
if file:
problem.update_testdata(file)
if request.form.get("time_limit"):
problem.time_limit = int(request.form.get("time_limit"))
if request.form.get("memory_limit"):
problem.memory_limit = int(request.form.get("memory_limit"))
problem.save()
return redirect(url_for("upload_testdata", problem_id=problem_id))
else:
return render_template("upload_testdata.html", tool=Tools, problem=problem, parse=FileParser.parse_as_testdata)
# TODO: Maybe add a method to Problem for toggling the is_public attr
@oj.route("/api/problem/<int:problem_id>/public", methods=["POST", "DELETE"])
def change_public_attr(problem_id):
session_id = request.args.get('session_id')
user = User.get_cur_user(session_id=session_id)
problem = Problem.query.filter_by(id=problem_id).first()
if problem and user and user.have_privilege(2):
if request.method == "POST":
problem.is_public = True
elif request.method == "DELETE":
problem.is_public = False
problem.save()
else:
abort(404)
return jsonify({"status": 0})
| cdcq/jzyzj | syzoj/views/problem.py | Python | mit | 4,193 | 0.00477 |
# -*- coding: utf-8 -*-
from text.classifiers import NaiveBayesClassifier
from textblob import TextBlob
import feedparser
import time
import redis
import hashlib
import json
TIMEOUT = 60*60
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
def feature_extractor(text):
if not isinstance(text, TextBlob):
text = TextBlob(text.lower())
return {
'has_rumor': 'rumor' in text.words,
'has_gosip': 'gosip' in text.words,
'has_urbanesia': 'urbanesia' in text.words,
'has_batista': 'batista' in text.words,
'has_harahap': 'harahap' in text.words,
'has_pemasaran': 'pemasaran' in text.words,
'has_saham': 'saham' in text.words,
'has_hackathon': 'hackathon' in text.words,
'has_ipo': 'ipo' in text.words,
'has_akuisisi': 'akuisisi' in text.words,
'has_startup': 'startup' in text.words,
'has_android': 'android' in text.words,
'has_aplikasi': 'aplikasi' in text.words,
'has_payment': 'payment' in text.words,
'has_pembayaran': 'pembayaran' in text.words,
'has_api': 'api' in text.words,
'has_kompetisi': 'kompetisi' in text.words,
'has_ide': 'ide' in text.words,
'has_permainan': 'permainan' in text.words,
'has_game': 'game' in text.words,
'has_fundraising': 'fundraising' in text.words,
'has_askds': '[Ask@DailySocial]' in text.words,
'has_investasi': 'investasi' in text.words,
'has_musik': 'musik' in text.words,
'has_lagu': 'lagu' in text.words,
'has_bhinneka': 'bhinneka' in text.words,
'has_marketplace': 'marketplace' in text.words,
'has_mobile': 'mobile' in text.words,
'has_cto': 'cto' in text.words,
'has_traffic': 'traffic' in text.words,
'starts_with_[': text[0] == '['
}
train_set = [
('Berbarengan dengan Launch Festival, Ice House Buka Kompetisi Wujudkan Ide-Ide Aplikasi Mobile.', 'ok'),
('Ulang Tahun Ke-21, Layanan E-Commerce Bhinneka Segera Perbarui Platform E-Commerce dan Luncurkan Marketplace Terkurasi.', 'ko'),
('Aplikasi Pencatat Blastnote Hadir di Android.', 'ok'),
('Portal Hiburan Digital UZone Kini Hadir Dalam Versi Aplikasi Mobile.', 'ok'),
('CTI IT Infrastructure Summit 2014 Bahas Big Data Sebagai Tren Teknologi', 'ko'),
('Dua Berita Buruk Besar Bagi Blackberry', 'ok'),
('Tanggapan Pelaku Industri Digital di Indonesia tentang Fenomena Permainan Mobile Flappy Bird', 'ok'),
('[Ask@DailySocial] Proses Fundraising Untuk Startup', 'ok'),
('Investasi $1 Miliar, Foxconn Pastikan Bangun Pabriknya di DKI Jakarta', 'ok'),
('Raksasa Digital Cina Tencent Dikabarkan Akuisisi Portal Berita Okezone', 'ko'),
('Wego Tawarkan Akses Reservasi Tiket dan Hotel Lebih Mudah Melalui Aplikasi Mobile', 'ok'),
('Telkom Hadirkan Agen Wisata Online Hi Indonesia', 'ko'),
('Meski Didera Isu Fake Likes, Facebook Tetap Jadi Pilihan Utama Untuk Pemasaran Digital', 'ok'),
('Dave Morin Pastikan Saham Bakrie Global Group di Path Kurang dari 1%', 'ok'),
('Kecil Kemungkinan Pemerintah Tutup Telkomsel dan Indosat Terkait Dugaan Penyadapan oleh Australia', 'ok'),
('Kakao Dikabarkan Gelar Penawaran Saham Perdana Tahun Depan', 'ok'),
('Ericsson Akan Hadirkan Layanan Streaming TV', 'ok'),
('Ryu Kawano: Ingin Startup Anda Go Global? Tunggu Dulu!', 'ok'),
('Kerja Sama dengan GHL Systems Malaysia, Peruri Digital Security Kembangkan Sistem Pembayaran Online', 'ok'),
('Aplikasi Logbook Travel Kini Telah Hadir di Android', 'ok'),
('Musikator Hadirkan Layanan Agregator Lagu Untuk Distribusi Digital', 'ok'),
('[Manic Monday] Strategi Produksi Konten Di Era Multilayar', 'ok'),
('Bakrie Telecom Jajaki Kemungkinan Carrier Billing untuk Path', 'ok'),
('Viber Secara Resmi Telah Diakuisisi Oleh Rakuten Sebesar US$ 900 Juta', 'ok'),
('Situs Panduan Angkutan Umum Kiri.travel Buka API, Tantang Pengembang Buat Aplikasi Windows Phone', 'ok'),
('Wego Luncurkan Jaringan Afiliasi WAN.Travel', 'ko'),
('Business Insider Masuki Pasar Indonesia Bekerja Sama dengan REV Asia', 'ko'),
('Waze Memiliki 750.000 Pengguna di Indonesia', 'ok'),
('Survei Nielsen: Masyarakat Asia Tenggara Lebih Suka Gunakan Uang Tunai untuk Belanja Online', 'ok'),
('CTI IT Infrastructure Summit 2014 Bahas Big Data Sebagai Tren Teknologi', 'ko'),
('Pacu Bisnis di Asia Tenggara, Game Online Asing Kini Lebih Lokal', 'ko'),
('Enam Pilihan Layanan Streaming Musik Yang Dapat Dinikmati di Indonesia', 'ok'),
('Country Manager Yahoo Indonesia Roy Simangunsong Mengundurkan Diri', 'ko'),
('Investasi $1 Miliar, Foxconn Pastikan Bangun Pabriknya di DKI Jakarta', 'ok'),
('Jomblo.com Tawarkan Media Sosial Untuk Mencari Jodoh', 'ko'),
('Mitra Adiperkasa dan Groupon Pilih aCommerce Indonesia untuk Pusat Logistik dan Pengiriman Layanan E-Commerce', 'ko'),
('Transformasi Portal Informasi Kecantikan Female Daily Disambut Positif, Beberkan Rencana-Rencana 2014', 'ko'),
('Visa Gelar Promosi Diskon Setiap Jumat Bekerja Sama dengan Enam Layanan E-Commerce Lokal', 'ko'),
('Kerjasama Strategis, Blue Bird Group Benamkan Teknologi Interkoneksi Microsoft Ke Armada Premium Big Bird', 'ko'),
('Ramaikan Industri Fashion E-Commerce Indonesia, VIP Plaza Hadir Tawarkan Promo Flash Sale', 'ko'),
('Bidik Citizen Journalism, Detik Hadirkan Media Warga PasangMata', 'ko'),
('Asia Pasifik Jadi Kawasan E-Commerce B2C Terbesar di Dunia Tahun 2014', 'ko'),
('CTO Urbanesia Batista Harahap Mengundurkan Diri', 'ok'),
('Tees Indonesia Alami Peningkatan Traffic Hingga 7x, Namun Tidak Seperti Yang Anda Kira', 'ok')
]
cl = NaiveBayesClassifier(train_set=train_set,
feature_extractor=feature_extractor)
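# Sanity-check sketch (hypothetical title; accuracy is bounded by the small
# hand-labelled train_set above):
#   print cl.classify('Aplikasi Pembayaran Mobile Luncurkan API')  # -> 'ok'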
redis_conn = redis.StrictRedis(host=REDIS_HOST,
port=REDIS_PORT)
def get_feed():
feed_url = 'http://feeds.feedburner.com/dsnet?format=xml'
feeds = feedparser.parse(feed_url).get('entries')
if feeds is None:
return
def process_entry(entry):
        def process_tags(tags):
            # Feeds occasionally omit tags; treat a missing list as empty.
            return [tag.get('term') for tag in tags or []]
cls = cl.classify(text=entry.get('title'))
data = {
'author': entry.get('author'),
'title': entry.get('title'),
'link': entry.get('link'),
'published': int(time.mktime(entry.get('published_parsed'))),
'summary': entry.get('summary'),
'tags': process_tags(entry.get('tags')),
'class': cls
}
return data if cls == 'ok' else None
feeds = [process_entry(entry) for entry in feeds]
return [entry for entry in feeds if entry is not None]
def md5(text):
m = hashlib.md5()
m.update(text.encode('utf-8'))
return m.hexdigest()
def cycle():
try:
posts = get_feed()
except KeyError:
print 'Unreadable RSS feed, bailing..'
return
if not posts:
print 'Got nothing, bailing..'
return
def redis_insert(post):
name = 'ds-articles-ok'
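        # Legacy redis-py call signature: zadd(name, score, member).
        # redis-py >= 3.0 expects zadd(name, {member: score}) instead
        # (assumption about the installed client version).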
redis_conn.zadd(name, post.get('published'), json.dumps(post))
[redis_insert(post=post) for post in posts]
print 'Got %d posts this time.' % len(posts)
if __name__ == '__main__':
print 'Starting up..'
while True:
cycle()
print 'Sleeping for %s seconds.' % TIMEOUT
        time.sleep(TIMEOUT)
| tistaharahap/ds-for-me | extractor.py | Python | mit | 7,464 | 0.004555 |
from typing import Sequence
from numbers import Number
from tabulate import tabulate
class Matrix(Sequence):
def __init__(self, matrix: Sequence[Sequence[float]]):
        assert (isinstance(matrix, Sequence) and
                isinstance(matrix[0], Sequence)), "Wrong data"
self.__matrix = [[float(x) for x in row] for row in matrix]
@staticmethod
def one(rows: int, columns: int):
        # Identity-like matrix: ones on the main diagonal, zeros elsewhere.
        return Matrix([
            [1 if i == j else 0 for j in range(columns)] for i in range(rows)
        ])
@staticmethod
def zero(rows: int, columns: int):
        return Matrix([[0] * columns for _ in range(rows)])
def __repr__(self):
return 'Matrix({})'.format(self.__matrix)
def __str__(self):
return tabulate(self.__matrix)
def __len__(self):
return len(self.__matrix)
def __getitem__(self, item):
return self.__matrix.__getitem__(item)
def __iter__(self):
return iter(self.__matrix)
def __mul__(self, other):
assert isinstance(other, Sequence)
        # The number of columns of self must equal the number of rows
        # (or elements) of other.
assert len(self.__matrix[0]) == len(other), "Wrong data"
if isinstance(other[0], Sequence):
return Matrix([
[
sum(self[i][k] * other[k][j] for k in range(len(other))) for j in range(len(other[0]))
] for i in range(len(self))
])
else:
return [
sum(x * y for x, y in zip(row, other)) for row in self
]
def __rmul__(self, other):
assert isinstance(other, Number)
return Matrix([
[other * x for x in row] for row in self.__matrix
])
def __add__(self, other):
assert (isinstance(other, Sequence) and
isinstance(other[0], Sequence) and
len(self) == len(other) and
len(self[0]) == len(other[0])), "Wrong data"
return Matrix([
[x + y for x, y in zip(r1, r2)] for r1, r2 in zip(self.__matrix, other)
])
def __neg__(self):
return Matrix([
[-x for x in row] for row in self.__matrix
])
def __sub__(self, other):
        assert (isinstance(other, Sequence) and
                isinstance(other[0], Sequence) and
                len(self) == len(other) and
                len(self[0]) == len(other[0])), "Wrong data"
return Matrix([
[x - y for x, y in zip(r1, r2)] for r1, r2 in zip(self, other)
])
@property
def shape(self):
return len(self.__matrix), len(self.__matrix[0])
if __name__ == '__main__':
m = Matrix([[1, 2, 1], [2, 3, 0]])
a = Matrix([[1, 0, 0], [2, 1, 0], [1, 1, 0]])
# print(m, m.shape)
# print(a, a.shape)
print(m * a)
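    # Extra usage sketch (shapes assumed from ``m`` above): scalar product
    # and matrix-vector product.
    print(2 * m)          # every entry doubled
    print(m * [1, 0, 1])  # row dot products -> [2.0, 2.0]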
| FeodorM/Computer-Graphics | util/matrix.py | Python | mit | 2,886 | 0.000704 |
"""Code for constructing CTMCs and computing transition probabilities
in them."""
from numpy import zeros
from scipy import matrix
from scipy.linalg import expm
class CTMC(object):
"""Class representing the CTMC for the back-in-time coalescent."""
def __init__(self, state_space, rates_table):
"""Create the CTMC based on a state space and a mapping
from transition labels to rates.
:param state_space: The state space the CTMC is over.
:type state_space: IMCoalHMM.CoalSystem
:param rates_table: A table where transition rates can
be looked up.
:type rates_table: dict
"""
# Remember this, just to decouple state space from CTMC
# in other parts of the code...
self.state_space = state_space
# noinspection PyCallingNonCallable
self.rate_matrix = matrix(zeros((len(state_space.states),
len(state_space.states))))
for src, trans, dst in state_space.transitions:
self.rate_matrix[src, dst] = rates_table[trans]
for i in xrange(len(state_space.states)):
self.rate_matrix[i, i] = - self.rate_matrix[i, :].sum()
self.prob_matrix_cache = dict()
def probability_matrix(self, delta_t):
"""Computes the transition probability matrix for a
time period of delta_t.
:param delta_t: The time period the CTMC should run for.
:type delta_t: float
:returns: The probability transition matrix
:rtype: matrix
"""
        if delta_t not in self.prob_matrix_cache:
self.prob_matrix_cache[delta_t] = expm(self.rate_matrix * delta_t)
return self.prob_matrix_cache[delta_t]
# We cache the CTMCs because in the optimisations, especially the models with a large number
# of parameters, we are creating the same CTMCs again and again and computing the probability
# transition matrices is where we spend most of the time.
from cache import Cache
CTMC_CACHE = Cache()
def make_ctmc(state_space, rates_table):
"""Create the CTMC based on a state space and a mapping
from transition labels to rates.
:param state_space: The state space the CTMC is over.
:type state_space: IMCoalHMM.CoalSystem
:param rates_table: A table where transition rates can be looked up.
:type rates_table: dict
"""
cache_key = (state_space, tuple(rates_table.items()))
    if cache_key not in CTMC_CACHE:
CTMC_CACHE[cache_key] = CTMC(state_space, rates_table)
return CTMC_CACHE[cache_key]
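# Usage sketch (hypothetical state space and rates table; a real CoalSystem
# supplies ``states`` and ``transitions``):
#   ctmc = make_ctmc(state_space, {'coal': 1.0, 'recomb': 0.4})
#   probs = ctmc.probability_matrix(0.1)  # cached expm(Q * 0.1)
# Mathematically this is P(t) = exp(Q t) for the rate matrix Q built above.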
| mailund/IMCoalHMM | src/IMCoalHMM/CTMC.py | Python | gpl-2.0 | 2,581 | 0.001937 |
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_policy import policy as oslo_policy
import webob
from nova.api.openstack.compute import keypairs as keypairs_v21
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import policy
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_keypair
QUOTAS = quota.QUOTAS
keypair_data = {
'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
}
FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
name=name, **keypair_data)
def db_key_pair_get_all_by_user(self, user_id, limit, marker):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
return fake_keypair(name=keypair['name'])
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_create_duplicate(context):
raise exception.KeyPairExists(key_name='create_duplicate')
class KeypairsTestV21(test.TestCase):
base_url = '/v2/%s' % fakes.FAKE_PROJECT_ID
validation_error = exception.ValidationError
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
def _setup_app_and_controller(self):
self.app_server = fakes.wsgi_app_v21()
self.controller = keypairs_v21.KeypairController()
def setUp(self):
super(KeypairsTestV21, self).setUp()
fakes.stub_out_networking(self)
fakes.stub_out_secgroup_api(self)
self.stub_out("nova.db.api.key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stub_out("nova.db.api.key_pair_create",
db_key_pair_create)
self.stub_out("nova.db.api.key_pair_destroy",
db_key_pair_destroy)
self._setup_app_and_controller()
self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
def test_keypair_list(self):
res_dict = self.controller.index(self.req)
response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
res_dict = self.controller.create(self.req, body=body)
self.assertGreater(len(res_dict['keypair']['fingerprint']), 0)
self.assertGreater(len(res_dict['keypair']['private_key']), 0)
self._assert_keypair_type(res_dict)
def _test_keypair_create_bad_request_case(self,
body,
exception):
self.assertRaises(exception,
self.controller.create, self.req, body=body)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_too_long(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_leading_trailing_spaces(self):
body = {
'keypair': {
'name': ' test '
}
}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
body = {'keypair': {'name': ' test '}}
self.req.set_legacy_v2()
res_dict = self.controller.create(self.req, body=body)
self.assertEqual('test', res_dict['keypair']['name'])
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
self._test_keypair_create_bad_request_case(body,
webob.exc.HTTPBadRequest)
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
self._test_keypair_create_bad_request_case(body,
webob.exc.HTTPBadRequest)
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
res_dict = self.controller.create(self.req, body=body)
# FIXME(ja): Should we check that public_key was sent to create?
self.assertGreater(len(res_dict['keypair']['fingerprint']), 0)
self.assertNotIn('private_key', res_dict['keypair'])
self._assert_keypair_type(res_dict)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_import_quota_limit(self, mock_check):
mock_check.side_effect = exception.OverQuota(overs='key_pairs',
usages={'key_pairs': 100})
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_create_quota_limit(self, mock_check):
mock_check.side_effect = exception.OverQuota(overs='key_pairs',
usages={'key_pairs': 100})
body = {
'keypair': {
'name': 'create_test',
},
}
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_create_over_quota_during_recheck(self, mock_check):
# Simulate a race where the first check passes and the recheck fails.
# First check occurs in compute/api.
exc = exception.OverQuota(overs='key_pairs', usages={'key_pairs': 100})
mock_check.side_effect = [None, exc]
body = {
'keypair': {
'name': 'create_test',
},
}
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
ctxt = self.req.environ['nova.context']
self.assertEqual(2, mock_check.call_count)
call1 = mock.call(ctxt, {'key_pairs': 1}, ctxt.user_id)
call2 = mock.call(ctxt, {'key_pairs': 0}, ctxt.user_id)
mock_check.assert_has_calls([call1, call2])
# Verify we removed the key pair that was added after the first
# quota check passed.
key_pairs = objects.KeyPairList.get_by_user(ctxt, ctxt.user_id)
names = [key_pair.name for key_pair in key_pairs]
self.assertNotIn('create_test', names)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_create_no_quota_recheck(self, mock_check):
# Disable recheck_quota.
self.flags(recheck_quota=False, group='quota')
body = {
'keypair': {
'name': 'create_test',
},
}
self.controller.create(self.req, body=body)
ctxt = self.req.environ['nova.context']
# check_deltas should have been called only once.
mock_check.assert_called_once_with(ctxt, {'key_pairs': 1},
ctxt.user_id)
def test_keypair_create_duplicate(self):
self.stub_out("nova.objects.KeyPair.create",
db_key_pair_create_duplicate)
body = {'keypair': {'name': 'create_duplicate'}}
ex = self.assertRaises(webob.exc.HTTPConflict,
self.controller.create, self.req, body=body)
self.assertIn("Key pair 'create_duplicate' already exists.",
ex.explanation)
@mock.patch('nova.objects.KeyPair.get_by_name')
def test_keypair_delete(self, mock_get_by_name):
mock_get_by_name.return_value = objects.KeyPair(
nova_context.get_admin_context(), **fake_keypair('FAKE'))
self.controller.delete(self.req, 'FAKE')
def test_keypair_get_keypair_not_found(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, 'DOESNOTEXIST')
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stub_out("nova.db.api.key_pair_destroy",
db_key_pair_get_not_found)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.req, 'FAKE')
def test_keypair_show(self):
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY',
type='ssh')
self.stub_out("nova.db.api.key_pair_get", _db_key_pair_get)
res_dict = self.controller.show(self.req, 'FAKE')
self.assertEqual('foo', res_dict['keypair']['name'])
self.assertEqual('XXX', res_dict['keypair']['public_key'])
self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
self._assert_keypair_type(res_dict)
def test_keypair_show_not_found(self):
def _db_key_pair_get(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stub_out("nova.db.api.key_pair_get", _db_key_pair_get)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, 'FAKE')
def _assert_keypair_type(self, res_dict):
self.assertNotIn('type', res_dict['keypair'])
class KeypairPolicyTestV21(test.NoDBTestCase):
KeyPairController = keypairs_v21.KeypairController()
policy_path = 'os_compute_api:os-keypairs'
def setUp(self):
super(KeypairPolicyTestV21, self).setUp()
@staticmethod
def _db_key_pair_get(context, user_id, name=None):
if name is not None:
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY',
type='ssh')
else:
return db_key_pair_get_all_by_user(context, user_id)
self.stub_out("nova.objects.keypair.KeyPair._get_from_db",
_db_key_pair_get)
self.req = fakes.HTTPRequest.blank('')
def test_keypair_list_fail_policy(self):
rules = {self.policy_path + ':index': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden,
self.KeyPairController.index,
self.req)
@mock.patch('nova.objects.KeyPairList.get_by_user')
def test_keypair_list_pass_policy(self, mock_get):
rules = {self.policy_path + ':index': ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
res = self.KeyPairController.index(self.req)
self.assertIn('keypairs', res)
def test_keypair_show_fail_policy(self):
rules = {self.policy_path + ':show': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden,
self.KeyPairController.show,
self.req, 'FAKE')
def test_keypair_show_pass_policy(self):
rules = {self.policy_path + ':show': ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
res = self.KeyPairController.show(self.req, 'FAKE')
self.assertIn('keypair', res)
def test_keypair_create_fail_policy(self):
body = {'keypair': {'name': 'create_test'}}
rules = {self.policy_path + ':create': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden,
self.KeyPairController.create,
self.req, body=body)
def _assert_keypair_create(self, mock_create, req):
mock_create.assert_called_with(req, 'fake_user', 'create_test', 'ssh')
@mock.patch.object(compute_api.KeypairAPI, 'create_key_pair')
def test_keypair_create_pass_policy(self, mock_create):
keypair_obj = objects.KeyPair(name='', public_key='',
fingerprint='', user_id='')
mock_create.return_value = (keypair_obj, 'dummy')
body = {'keypair': {'name': 'create_test'}}
rules = {self.policy_path + ':create': ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
res = self.KeyPairController.create(self.req, body=body)
self.assertIn('keypair', res)
req = self.req.environ['nova.context']
self._assert_keypair_create(mock_create, req)
def test_keypair_delete_fail_policy(self):
rules = {self.policy_path + ':delete': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden,
self.KeyPairController.delete,
self.req, 'FAKE')
@mock.patch('nova.objects.KeyPair.destroy_by_name')
def test_keypair_delete_pass_policy(self, mock_destroy):
rules = {self.policy_path + ':delete': ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.KeyPairController.delete(self.req, 'FAKE')
class KeypairsTestV22(KeypairsTestV21):
wsgi_api_version = '2.2'
def test_keypair_list(self):
res_dict = self.controller.index(self.req)
expected = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE',
type='ssh')}]}
self.assertEqual(expected, res_dict)
def _assert_keypair_type(self, res_dict):
self.assertEqual('ssh', res_dict['keypair']['type'])
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
pass
def test_create_server_keypair_name_with_leading_trailing_compat_mode(
self):
pass
class KeypairsTestV210(KeypairsTestV22):
wsgi_api_version = '2.10'
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
pass
def test_create_server_keypair_name_with_leading_trailing_compat_mode(
self):
pass
def test_keypair_list_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs?user_id=foo',
version=self.wsgi_api_version,
use_admin_context=True)
with mock.patch.object(self.controller.api, 'get_key_pairs') as mock_g:
self.controller.index(req)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
def test_keypair_list_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs?user_id=foo',
version=self.wsgi_api_version)
with mock.patch.object(self.controller.api, 'get_key_pairs'):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_keypair_show_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version,
use_admin_context=True)
with mock.patch.object(self.controller.api, 'get_key_pair') as mock_g:
self.controller.show(req, 'FAKE')
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
def test_keypair_show_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version)
with mock.patch.object(self.controller.api, 'get_key_pair'):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, req, 'FAKE')
def test_keypair_delete_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version,
use_admin_context=True)
with mock.patch.object(self.controller.api,
'delete_key_pair') as mock_g:
self.controller.delete(req, 'FAKE')
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
def test_keypair_delete_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version)
with mock.patch.object(self.controller.api, 'delete_key_pair'):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete, req, 'FAKE')
def test_keypair_create_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
version=self.wsgi_api_version,
use_admin_context=True)
body = {'keypair': {'name': 'create_test',
'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a'}}
with mock.patch.object(self.controller.api,
'create_key_pair',
return_value=(mock.MagicMock(), 1)) as mock_g:
res = self.controller.create(req, body=body)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('8861f37f-034e-4ca8-8abe-6d13c074574a', userid)
self.assertIn('keypair', res)
def test_keypair_import_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
version=self.wsgi_api_version,
use_admin_context=True)
body = {'keypair': {'name': 'create_test',
'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a',
'public_key': 'public_key'}}
with mock.patch.object(self.controller.api,
'import_key_pair') as mock_g:
res = self.controller.create(req, body=body)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('8861f37f-034e-4ca8-8abe-6d13c074574a', userid)
self.assertIn('keypair', res)
def test_keypair_create_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
version=self.wsgi_api_version)
body = {'keypair': {'name': 'create_test',
'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a'}}
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create,
req, body=body)
def test_keypair_list_other_user_invalid_in_old_microversion(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs?user_id=foo',
version="2.9",
use_admin_context=True)
with mock.patch.object(self.controller.api, 'get_key_pairs') as mock_g:
self.controller.index(req)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('fake_user', userid)
class KeypairsTestV235(test.TestCase):
base_url = '/v2/%s' % fakes.FAKE_PROJECT_ID
wsgi_api_version = '2.35'
def _setup_app_and_controller(self):
self.app_server = fakes.wsgi_app_v21()
self.controller = keypairs_v21.KeypairController()
def setUp(self):
super(KeypairsTestV235, self).setUp()
self._setup_app_and_controller()
@mock.patch("nova.db.api.key_pair_get_all_by_user")
def test_keypair_list_limit_and_marker(self, mock_kp_get):
mock_kp_get.side_effect = db_key_pair_get_all_by_user
req = fakes.HTTPRequest.blank(
self.base_url + '/os-keypairs?limit=3&marker=fake_marker',
version=self.wsgi_api_version, use_admin_context=True)
res_dict = self.controller.index(req)
mock_kp_get.assert_called_once_with(
req.environ['nova.context'], 'fake_user',
limit=3, marker='fake_marker')
response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE',
type='ssh')}]}
self.assertEqual(res_dict, response)
@mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
def test_keypair_list_limit_and_marker_invalid_marker(self, mock_kp_get):
mock_kp_get.side_effect = exception.MarkerNotFound(marker='unknown_kp')
req = fakes.HTTPRequest.blank(
self.base_url + '/os-keypairs?limit=3&marker=unknown_kp',
version=self.wsgi_api_version, use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
def test_keypair_list_limit_and_marker_invalid_limit(self):
req = fakes.HTTPRequest.blank(
self.base_url + '/os-keypairs?limit=abc&marker=fake_marker',
version=self.wsgi_api_version, use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.index,
req)
@mock.patch("nova.db.api.key_pair_get_all_by_user")
def test_keypair_list_limit_and_marker_invalid_in_old_microversion(
self, mock_kp_get):
mock_kp_get.side_effect = db_key_pair_get_all_by_user
req = fakes.HTTPRequest.blank(
self.base_url + '/os-keypairs?limit=3&marker=fake_marker',
version="2.30", use_admin_context=True)
self.controller.index(req)
mock_kp_get.assert_called_once_with(
req.environ['nova.context'], 'fake_user',
limit=None, marker=None)
class KeypairsTestV275(test.TestCase):
def setUp(self):
super(KeypairsTestV275, self).setUp()
self.controller = keypairs_v21.KeypairController()
@mock.patch("nova.db.api.key_pair_get_all_by_user")
@mock.patch('nova.objects.KeyPair.get_by_name')
def test_keypair_list_additional_param_old_version(self, mock_get_by_name,
mock_kp_get):
req = fakes.HTTPRequest.blank(
'/os-keypairs?unknown=3',
version='2.74', use_admin_context=True)
self.controller.index(req)
self.controller.show(req, 1)
with mock.patch.object(self.controller.api,
'delete_key_pair'):
self.controller.delete(req, 1)
def test_keypair_list_additional_param(self):
req = fakes.HTTPRequest.blank(
'/os-keypairs?unknown=3',
version='2.75', use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.index,
req)
def test_keypair_show_additional_param(self):
req = fakes.HTTPRequest.blank(
'/os-keypairs?unknown=3',
version='2.75', use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.show,
req, 1)
def test_keypair_delete_additional_param(self):
req = fakes.HTTPRequest.blank(
'/os-keypairs?unknown=3',
version='2.75', use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.delete,
req, 1)
| rahulunair/nova | nova/tests/unit/api/openstack/compute/test_keypairs.py | Python | apache-2.0 | 27,057 | 0 |
from webargs import fields
from ..api.validators import Email, password
user_args = {
'email': fields.Str(validate=Email, required=True),
'password': fields.Str(validate=password, required=True)
}
role_args = {
'name': fields.Str(required=True),
'description': fields.Str(required=True)
}
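# Usage sketch (hypothetical Flask view; ``use_args`` is webargs' standard
# Flask integration):
#   from webargs.flaskparser import use_args
#
#   @app.route('/users', methods=['POST'])
#   @use_args(user_args)
#   def create_user(args):
#       return register(email=args['email'], password=args['password'])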
| teracyhq/flask-boilerplate | app/api_1_0/args.py | Python | bsd-3-clause | 308 | 0 |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ContactsService extends the GDataService to streamline Google Contacts operations.
ContactsService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'dbrattli (Dag Brattli)'
import gdata
import atom.service
import gdata.service
import gdata.contacts
import atom
class Error(Exception):
pass
class RequestError(Error):
pass
class ContactsService(gdata.service.GDataService):
"""Client for the Google Contats service."""
def __init__(self, email=None, password=None, source=None,
server='www.google.com',
additional_headers=None):
gdata.service.GDataService.__init__(self, email=email, password=password,
service='cp', source=source,
server=server,
additional_headers=additional_headers)
def GetContactsFeed(self,
uri='http://www.google.com/m8/feeds/contacts/default/base'):
return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString)
def CreateContact(self, new_contact,
insert_uri='/m8/feeds/contacts/default/base', url_params=None,
escape_params=True):
"""Adds an event to Google Contacts.
Args:
      new_contact: atom.Entry or subclass A new contact which is to be added to
            Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Post(new_contact, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def UpdateContact(self, edit_uri, updated_contact, url_params=None,
escape_params=True):
"""Updates an existing contact.
Args:
edit_uri: string The edit link URI for the element being updated
updated_contact: string, atom.Entry or subclass containing
            the Atom Entry which will replace the contact which is
            stored at the edit_uri
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
url_prefix = 'http://%s/' % self.server
if edit_uri.startswith(url_prefix):
edit_uri = edit_uri[len(url_prefix):]
response = self.Put(updated_contact, '/%s' % edit_uri,
url_params=url_params,
escape_params=escape_params)
if isinstance(response, atom.Entry):
return gdata.contacts.ContactEntryFromString(response.ToString())
else:
return response
def DeleteContact(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
"""Removes an event with the specified ID from Google Contacts.
Args:
edit_uri: string The edit URL of the entry to be deleted. Example:
'http://www.google.com/m8/feeds/contacts/default/base/xxx/yyy'
url_params: dict (optional) Additional URL parameters to be included
in the deletion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful delete, a httplib.HTTPResponse containing the server's
response to the DELETE request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
url_prefix = 'http://%s/' % self.server
if edit_uri.startswith(url_prefix):
edit_uri = edit_uri[len(url_prefix):]
return self.Delete('/%s' % edit_uri,
url_params=url_params, escape_params=escape_params)
class ContactsQuery(gdata.service.Query):
def __init__(self, feed=None, text_query=None, params=None,
categories=None):
self.feed = feed or '/m8/feeds/contacts/default/base'
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
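# Usage sketch (hypothetical credentials; ProgrammaticLogin is the standard
# ClientLogin flow inherited from GDataService):
#   client = ContactsService(email='user@example.com', password='...',
#                            source='example-app')
#   client.ProgrammaticLogin()
#   feed = client.GetContactsFeed()
#   for entry in feed.entry:
#       print entry.title.text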
| alon/polinax | libs/external_libs/gdata.py-1.0.13/src/gdata/contacts/service.py | Python | gpl-2.0 | 5,972 | 0.009377 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-17 01:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0037_auto_20170813_0319'),
]
operations = [
migrations.AddField(
model_name='issue',
name='upvotes',
field=models.IntegerField(default=0),
),
]
| goyal-sidd/BLT | website/migrations/0038_issue_upvotes.py | Python | agpl-3.0 | 447 | 0 |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for pre-processing the data into individual, standardized formats."""
import collections
import datetime
import itertools
import os
import pathlib
import re
from typing import Callable, Dict, Set, Tuple
from absl import logging
from dm_c19_modelling.england_data import constants
import pandas as pd
import yaml
_PATH_FILENAME_REGEXES = "filename_regexes.yaml"
_COLUMNS = constants.Columns
_DATE_FORMAT = "%Y-%m-%d"
def _order_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Orders the columns of the dataframe as: date, region, observations."""
df.insert(0, _COLUMNS.DATE.value, df.pop(_COLUMNS.DATE.value))
reg_columns = []
obs_columns = []
for col in df.columns[1:]:
if col.startswith(constants.REGION_PREFIX):
reg_columns.append(col)
elif col.startswith(constants.OBSERVATION_PREFIX):
obs_columns.append(col)
else:
raise ValueError(f"Unknown column: '{col}'")
columns = [_COLUMNS.DATE.value] + reg_columns + obs_columns
return df[columns]
def _raw_data_formatter_daily_deaths(filepath: str) -> pd.DataFrame:
"""Loads and formats daily deaths data."""
sheet_name = "Tab4 Deaths by trust"
header = 15
df = pd.read_excel(filepath, sheet_name=sheet_name, header=header)
# Drop rows and columns which are all nans.
df.dropna(axis=0, how="all", inplace=True)
df.dropna(axis=1, how="all", inplace=True)
# Drop unneeded columns and rows.
drop_columns = ["Total", "Awaiting verification"]
up_to_mar_1_index = "Up to 01-Mar-20"
if sum(i for i in df[up_to_mar_1_index] if isinstance(i, int)) == 0.0:
drop_columns.append(up_to_mar_1_index)
df.drop(columns=drop_columns, inplace=True)
df = df[df["Code"] != "-"]
# Melt the death counts by date into "Date" and "Death Count" columns.
df = df.melt(
id_vars=["NHS England Region", "Code", "Name"],
var_name="Date",
value_name="Death Count")
# Rename the columns to their standard names.
df.rename(
columns={
"Date": _COLUMNS.DATE.value,
"Death Count": _COLUMNS.OBS_DEATHS.value,
"Code": _COLUMNS.REG_TRUST_CODE.value,
"Name": _COLUMNS.REG_TRUST_NAME.value,
"NHS England Region": _COLUMNS.REG_NHSER_NAME.value,
},
inplace=True)
_order_columns(df)
df[_COLUMNS.DATE.value] = df[_COLUMNS.DATE.value].map(
lambda x: x.strftime(_DATE_FORMAT))
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_TRUST_NAME.value,
_COLUMNS.REG_TRUST_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'daily_deaths' contains nans")
return df
def _raw_data_formatter_daily_cases(filepath: str) -> pd.DataFrame:
"""Loads and formats daily cases data."""
df = pd.read_csv(filepath)
df.rename(columns={"Area type": "Area_type"}, inplace=True)
df.query("Area_type == 'ltla'", inplace=True)
# Drop unneeded columns and rows.
drop_columns = [
"Area_type", "Cumulative lab-confirmed cases",
"Cumulative lab-confirmed cases rate"
]
df.drop(columns=drop_columns, inplace=True)
# Rename the columns to their standard names.
df.rename(
columns={
"Area name": _COLUMNS.REG_LTLA_NAME.value,
"Area code": _COLUMNS.REG_LTLA_CODE.value,
"Specimen date": _COLUMNS.DATE.value,
"Daily lab-confirmed cases": _COLUMNS.OBS_CASES.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_LTLA_NAME.value,
_COLUMNS.REG_LTLA_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'daily_cases' contains nans")
return df
def _raw_data_formatter_google_mobility(filepath: str) -> pd.DataFrame:
"""Loads and formats Google mobility data."""
df = pd.read_csv(filepath)
# Filter to UK.
df.query("country_region_code == 'GB'", inplace=True)
# Drop unneeded columns and rows.
drop_columns = [
"country_region_code", "country_region", "metro_area", "census_fips_code"
]
df.drop(columns=drop_columns, inplace=True)
  # Fill missing region info with "na". (Assign back: fillna(inplace=True) on
  # a column-subset copy would silently do nothing.)
  region_columns = ["sub_region_1", "sub_region_2", "iso_3166_2_code"]
  df[region_columns] = df[region_columns].fillna("na")
# Rename the columns to their standard names.
df.rename(
columns={
"sub_region_1":
_COLUMNS.REG_SUB_REGION_1.value,
"sub_region_2":
_COLUMNS.REG_SUB_REGION_2.value,
"iso_3166_2_code":
_COLUMNS.REG_ISO_3166_2_CODE.value,
"date":
_COLUMNS.DATE.value,
"retail_and_recreation_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_RETAIL_AND_RECREATION.value,
"grocery_and_pharmacy_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_GROCERY_AND_PHARMACY.value,
"parks_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_PARKS.value,
"transit_stations_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_TRANSIT_STATIONS.value,
"workplaces_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_WORKPLACES.value,
"residential_percent_change_from_baseline":
_COLUMNS.OBS_MOBILITY_RESIDENTIAL.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_SUB_REGION_1.value,
_COLUMNS.REG_SUB_REGION_2.value,
_COLUMNS.REG_ISO_3166_2_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
return df
def _raw_data_formatter_online_111(filepath: str) -> pd.DataFrame:
"""Loads and formats online 111 data."""
df = pd.read_csv(filepath)
# Drop nans.
df.dropna(subset=["ccgcode"], inplace=True)
# Reformat dates.
remap_dict = {
"journeydate":
lambda x: datetime.datetime.strptime(x, "%d/%m/%Y").strftime( # pylint: disable=g-long-lambda
_DATE_FORMAT),
"ccgname":
lambda x: x.replace("&", "and"),
"sex": {
"Female": "f",
"Male": "m",
"Indeterminate": "u",
},
"ageband": {
"0-18 years": "0",
"19-69 years": "19",
"70+ years": "70"
}
}
for col, remap in remap_dict.items():
df[col] = df[col].map(remap)
journeydate_values = pd.date_range(
df.journeydate.min(), df.journeydate.max()).strftime(_DATE_FORMAT)
ccgcode_values = df.ccgcode.unique()
df.sex.fillna("u", inplace=True)
sex_values = ["f", "m", "u"]
assert set(sex_values) >= set(df.sex.unique()), "unsupported sex value"
df.ageband.fillna("u", inplace=True)
ageband_values = ["0", "19", "70", "u"]
assert set(ageband_values) >= set(
df.ageband.unique()), "unsupported ageband value"
ccg_code_name_map = df[["ccgcode", "ccgname"
]].set_index("ccgcode")["ccgname"].drop_duplicates()
# Some CCG codes have duplicate names, which differ by their commas. Keep the
# longer ones.
fn = lambda x: sorted(x["ccgname"].map(lambda y: (len(y), y)))[-1][1]
ccg_code_name_map = ccg_code_name_map.reset_index().groupby("ccgcode").apply(
fn)
df_full = pd.DataFrame(
list(
itertools.product(journeydate_values, ccgcode_values, sex_values,
ageband_values)),
columns=["journeydate", "ccgcode", "sex", "ageband"])
df = pd.merge(df_full, df, how="outer")
# 0 calls don't have rows, so are nans.
df["Total"].fillna(0, inplace=True)
df["ccgname"] = df["ccgcode"].map(ccg_code_name_map)
# Combine sex and ageband columns into a joint column.
df["sex_ageband"] = df["sex"] + "_" + df["ageband"]
df = df.pivot_table(
index=["journeydate", "ccgcode", "ccgname"],
columns="sex_ageband",
values="Total").reset_index()
df.columns.name = None
# Rename the columns to their standard names.
df.rename(
columns={
"ccgcode": _COLUMNS.REG_CCG_CODE.value,
"ccgname": _COLUMNS.REG_CCG_NAME.value,
"journeydate": _COLUMNS.DATE.value,
"f_0": _COLUMNS.OBS_ONLINE_111_F_0.value,
"f_19": _COLUMNS.OBS_ONLINE_111_F_19.value,
"f_70": _COLUMNS.OBS_ONLINE_111_F_70.value,
"f_u": _COLUMNS.OBS_ONLINE_111_F_U.value,
"m_0": _COLUMNS.OBS_ONLINE_111_M_0.value,
"m_19": _COLUMNS.OBS_ONLINE_111_M_19.value,
"m_70": _COLUMNS.OBS_ONLINE_111_M_70.value,
"m_u": _COLUMNS.OBS_ONLINE_111_M_U.value,
"u_0": _COLUMNS.OBS_ONLINE_111_U_0.value,
"u_19": _COLUMNS.OBS_ONLINE_111_U_19.value,
"u_70": _COLUMNS.OBS_ONLINE_111_U_70.value,
"u_u": _COLUMNS.OBS_ONLINE_111_U_U.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_CCG_NAME.value,
_COLUMNS.REG_CCG_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'online_111' contains nans")
return df
def _raw_data_formatter_calls_111_999(filepath: str) -> pd.DataFrame:
"""Loads and formats 111 & 999 calls data."""
df = pd.read_csv(filepath)
# Drop unneeded columns and rows.
drop_columns = []
df.drop(columns=drop_columns, inplace=True)
# Drop nans.
df.dropna(subset=["CCGCode", "CCGName"], inplace=True)
# Reformat values.
df["AgeBand"].fillna("u", inplace=True)
remap_dict = {
"Call Date":
lambda x: datetime.datetime.strptime(x, "%d/%m/%Y").strftime( # pylint: disable=g-long-lambda
"%Y-%m-%d"),
"CCGName":
lambda x: x.replace("&", "and"),
"SiteType":
lambda x: str(int(x)),
"Sex": {
"Female": "f",
"Male": "m",
"Unknown": "u",
},
"AgeBand": {
"0-18 years": "0",
"19-69 years": "19",
"70-120 years": "70",
"u": "u",
}
}
for col, remap in remap_dict.items():
df[col] = df[col].map(remap)
call_date_values = pd.date_range(df["Call Date"].min(),
df["Call Date"].max()).strftime(_DATE_FORMAT)
ccgcode_values = df["CCGCode"].unique()
sitetype_values = ["111", "999"]
assert set(sitetype_values) >= set(
df.SiteType.unique()), "unsupported sitetype value"
sex_values = ["f", "m", "u"]
assert set(sex_values) >= set(df.Sex.unique()), "unsupported sex value"
ageband_values = ["0", "19", "70", "u"]
assert set(ageband_values) >= set(
df.AgeBand.unique()), "unsupported ageband value"
ccg_code_name_map = df[["CCGCode", "CCGName"
]].set_index("CCGCode")["CCGName"].drop_duplicates()
df_full = pd.DataFrame(
list(itertools.product(call_date_values, ccgcode_values, sitetype_values,
sex_values, ageband_values)),
columns=["Call Date", "CCGCode", "SiteType", "Sex", "AgeBand"])
df = pd.merge(df_full, df, how="outer")
# 0 calls don't have rows, so are nans.
df["TriageCount"].fillna(0, inplace=True)
df["CCGName"] = df["CCGCode"].map(ccg_code_name_map)
# Combine SiteType, Sex, and AgeBand columns into a joint column.
df["SiteType_Sex_AgeBand"] = (
df["SiteType"] + "_" + df["Sex"] + "_" + df["AgeBand"])
df = df.pivot_table(
index=["Call Date", "CCGCode", "CCGName"],
columns="SiteType_Sex_AgeBand",
values="TriageCount").reset_index()
df.columns.name = None
# Rename the columns to their standard names.
df.rename(
columns={
"CCGCode": _COLUMNS.REG_CCG_CODE.value,
"CCGName": _COLUMNS.REG_CCG_NAME.value,
"Call Date": _COLUMNS.DATE.value,
"111_f_0": _COLUMNS.OBS_CALL_111_F_0.value,
"111_f_19": _COLUMNS.OBS_CALL_111_F_19.value,
"111_f_70": _COLUMNS.OBS_CALL_111_F_70.value,
"111_f_u": _COLUMNS.OBS_CALL_111_F_U.value,
"111_m_0": _COLUMNS.OBS_CALL_111_M_0.value,
"111_m_19": _COLUMNS.OBS_CALL_111_M_19.value,
"111_m_70": _COLUMNS.OBS_CALL_111_M_70.value,
"111_m_u": _COLUMNS.OBS_CALL_111_M_U.value,
"111_u_0": _COLUMNS.OBS_CALL_111_U_0.value,
"111_u_19": _COLUMNS.OBS_CALL_111_U_19.value,
"111_u_70": _COLUMNS.OBS_CALL_111_U_70.value,
"111_u_u": _COLUMNS.OBS_CALL_111_U_U.value,
"999_f_0": _COLUMNS.OBS_CALL_999_F_0.value,
"999_f_19": _COLUMNS.OBS_CALL_999_F_19.value,
"999_f_70": _COLUMNS.OBS_CALL_999_F_70.value,
"999_f_u": _COLUMNS.OBS_CALL_999_F_U.value,
"999_m_0": _COLUMNS.OBS_CALL_999_M_0.value,
"999_m_19": _COLUMNS.OBS_CALL_999_M_19.value,
"999_m_70": _COLUMNS.OBS_CALL_999_M_70.value,
"999_m_u": _COLUMNS.OBS_CALL_999_M_U.value,
"999_u_0": _COLUMNS.OBS_CALL_999_U_0.value,
"999_u_19": _COLUMNS.OBS_CALL_999_U_19.value,
"999_u_70": _COLUMNS.OBS_CALL_999_U_70.value,
"999_u_u": _COLUMNS.OBS_CALL_999_U_U.value,
},
inplace=True)
_order_columns(df)
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_CCG_NAME.value,
_COLUMNS.REG_CCG_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'calls_111_999' contains nans")
return df
_FORMATTER_FUNCTIONS = {
"daily_deaths": _raw_data_formatter_daily_deaths,
"daily_cases": _raw_data_formatter_daily_cases,
"google_mobility": _raw_data_formatter_google_mobility,
"online_111": _raw_data_formatter_online_111,
"calls_111_999": _raw_data_formatter_calls_111_999,
}
def _get_raw_data_formatter_by_name(name: str) -> Callable[[str], pd.DataFrame]:
return _FORMATTER_FUNCTIONS[name]
def _merge_online_111_and_calls_111_999(
df_online_111: pd.DataFrame,
df_calls_111_999: pd.DataFrame) -> pd.DataFrame:
"""Merges the 111 online and 111/999 calls into a single dataframe."""
df = pd.merge(
df_online_111,
df_calls_111_999,
how="outer",
on=[
_COLUMNS.DATE.value,
_COLUMNS.REG_CCG_CODE.value,
_COLUMNS.REG_CCG_NAME.value,
])
return df
def format_raw_data_files(
paths_dict: Dict[str, str]) -> Dict[str, pd.DataFrame]:
"""Loads and formats the individual raw data files.
Args:
paths_dict: mapping from data names to filepaths.
Returns:
mapping from data names to formatted dataframes.
"""
formatted_dfs = {}
for name, path in paths_dict.items():
logging.info("Formatting raw data: %s", name)
formatter = _get_raw_data_formatter_by_name(name)
formatted_dfs[name] = formatter(path)
logging.info("Merging online 111 and 111/999 calls")
if "online_111" and "calls_111_999" in formatted_dfs:
formatted_dfs[
"online_111_and_calls_111_999"] = _merge_online_111_and_calls_111_999(
formatted_dfs.pop("online_111"), formatted_dfs.pop("calls_111_999"))
elif "online_111" in formatted_dfs:
formatted_dfs["online_111_and_calls_111_999"] = formatted_dfs.pop(
"online_111")
elif "calls_111_999" in formatted_dfs:
formatted_dfs["online_111_and_calls_111_999"] = formatted_dfs.pop(
"calls_111_999")
return formatted_dfs
def merge_formatted_data(
formatted_data: Dict[str, pd.DataFrame]) -> pd.DataFrame:
"""Concatenates all formatted data into a single dataframe.
Args:
formatted_data: mapping from the data name to its dataframe.
Returns:
a dataframe containing all of the input dataframes.
"""
logging.info("Merging all dataframes")
dfs = []
for name, df in formatted_data.items():
df = df.copy()
df.insert(1, _COLUMNS.OBSERVATION_TYPE.value, name)
dfs.append(df)
df_merged = pd.concat(dfs)
reg_columns = [
c for c in df_merged.columns if c.startswith(constants.REGION_PREFIX)
]
df_merged.sort_values(
[_COLUMNS.DATE.value, _COLUMNS.OBSERVATION_TYPE.value] + reg_columns,
inplace=True)
df_merged.reset_index(drop=True, inplace=True)
return df_merged
def _load_filename_regexes() -> Dict[str, str]:
"""Gets a mapping from the data name to the regex for that data's filepath."""
path = pathlib.Path(os.path.dirname(
os.path.realpath(__file__))) / _PATH_FILENAME_REGEXES
with open(path) as fid:
return yaml.load(fid, Loader=yaml.SafeLoader)
def get_paths_for_given_date(
raw_data_directory: str,
scrape_date: str) -> Tuple[Dict[str, str], str, Set[str]]:
"""Get the raw data paths for a scrape date and filename regex.
Args:
raw_data_directory: the directory where the raw data is saved.
scrape_date: the scrape date to use, in the form YYYYMMDD, or 'latest'.
Returns:
mapping of data names to filepaths
the scrape date used
names whose data was not found on disk
"""
filename_regexes = _load_filename_regexes()
if scrape_date == "latest":
rx = re.compile("^[0-9]{8}$")
directories = []
for filename in os.listdir(raw_data_directory):
if rx.match(filename) is None:
continue
path = pathlib.Path(raw_data_directory) / filename
if not os.path.isdir(path):
continue
directories.append(path)
if not directories:
raise ValueError("Could not find latest scrape date directory")
directory = max(directories)
scrape_date_dirname = directory.parts[-1]
else:
try:
datetime.datetime.strptime(scrape_date, "%Y%m%d")
except ValueError:
raise ValueError("Date must be formatted: YYYYMMDD")
scrape_date_dirname = scrape_date
directory = pathlib.Path(raw_data_directory) / scrape_date_dirname
paths_dict = collections.defaultdict(lambda: None)
for name, filename_regex in filename_regexes.items():
rx = re.compile(f"^{filename_regex}$")
for filename in os.listdir(directory):
path = directory / filename
if os.path.isdir(path):
continue
match = rx.match(filename)
if match is None:
continue
if paths_dict[name] is not None:
raise ValueError("There should only be 1 file per name")
paths_dict[name] = str(path)
missing_names = set(filename_regexes.keys()) - set(paths_dict.keys())
return dict(paths_dict), scrape_date_dirname, missing_names
def load_population_dataframe(raw_data_directory: str) -> pd.DataFrame:
"""Load population data from disk, and create a dataframe from it.
Args:
raw_data_directory: the directory where the raw data is saved.
Returns:
a dataframe containing population data.
"""
filename = _load_filename_regexes()["population"]
filepath = pathlib.Path(raw_data_directory) / filename
kwargs = dict(header=0, skiprows=(0, 1, 2, 3, 4, 5, 7))
try:
pop_m = pd.read_excel(filepath, sheet_name="Mid-2019 Males", **kwargs)
pop_f = pd.read_excel(filepath, sheet_name="Mid-2019 Females", **kwargs)
except FileNotFoundError:
return None
# Remove lower resolution columns.
columns_to_remove = ("STP20 Code", "STP20 Name", "NHSER20 Code",
"NHSER20 Name", "All Ages")
for col in columns_to_remove:
del pop_m[col]
del pop_f[col]
mapping = {"CCG Code": _COLUMNS.REG_CCG_CODE.value,
"CCG Name": _COLUMNS.REG_CCG_NAME.value,
"90+": 90}
pop_m.rename(columns=mapping, inplace=True)
pop_f.rename(columns=mapping, inplace=True)
# This labels the male and female data uniquely so they can be merged.
pop_m.rename(
columns=lambda x: f"m_{str(x).lower()}" if isinstance(x, int) else x,
inplace=True)
pop_f.rename(
columns=lambda x: f"f_{str(x).lower()}" if isinstance(x, int) else x,
inplace=True)
region_columns = [_COLUMNS.REG_CCG_NAME.value, _COLUMNS.REG_CCG_CODE.value]
df = pd.merge(pop_m, pop_f, how="outer", on=tuple(region_columns))
mapping = {
f"{gender}_{age}":
_COLUMNS.OBS_POPULATION_GENDER_AGE.value.format(gender=gender, age=age)
for gender, age in itertools.product(("m", "f"), range(91))
}
df.rename(columns=mapping, inplace=True)
return df
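# End-to-end sketch (hypothetical paths; assumes a scrape saved under
# raw_data_directory/YYYYMMDD/):
#   paths, scrape_date, missing = get_paths_for_given_date("/data/raw", "latest")
#   formatted = format_raw_data_files(paths)
#   merged = merge_formatted_data(formatted)
#   population = load_population_dataframe("/data/raw")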
| GoogleCloudPlatform/covid-19-open-data | src/england_data/standardize_data.py | Python | apache-2.0 | 21,282 | 0.010384 |
# -*- coding: utf-8 -*-
import datetime
from openerp import http
from openerp.http import request
from openerp.addons.website_portal.controllers.main import website_account
class website_account(website_account):
@http.route(['/my/home'], type='http', auth="user", website=True)
def account(self, **kw):
""" Add sales documents to main account page """
response = super(website_account, self).account()
partner = request.env.user.partner_id
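        # ``partner`` is not used in the domains below; record visibility is
        # assumed to be enforced by ir.rule record rules on sale.order and
        # account.invoice.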
res_sale_order = request.env['sale.order']
res_invoices = request.env['account.invoice']
quotations = res_sale_order.search([
('state', 'in', ['sent', 'cancel'])
])
orders = res_sale_order.search([
('state', 'in', ['sale', 'done'])
])
invoices = res_invoices.search([
('state', 'in', ['open', 'paid', 'cancelled'])
])
response.qcontext.update({
'date': datetime.date.today().strftime('%Y-%m-%d'),
'quotations': quotations,
'orders': orders,
'invoices': invoices,
})
return response
@http.route(['/my/orders/<int:order>'], type='http', auth="user", website=True)
def orders_followup(self, order=None):
partner = request.env['res.users'].browse(request.uid).partner_id
domain = [
('partner_id.id', '=', partner.id),
('state', 'not in', ['draft', 'cancel']),
('id', '=', order)
]
order = request.env['sale.order'].search(domain)
invoiced_lines = request.env['account.invoice.line'].search([('invoice_id', 'in', order.invoice_ids.ids)])
order_invoice_lines = {il.product_id.id: il.invoice_id for il in invoiced_lines}
return request.website.render("website_portal_sale.orders_followup", {
'order': order.sudo(),
'order_invoice_lines': order_invoice_lines,
})
| stephen144/odoo | addons/website_portal_sale/controllers/main.py | Python | agpl-3.0 | 1,940 | 0.001546 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class KeySearchMobilePage(page_module.Page):
def __init__(self, url, page_set):
super(KeySearchMobilePage, self).__init__(
url=url, page_set=page_set, credentials_path = 'data/credentials.json',
shared_page_state_class=shared_page_state.SharedMobilePageState)
self.archive_data_file = 'data/key_search_mobile.json'
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class KeySearchMobilePageSet(story.StorySet):
""" Key mobile search queries on google """
def __init__(self):
super(KeySearchMobilePageSet, self).__init__(
archive_data_file='data/key_search_mobile.json',
cloud_storage_bucket=story.PUBLIC_BUCKET)
urls_list = [
# Why: An empty page should be as snappy as possible
'http://www.google.com/',
# Why: A reasonable search term with no images or ads usually
'https://www.google.com/search?q=science',
# Why: A reasonable search term with images but no ads usually
'http://www.google.com/search?q=orange',
# Why: An address search
# pylint: disable=line-too-long
'https://www.google.com/search?q=1600+Amphitheatre+Pkwy%2C+Mountain+View%2C+CA',
# Why: A search for a known actor
'http://www.google.com/search?q=tom+hanks',
# Why: A search for weather
'https://www.google.com/search?q=weather+94110',
# Why: A search for a stock
'http://www.google.com/search?q=goog',
# Why: Charts
'https://www.google.com/search?q=population+of+california',
# Why: Flights
'http://www.google.com/search?q=sfo+jfk+flights',
# Why: Movie showtimes
'https://www.google.com/search?q=movies+94110',
# Why: A tip calculator
'http://www.google.com/search?q=tip+on+100+bill',
# Why: Time
'https://www.google.com/search?q=time+in+san+francisco',
# Why: Definitions
'http://www.google.com/search?q=define+define',
# Why: Local results
'https://www.google.com/search?q=burritos+94110',
# Why: Graph
'http://www.google.com/search?q=x^3'
]
for url in urls_list:
self.AddStory(KeySearchMobilePage(url, self))
| js0701/chromium-crosswalk | tools/perf/page_sets/key_search_mobile.py | Python | bsd-3-clause | 2,523 | 0.003567 |