from __future__ import annotations
import asyncio
import logging
import pprint
import uuid
from collections import defaultdict
from copy import deepcopy
from datetime import datetime
from operator import itemgetter
from types import TracebackType
from typing import Any, cast
from aiohttp import ClientSession, ClientWebSocketResponse, WSMsgType, client_exceptions
from .const import (
MAX_SERVER_SCHEMA_VERSION,
MIN_SERVER_SCHEMA_VERSION,
PACKAGE_NAME,
__version__,
)
from .event import Event
from .exceptions import (
CannotConnect,
ConnectionClosed,
ConnectionFailed,
FailedCommand,
FailedZWaveCommand,
InvalidMessage,
InvalidServerVersion,
InvalidState,
NotConnected,
)
from .model.driver import Driver
from .model.version import VersionInfo, VersionInfoDataType
SIZE_PARSE_JSON_EXECUTOR = 8192
# Message IDs
INITIALIZE_MESSAGE_ID = "initialize"
GET_INITIAL_LOG_CONFIG_MESSAGE_ID = "get-initial-log-config"
START_LISTENING_MESSAGE_ID = "start-listening"
LISTEN_MESSAGE_IDS = (
GET_INITIAL_LOG_CONFIG_MESSAGE_ID,
INITIALIZE_MESSAGE_ID,
START_LISTENING_MESSAGE_ID,
)
class Client:
"""Class to manage the IoT connection."""
def __init__(
self,
ws_server_url: str,
aiohttp_session: ClientSession,
schema_version: int = MAX_SERVER_SCHEMA_VERSION,
additional_user_agent_components: dict[str, str] | None = None,
record_messages: bool = False,
):
"""Initialize the Client class."""
self.ws_server_url = ws_server_url
self.aiohttp_session = aiohttp_session
self.driver: Driver | None = None
# The WebSocket client
self._client: ClientWebSocketResponse | None = None
# Version of the connected server
self.version: VersionInfo | None = None
self.schema_version: int = schema_version
self.additional_user_agent_components = {
PACKAGE_NAME: __version__,
**(additional_user_agent_components or {}),
}
self._logger = logging.getLogger(__package__)
self._loop = asyncio.get_running_loop()
self._result_futures: dict[str, asyncio.Future] = {}
self._shutdown_complete_event: asyncio.Event | None = None
self._record_messages = record_messages
self._recorded_commands: defaultdict[str, dict] = defaultdict(dict)
self._recorded_events: list[dict] = []
def __repr__(self) -> str:
"""Return the representation."""
prefix = "" if self.connected else "not "
return f"{type(self).__name__}(ws_server_url={self.ws_server_url!r}, {prefix}connected)"
@property
def connected(self) -> bool:
"""Return if we're currently connected."""
return self._client is not None and not self._client.closed
@property
def recording_messages(self) -> bool:
"""Return True if messages are being recorded."""
return self._record_messages
async def async_send_command(
self, message: dict[str, Any], require_schema: int | None = None
) -> dict:
"""Send a command and get a response."""
if require_schema is not None and require_schema > self.schema_version:
assert self.version
raise InvalidServerVersion(
self.version,
require_schema,
"Command not available due to incompatible server version. Update the Z-Wave "
f"JS Server to a version that supports at least api schema {require_schema}.",
)
future: "asyncio.Future[dict]" = self._loop.create_future()
message_id = message["messageId"] = uuid.uuid4().hex
self._result_futures[message_id] = future
await self._send_json_message(message)
try:
return await future
finally:
self._result_futures.pop(message_id)
async def async_send_command_no_wait(
self, message: dict[str, Any], require_schema: int | None = None
) -> None:
"""Send a command without waiting for the response."""
if require_schema is not None and require_schema > self.schema_version:
assert self.version
raise InvalidServerVersion(
self.version,
require_schema,
"Command not available due to incompatible server version. Update the Z-Wave "
f"JS Server to a version that supports at least api schema {require_schema}.",
)
message["messageId"] = uuid.uuid4().hex
await self._send_json_message(message)
async def connect(self) -> None:
"""Connect to the websocket server."""
if self.driver is not None:
raise InvalidState("Re-connected with existing driver")
self._logger.debug("Trying to connect")
try:
self._client = await self.aiohttp_session.ws_connect(
self.ws_server_url,
heartbeat=55,
compress=15,
max_msg_size=0,
)
except (
client_exceptions.WSServerHandshakeError,
client_exceptions.ClientError,
) as err:
raise CannotConnect(err) from err
self.version = version = VersionInfo.from_message(
cast(VersionInfoDataType, await self._receive_json_or_raise())
)
# basic check for server schema version compatibility
if (
self.version.min_schema_version > MAX_SERVER_SCHEMA_VERSION
or self.version.max_schema_version < MIN_SERVER_SCHEMA_VERSION
):
await self._client.close()
assert self.version
raise InvalidServerVersion(
self.version,
MIN_SERVER_SCHEMA_VERSION,
f"Z-Wave JS Server version ({self.version.server_version}) is "
"incompatible. Update the Z-Wave JS Server to a version that supports "
f"at least api schema {MIN_SERVER_SCHEMA_VERSION}",
)
# store the (highest possible) schema version we're going to use/request
# this is a bit future proof as we might decide to use a pinned version at some point
# for now we just negotiate the highest available schema version and
# guard incompatibility with the MIN_SERVER_SCHEMA_VERSION
if self.version.max_schema_version < MAX_SERVER_SCHEMA_VERSION:
self.schema_version = self.version.max_schema_version
self._logger.info(
"Connected to Home %s (Server %s, Driver %s, Using Schema %s)",
version.home_id,
version.server_version,
version.driver_version,
self.schema_version,
)
async def initialize(self) -> None:
"""Initialize connection to server by setting schema version and user agent."""
assert self._client
# set preferred schema version on the server
# note: we already check for (in)compatible schemas in the connect call
await self._send_json_message(
{
"command": "initialize",
"messageId": INITIALIZE_MESSAGE_ID,
"schemaVersion": self.schema_version,
"additionalUserAgentComponents": self.additional_user_agent_components,
}
)
set_api_msg = await self._receive_json_or_raise()
if not set_api_msg["success"]:
# this should not happen, but just in case
await self._client.close()
raise FailedCommand(set_api_msg["messageId"], set_api_msg["errorCode"])
async def listen(self, driver_ready: asyncio.Event) -> None:
"""Start listening to the websocket."""
if not self.connected:
raise InvalidState("Not connected when start listening")
assert self._client
try:
await self.initialize()
await self._send_json_message(
{
"command": "driver.get_log_config",
"messageId": GET_INITIAL_LOG_CONFIG_MESSAGE_ID,
}
)
log_msg = await self._receive_json_or_raise()
# this should not happen, but just in case
if not log_msg["success"]:
await self._client.close()
raise FailedCommand(log_msg["messageId"], log_msg["errorCode"])
# send start_listening command to the server
# we will receive a full state dump and from now on get events
await self._send_json_message(
{"command": "start_listening", "messageId": START_LISTENING_MESSAGE_ID}
)
state_msg = await self._receive_json_or_raise()
if not state_msg["success"]:
await self._client.close()
raise FailedCommand(state_msg["messageId"], state_msg["errorCode"])
self.driver = cast(
Driver,
await self._loop.run_in_executor(
None,
Driver,
self,
state_msg["result"]["state"],
log_msg["result"]["config"],
),
)
driver_ready.set()
self._logger.info(
"Z-Wave JS initialized. %s nodes", len(self.driver.controller.nodes)
)
await self.receive_until_closed()
except ConnectionClosed:
pass
finally:
self._logger.debug("Listen completed. Cleaning up")
for future in self._result_futures.values():
future.cancel()
self._result_futures.clear()
if not self._client.closed:
await self._client.close()
if self._shutdown_complete_event:
self._shutdown_complete_event.set()
async def disconnect(self) -> None:
"""Disconnect the client."""
self._logger.debug("Closing client connection")
if not self.connected:
return
assert self._client
# 'listen' was never called
if self.driver is None:
await self._client.close()
return
self._shutdown_complete_event = asyncio.Event()
await self._client.close()
await self._shutdown_complete_event.wait()
self._shutdown_complete_event = None
self.driver = None
def begin_recording_messages(self) -> None:
"""Begin recording messages for replay later."""
if self._record_messages:
raise InvalidState("Already recording messages")
self._record_messages = True
def end_recording_messages(self) -> list[dict]:
"""End recording messages and return messages that were recorded."""
if not self._record_messages:
raise InvalidState("Not recording messages")
self._record_messages = False
data = sorted(
(*self._recorded_commands.values(), *self._recorded_events),
key=itemgetter("ts"),
)
self._recorded_commands.clear()
self._recorded_events.clear()
return list(data)
async def receive_until_closed(self) -> None:
"""Receive messages until client is closed."""
assert self._client
while not self._client.closed:
data = await self._receive_json_or_raise()
self._handle_incoming_message(data)
async def _receive_json_or_raise(self) -> dict:
"""Receive json or raise."""
assert self._client
msg = await self._client.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSED, WSMsgType.CLOSING):
raise ConnectionClosed("Connection was closed.")
if msg.type == WSMsgType.ERROR:
raise ConnectionFailed()
if msg.type != WSMsgType.TEXT:
raise InvalidMessage(f"Received non-Text message: {msg.type}")
try:
if len(msg.data) > SIZE_PARSE_JSON_EXECUTOR:
data: dict = await self._loop.run_in_executor(None, msg.json)
else:
data = msg.json()
except ValueError as err:
raise InvalidMessage("Received invalid JSON.") from err
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug("Received message:\n%s\n", pprint.pformat(msg))
return data
def _handle_incoming_message(self, msg: dict) -> None:
"""Handle incoming message.
Run all async tasks in a wrapper to log appropriately.
"""
if msg["type"] == "result":
future = self._result_futures.get(msg["messageId"])
if future is None:
# no listener for this result
return
if self._record_messages and msg["messageId"] not in LISTEN_MESSAGE_IDS:
self._recorded_commands[msg["messageId"]].update(
{
"result_ts": datetime.utcnow().isoformat(),
"result_msg": deepcopy(msg),
}
)
if msg["success"]:
future.set_result(msg["result"])
return
if msg["errorCode"] != "zwave_error":
err = FailedCommand(msg["messageId"], msg["errorCode"])
else:
err = FailedZWaveCommand(
msg["messageId"], msg["zwaveErrorCode"], msg["zwaveErrorMessage"]
)
future.set_exception(err)
return
if msg["type"] != "event":
# Can't handle
self._logger.debug(
"Received message with unknown type '%s': %s",
msg["type"],
msg,
)
return
if self._record_messages:
self._recorded_events.append(
{
"record_type": "event",
"ts": datetime.utcnow().isoformat(),
"type": msg["event"]["event"],
"event_msg": deepcopy(msg),
}
)
event = Event(type=msg["event"]["event"], data=msg["event"])
self.driver.receive_event(event) # type: ignore
async def _send_json_message(self, message: dict[str, Any]) -> None:
"""Send a message.
Raises NotConnected if client not connected.
"""
if not self.connected:
raise NotConnected
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug("Publishing message:\n%s\n", pprint.pformat(message))
assert self._client
assert "messageId" in message
if self._record_messages and message["messageId"] not in LISTEN_MESSAGE_IDS:
# We don't need to deepcopy command_msg because it is always released by
# the caller after the command is sent.
self._recorded_commands[message["messageId"]].update(
{
"record_type": "command",
"ts": datetime.utcnow().isoformat(),
"command": message["command"],
"command_msg": message,
}
)
await self._client.send_json(message)
async def async_start_listening_logs(self) -> None:
"""Send command to start listening to log events."""
await self.async_send_command(
{"command": "start_listening_logs"}, require_schema=31
)
async def async_stop_listening_logs(self) -> None:
"""Send command to stop listening to log events."""
await self.async_send_command(
{"command": "stop_listening_logs"}, require_schema=31
)
async def __aenter__(self) -> "Client":
"""Connect to the websocket."""
await self.connect()
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
"""Disconnect from the websocket."""
await self.disconnect()

# === end of zwave_js_server/client.py (zwave-js-server-python 0.51.0) ===
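
# Usage sketch (illustrative only, not part of the library): a minimal consumer
# of Client.  The websocket URL/port is an assumption; adjust it to point at
# your Z-Wave JS Server instance.  Error handling is omitted for brevity.
import asyncio

import aiohttp

from zwave_js_server.client import Client


async def main() -> None:
    async with aiohttp.ClientSession() as session:
        client = Client("ws://localhost:3000", session)
        await client.connect()
        driver_ready = asyncio.Event()
        # listen() blocks until the connection closes, so run it as a task
        listen_task = asyncio.create_task(client.listen(driver_ready))
        await driver_ready.wait()
        assert client.driver is not None
        print(f"Nodes: {len(client.driver.controller.nodes)}")
        await client.disconnect()
        await listen_task


if __name__ == "__main__":
    asyncio.run(main())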
from __future__ import annotations
import asyncio
import aiohttp
from .client import Client
from .model.controller.firmware import (
ControllerFirmwareUpdateData,
ControllerFirmwareUpdateResult,
)
from .model.node import Node
from .model.node.firmware import NodeFirmwareUpdateData, NodeFirmwareUpdateResult
async def update_firmware(
url: str,
node: Node,
updates: list[NodeFirmwareUpdateData],
session: aiohttp.ClientSession,
additional_user_agent_components: dict[str, str] | None = None,
) -> NodeFirmwareUpdateResult:
"""Send updateFirmware command to Node."""
client = Client(
url, session, additional_user_agent_components=additional_user_agent_components
)
await client.connect()
await client.initialize()
receive_task = asyncio.get_running_loop().create_task(client.receive_until_closed())
cmd = {
"command": "node.update_firmware",
"nodeId": node.node_id,
"updates": [update.to_dict() for update in updates],
}
data = await client.async_send_command(cmd, require_schema=29)
await client.disconnect()
if not receive_task.done():
receive_task.cancel()
return NodeFirmwareUpdateResult(node, data["result"])
async def controller_firmware_update_otw(
url: str,
firmware_file: ControllerFirmwareUpdateData,
session: aiohttp.ClientSession,
additional_user_agent_components: dict[str, str] | None = None,
) -> ControllerFirmwareUpdateResult:
"""
Send firmwareUpdateOTW command to Controller.
Sending the wrong firmware to a controller can brick it and make it unrecoverable.
Consumers of this library should build mechanisms to ensure that users understand
the risks.
"""
client = Client(
url, session, additional_user_agent_components=additional_user_agent_components
)
await client.connect()
await client.initialize()
receive_task = asyncio.get_running_loop().create_task(client.receive_until_closed())
data = await client.async_send_command(
{
"command": "controller.firmware_update_otw",
**firmware_file.to_dict(),
},
require_schema=29,
)
await client.disconnect()
if not receive_task.done():
receive_task.cancel()
return ControllerFirmwareUpdateResult(data["result"])

# === end of zwave_js_server/firmware.py (zwave-js-server-python 0.51.0) ===
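
# Usage sketch (illustrative only, not part of the library): run the node
# firmware update helper with a throwaway session.  Building the
# NodeFirmwareUpdateData entries (file name, payload, format) is out of scope
# here and assumed to be done by the caller.
import aiohttp

from zwave_js_server.firmware import update_firmware
from zwave_js_server.model.node import Node
from zwave_js_server.model.node.firmware import (
    NodeFirmwareUpdateData,
    NodeFirmwareUpdateResult,
)


async def flash_node(
    url: str, node: Node, updates: list[NodeFirmwareUpdateData]
) -> NodeFirmwareUpdateResult:
    """Open a temporary session and run the firmware update helper."""
    async with aiohttp.ClientSession() as session:
        return await update_firmware(url, node, updates, session)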
from __future__ import annotations
from typing import TYPE_CHECKING
from .const import RssiError
if TYPE_CHECKING:
from .const import CommandClass
from .model.value import Value
from .model.version import VersionInfo
class BaseZwaveJSServerError(Exception):
"""Base Zwave JS Server exception."""
class TransportError(BaseZwaveJSServerError):
"""Exception raised to represent transport errors."""
def __init__(self, message: str, error: Exception | None = None) -> None:
"""Initialize a transport error."""
super().__init__(message)
self.error = error
class ConnectionClosed(TransportError):
"""Exception raised when the connection is closed."""
class CannotConnect(TransportError):
"""Exception raised when failed to connect the client."""
def __init__(self, error: Exception) -> None:
"""Initialize a cannot connect error."""
super().__init__(f"{error}", error)
class ConnectionFailed(TransportError):
"""Exception raised when an established connection fails."""
def __init__(self, error: Exception | None = None) -> None:
"""Initialize a connection failed error."""
if error is None:
super().__init__("Connection failed.")
return
super().__init__(f"{error}", error)
class NotFoundError(BaseZwaveJSServerError):
"""Exception that is raised when an entity can't be found."""
class NotConnected(BaseZwaveJSServerError):
"""Exception raised when not connected to client."""
class InvalidState(BaseZwaveJSServerError):
"""Exception raised when data gets in invalid state."""
class InvalidMessage(BaseZwaveJSServerError):
"""Exception raised when an invalid message is received."""
class InvalidServerVersion(BaseZwaveJSServerError):
"""Exception raised when connected to server with incompatible version."""
def __init__(
self,
version_info: "VersionInfo",
required_schema_version: int,
message: str,
) -> None:
"""Initialize an invalid server version error."""
self.server_version = version_info.server_version
self.server_max_schema_version = version_info.max_schema_version
self.required_schema_version = required_schema_version
super().__init__(message)
class FailedCommand(BaseZwaveJSServerError):
"""When a command has failed."""
def __init__(
self, message_id: str, error_code: str, msg: str | None = None
) -> None:
"""Initialize a failed command error."""
super().__init__(msg or f"Command failed: {error_code}")
self.message_id = message_id
self.error_code = error_code
class FailedZWaveCommand(FailedCommand):
"""When a command has failed because of Z-Wave JS error."""
def __init__(
self, message_id: str, zwave_error_code: int, zwave_error_message: str
):
"""Initialize a failed command error."""
super().__init__(
message_id,
"zwave_error",
f"Z-Wave error {zwave_error_code}: {zwave_error_message}",
)
self.zwave_error_code = zwave_error_code
self.zwave_error_message = zwave_error_message
class UnparseableValue(BaseZwaveJSServerError):
"""Exception raised when a value can't be parsed."""
class UnwriteableValue(BaseZwaveJSServerError):
"""Exception raised when trying to change a read only Value."""
class InvalidNewValue(BaseZwaveJSServerError):
"""Exception raised when target new value is invalid based on Value metadata."""
class ValueTypeError(BaseZwaveJSServerError):
"""Exception raised when target Zwave value is the wrong type."""
class SetValueFailed(BaseZwaveJSServerError):
"""
Exception raise when setting a value fails.
Refer to https://zwave-js.github.io/node-zwave-js/#/api/node?id=setvalue for
possible reasons.
"""
class BulkSetConfigParameterFailed(BaseZwaveJSServerError):
"""
Exception raised when bulk setting a config parameter fails.
Derived from another exception
"""
class InvalidCommandClass(BaseZwaveJSServerError):
"""Exception raised when Zwave Value has an invalid command class."""
def __init__(self, value: "Value", command_class: "CommandClass") -> None:
"""Initialize an invalid Command Class error."""
self.value = value
self.command_class = command_class
super().__init__(
f"Value {value} does not match expected command class: {command_class}"
)
class UnknownValueData(BaseZwaveJSServerError):
"""
Exception raised when Zwave Value has data that the library can't parse.
This can be caused by an upstream issue with the driver, or missing support in the
library.
"""
def __init__(self, value: "Value", path: str) -> None:
"""Initialize an unknown data error."""
self.value = value
self.path = path
super().__init__(
f"Value {value} has unknown data in the following location: {path}. "
f"A reinterview of node {value.node} may correct this issue, but if it "
"doesn't, please report this issue as it may be caused by either an "
"upstream issue with the driver or missing support for this data in the "
"library"
)
class RssiErrorReceived(BaseZwaveJSServerError):
"""Exception raised when an RSSI error is received."""
def __init__(self, error: "RssiError") -> None:
"""Initialize an RSSI error."""
self.error = error
super().__init__()
class RepeaterRssiErrorReceived(BaseZwaveJSServerError):
"""Exception raised when an RSSI error is received in list of RSSIs."""
def __init__(self, rssi_list: list[int]) -> None:
"""Initialize an RSSI error."""
self.rssi_list = rssi_list
rssi_errors = [item.value for item in RssiError]
self.error_list = [
RssiError(rssi_) if rssi_ in rssi_errors else None for rssi_ in rssi_list
]
super().__init__()

# === end of zwave_js_server/exceptions.py (zwave-js-server-python 0.51.0) ===
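
# Usage sketch (illustrative only, not part of the library): distinguish
# driver-level Z-Wave errors from other failed commands when sending a command
# through the client.
import logging

from zwave_js_server.client import Client
from zwave_js_server.exceptions import FailedCommand, FailedZWaveCommand

_LOGGER = logging.getLogger(__name__)


async def send_or_log(client: Client, message: dict) -> dict | None:
    """Send a command (message must contain a "command" key) and log failures."""
    try:
        return await client.async_send_command(message)
    except FailedZWaveCommand as err:
        # The server relayed an error from the Z-Wave JS driver itself
        _LOGGER.error(
            "Z-Wave error %s: %s", err.zwave_error_code, err.zwave_error_message
        )
    except FailedCommand as err:
        _LOGGER.error("Command %s failed: %s", err.message_id, err.error_code)
    return None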
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum, IntEnum
from importlib import metadata
from typing import TypedDict
PACKAGE_NAME = "zwave-js-server-python"
__version__ = metadata.version(PACKAGE_NAME)
# minimal server schema version we can handle
MIN_SERVER_SCHEMA_VERSION = 31
# max server schema version we can handle (and our code is compatible with)
MAX_SERVER_SCHEMA_VERSION = 31
VALUE_UNKNOWN = "unknown"
NOT_INTERVIEWED = "None"
INTERVIEW_FAILED = "Failed"
CURRENT_STATE_PROPERTY = "currentState"
TARGET_STATE_PROPERTY = "targetState"
CURRENT_VALUE_PROPERTY = "currentValue"
TARGET_VALUE_PROPERTY = "targetValue"
DURATION_PROPERTY = "duration"
TRANSITION_DURATION_OPTION = "transitionDuration"
VOLUME_OPTION = "volume"
class CommandStatus(str, Enum):
"""Status of a command sent to zwave-js-server."""
ACCEPTED = "accepted"
QUEUED = "queued"
# Multiple inheritance so that LogLevel will JSON serialize properly
# Reference: https://stackoverflow.com/a/51976841
class LogLevel(str, Enum):
"""Enum for log levels used by node-zwave-js."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/log/shared.ts#L12
# https://github.com/winstonjs/triple-beam/blame/master/config/npm.js#L14
ERROR = "error"
WARN = "warn"
INFO = "info"
HTTP = "http"
VERBOSE = "verbose"
DEBUG = "debug"
SILLY = "silly"
class CommandClass(IntEnum):
"""Enum with all known CommandClasses."""
SENSOR_ALARM = 156
SILENCE_ALARM = 157
SWITCH_ALL = 39
ANTITHEFT = 93
ANTITHEFT_UNLOCK = 126
APPLICATION_CAPABILITY = 87
APPLICATION_STATUS = 34
ASSOCIATION = 133
ASSOCIATION_COMMAND_CONFIGURATION = 155
ASSOCIATION_GRP_INFO = 89
AUTHENTICATION = 161
AUTHENTICATION_MEDIA_WRITE = 162
BARRIER_OPERATOR = 102
BASIC = 32
BASIC_TARIFF_INFO = 54
BASIC_WINDOW_COVERING = 80
BATTERY = 128
SENSOR_BINARY = 48
SWITCH_BINARY = 37
SWITCH_TOGGLE_BINARY = 40
CLIMATE_CONTROL_SCHEDULE = 70
CENTRAL_SCENE = 91
CLOCK = 129
SWITCH_COLOR = 51
CONFIGURATION = 112
CONTROLLER_REPLICATION = 33
CRC_16_ENCAP = 86
DCP_CONFIG = 58
DCP_MONITOR = 59
DEVICE_RESET_LOCALLY = 90
DOOR_LOCK = 98
DOOR_LOCK_LOGGING = 76
ENERGY_PRODUCTION = 144
ENTRY_CONTROL = 111
FIRMWARE_UPDATE_MD = 122
GENERIC_SCHEDULE = 163
GEOGRAPHIC_LOCATION = 140
GROUPING_NAME = 123
HAIL = 130
HRV_STATUS = 55
HRV_CONTROL = 57
HUMIDITY_CONTROL_MODE = 109
HUMIDITY_CONTROL_OPERATING_STATE = 110
HUMIDITY_CONTROL_SETPOINT = 100
INCLUSION_CONTROLLER = 116
INDICATOR = 135
IP_ASSOCIATION = 92
IP_CONFIGURATION = 154
IR_REPEATER = 160
IRRIGATION = 107
LANGUAGE = 137
LOCK = 118
MAILBOX = 105
MANUFACTURER_PROPRIETARY = 145
MANUFACTURER_SPECIFIC = 114
MARK = 239
METER = 50
METER_TBL_CONFIG = 60
METER_TBL_MONITOR = 61
METER_TBL_PUSH = 62
MTP_WINDOW_COVERING = 81
MULTI_CHANNEL = 96
MULTI_CHANNEL_ASSOCIATION = 142
MULTI_CMD = 143
SENSOR_MULTILEVEL = 49
SWITCH_MULTILEVEL = 38
SWITCH_TOGGLE_MULTILEVEL = 41
NETWORK_MANAGEMENT_BASIC = 77
NETWORK_MANAGEMENT_INCLUSION = 52
NETWORK_MANAGEMENT_INSTALLATION_MAINTENANCE = 103
NETWORK_MANAGEMENT_PRIMARY = 84
NETWORK_MANAGEMENT_PROXY = 82
NO_OPERATION = 0
NODE_NAMING = 119
NODE_PROVISIONING = 120
NOTIFICATION = 113
POWERLEVEL = 115
PREPAYMENT = 63
PREPAYMENT_ENCAPSULATION = 65
PROPRIETARY = 136
PROTECTION = 117
METER_PULSE = 53
RATE_TBL_CONFIG = 72
RATE_TBL_MONITOR = 73
REMOTE_ASSOCIATION_ACTIVATE = 124
REMOTE_ASSOCIATION = 125
SCENE_ACTIVATION = 43
SCENE_ACTUATOR_CONF = 44
SCENE_CONTROLLER_CONF = 45
SCHEDULE = 83
SCHEDULE_ENTRY_LOCK = 78
SCREEN_ATTRIBUTES = 147
SCREEN_MD = 146
SECURITY = 152
SECURITY_2 = 159
SECURITY_SCHEME0_MARK = 61696
SENSOR_CONFIGURATION = 158
SIMPLE_AV_CONTROL = 148
SOUND_SWITCH = 121
SUPERVISION = 108
TARIFF_CONFIG = 74
TARIFF_TBL_MONITOR = 75
THERMOSTAT_FAN_MODE = 68
THERMOSTAT_FAN_STATE = 69
THERMOSTAT_MODE = 64
THERMOSTAT_OPERATING_STATE = 66
THERMOSTAT_SETBACK = 71
THERMOSTAT_SETPOINT = 67
TIME = 138
TIME_PARAMETERS = 139
TRANSPORT_SERVICE = 85
USER_CODE = 99
VERSION = 134
WAKE_UP = 132
WINDOW_COVERING = 106
ZIP = 35
ZIP_6LOWPAN = 79
ZIP_GATEWAY = 95
ZIP_NAMING = 104
ZIP_ND = 88
ZIP_PORTAL = 97
ZWAVEPLUS_INFO = 94
class ProtocolVersion(IntEnum):
"""Protocol version."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/node/Types.ts#L149
UNKNOWN = 0
VERSION_2_0 = 1
VERSION_4_2X_OR_5_0X = 2
VERSION_4_5X_OR_6_0X = 3
class NodeStatus(IntEnum):
"""Enum with all Node status values."""
# https://zwave-js.github.io/node-zwave-js/#/api/node?id=status
UNKNOWN = 0
ASLEEP = 1
AWAKE = 2
DEAD = 3
ALIVE = 4
class ConfigurationValueType(str, Enum):
"""Enum for configuration value types."""
BOOLEAN = "boolean"
ENUMERATED = "enumerated"
MANUAL_ENTRY = "manual_entry"
RANGE = "range"
UNDEFINED = "undefined"
class NodeType(IntEnum):
"""Enum with all Node types."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/capabilities/NodeInfo.ts#L151-L156
CONTROLLER = 0
END_NODE = 1
# Exclusion enums
class ExclusionStrategy(IntEnum):
"""Enum with all exclusion strategies."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/Inclusion.ts#L49-L56
EXCLUDE_ONLY = 0
DISABLE_PROVISIONING_ENTRY = 1
UNPROVISION = 2
# Inclusion enums
class InclusionStrategy(IntEnum):
"""Enum for all known inclusion strategies."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/Inclusion.ts#L9-L46
DEFAULT = 0
SMART_START = 1
INSECURE = 2
SECURITY_S0 = 3
SECURITY_S2 = 4
class SecurityClass(IntEnum):
"""Enum for all known security classes."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/security/SecurityClass.ts#L3-L17
NONE = -1
S2_UNAUTHENTICATED = 0
S2_AUTHENTICATED = 1
S2_ACCESS_CONTROL = 2
S0_LEGACY = 7
class QRCodeVersion(IntEnum):
"""Enum for all known QR Code versions."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/security/QR.ts#L43-L46
S2 = 0
SMART_START = 1
class Protocols(IntEnum):
"""Enum for all known protocols."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/capabilities/Protocols.ts#L1-L4
ZWAVE = 0
ZWAVE_LONG_RANGE = 1
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/security/QR.ts#L41
MINIMUM_QR_STRING_LENGTH = 52
class ZwaveFeature(IntEnum):
"""Enum for all known Zwave features."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/Features.ts#L4
SMART_START = 0
class PowerLevel(IntEnum):
"""Enum for all known power levels."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/commandclass/PowerlevelCC.ts#L38
NORMAL_POWER = 0
DBM_MINUS_1 = 1
DBM_MINUS_2 = 2
DBM_MINUS_3 = 3
DBM_MINUS_4 = 4
DBM_MINUS_5 = 5
DBM_MINUS_6 = 6
DBM_MINUS_7 = 7
DBM_MINUS_8 = 8
DBM_MINUS_9 = 9
class InclusionState(IntEnum):
"""Enum for all known inclusion states."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/Inclusion.ts#L154
IDLE = 0
INCLUDING = 1
EXCLUDING = 2
BUSY = 3
SMART_START = 4
class RFRegion(IntEnum):
"""Enum for all known RF regions."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/serialapi/misc/SerialAPISetupMessages.ts#L41
EUROPE = 0
USA = 1
AUSTRALIA_AND_NEW_ZEALAND = 2
HONG_KONG = 3
INDIA = 5
ISRAEL = 6
RUSSIA = 7
CHINA = 8
USA_LONG_RANGE = 9
JAPAN = 32
KOREA = 33
UNKNOWN = 254
DEFAULT_EU = 255
class ProtocolDataRate(IntEnum):
"""Enum for all known protocol data rates."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/capabilities/Protocols.ts#L6
ZWAVE_9K6 = 1
ZWAVE_40K = 2
ZWAVE_100K = 3
LONG_RANGE_100K = 4
class RssiError(IntEnum):
"""Enum for all known RSSI errors."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/SendDataShared.ts#L79
NOT_AVAILABLE = 127
RECEIVER_SATURATED = 126
NO_SIGNAL_DETECTED = 125
class ProvisioningEntryStatus(IntEnum):
"""Enum for all known provisioning entry statuses."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/Inclusion.ts#L136
ACTIVE = 0
INACTIVE = 1
class SecurityBootstrapFailure(IntEnum):
"""Enum with all security bootstrap failure reasons."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/Inclusion.ts#L16
USER_CANCELED = 0
NO_KEYS_CONFIGURED = 1
S2_NO_USER_CALLBACKS = 2
TIMEOUT = 3
PARAMETER_MISMATCH = 4
NODE_CANCELED = 5
S2_INCORRECT_PIN = 6
S2_WRONG_SECURITY_LEVEL = 7
UNKNOWN = 8
class SetValueStatus(IntEnum):
"""Enum for all known setValue statuses."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/cc/src/lib/API.ts#L83
# The device reports no support for this command
NO_DEVICE_SUPPORT = 0
# The device has accepted the command and is working on it
WORKING = 1
# The device has rejected the command
FAIL = 2
# The endpoint specified in the value ID does not exist
ENDPOINT_NOT_FOUND = 3
# The given CC or its API is not implemented (yet) or it has no `setValue` implementation
NOT_IMPLEMENTED = 4
# The value to set (or a related value) is invalid
INVALID_VALUE = 5
# The command was sent successfully, but it is unknown whether it was executed
SUCCESS_UNSUPERVISED = 254
# The device has executed the command successfully
SUCCESS = 255
SET_VALUE_SUCCESS = (
SetValueStatus.SUCCESS,
SetValueStatus.SUCCESS_UNSUPERVISED,
SetValueStatus.WORKING,
)
class RemoveNodeReason(IntEnum):
"""Enum for all known reasons why a node was removed."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/Inclusion.ts#L266
# The node was excluded by the user or an inclusion controller
EXCLUDED = 0
# The node was excluded by an inclusion controller
PROXY_EXCLUDED = 1
# The node was removed using the "remove failed node" feature
REMOVE_FAILED = 2
# The node was replaced using the "replace failed node" feature
REPLACED = 3
# The node was replaced by an inclusion controller
PROXY_REPLACED = 4
# The node was reset locally and was auto-removed
RESET = 5
# SmartStart inclusion failed, and the node was auto-removed as a result.
SMART_START_FAILED = 6
class Weekday(IntEnum):
"""Enum for all known weekdays."""
UNKNOWN = 0
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
SUNDAY = 7
class DateAndTimeDataType(TypedDict, total=False):
"""Represent a date and time data type."""
hour: int
minute: int
weekday: int
second: int
year: int
month: int
day: int
dstOffset: int
standardOffset: int
@dataclass
class DateAndTime:
"""Represent a date and time."""
data: DateAndTimeDataType
hour: int | None = field(init=False)
minute: int | None = field(init=False)
weekday: Weekday | None = field(default=None, init=False)
second: int | None = field(init=False)
year: int | None = field(init=False)
month: int | None = field(init=False)
day: int | None = field(init=False)
dst_offset: int | None = field(init=False)
standard_offset: int | None = field(init=False)
def __post_init__(self) -> None:
"""Post initialization."""
self.hour = self.data.get("hour")
self.minute = self.data.get("minute")
if weekday := self.data.get("weekday"):
self.weekday = Weekday(weekday)
self.second = self.data.get("second")
self.year = self.data.get("year")
self.month = self.data.get("month")
self.day = self.data.get("day")
self.dst_offset = self.data.get("dstOffset")
self.standard_offset = self.data.get("standardOffset")
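# Usage sketch (illustrative only, not part of the library): DateAndTime wraps
# the raw dict reported by the server; fields absent from the dict stay None.
def _date_and_time_example() -> None:
    dt = DateAndTime({"hour": 13, "minute": 30, "weekday": 1})
    assert dt.hour == 13
    assert dt.weekday is Weekday.MONDAY
    assert dt.year is None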
class ControllerStatus(IntEnum):
"""Enum for all known controller statuses."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/core/src/consts/ControllerStatus.TS
# The controller is ready to accept commands and transmit
READY = 0
# The controller is unresponsive
UNRESPONSIVE = 1
# The controller is unable to transmit
JAMMED = 2

# === end of zwave_js_server/const/__init__.py (zwave-js-server-python 0.51.0) ===
from __future__ import annotations
from enum import IntEnum
THERMOSTAT_MODE_PROPERTY = "mode"
THERMOSTAT_SETPOINT_PROPERTY = "setpoint"
THERMOSTAT_OPERATING_STATE_PROPERTY = "state"
THERMOSTAT_CURRENT_TEMP_PROPERTY = "Air temperature"
THERMOSTAT_HUMIDITY_PROPERTY = "Humidity"
THERMOSTAT_FAN_MODE_PROPERTY = "mode"
THERMOSTAT_FAN_OFF_PROPERTY = "off"
THERMOSTAT_FAN_STATE_PROPERTY = "state"
class ThermostatMode(IntEnum):
"""Enum with all (known/used) Z-Wave ThermostatModes."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/commandclass/ThermostatModeCC.ts#L53-L70
OFF = 0
HEAT = 1
COOL = 2
AUTO = 3
AUXILIARY = 4
RESUME_ON = 5
FAN = 6
FURNACE = 7
DRY = 8
MOIST = 9
AUTO_CHANGE_OVER = 10
HEATING_ECON = 11
COOLING_ECON = 12
AWAY = 13
FULL_POWER = 15
MANUFACTURER_SPECIFIC = 31
class ThermostatOperatingState(IntEnum):
"""Enum with all (known/used) Z-Wave Thermostat OperatingStates."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/commandclass/ThermostatOperatingStateCC.ts#L38-L51
IDLE = 0
HEATING = 1
COOLING = 2
FAN_ONLY = 3
PENDING_HEAT = 4
PENDING_COOL = 5
VENT_ECONOMIZER = 6
AUX_HEATING = 7
SECOND_STAGE_HEATING = 8
SECOND_STAGE_COOLING = 9
SECOND_STAGE_AUX_HEAT = 10
THIRD_STAGE_AUX_HEAT = 11
class ThermostatSetpointType(IntEnum):
"""
Enum with all (known/used) Z-Wave Thermostat Setpoint Types.
Returns tuple of (property_key, property_key_name).
"""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/commandclass/ThermostatSetpointCC.ts#L53-L66
NA = 0
HEATING = 1
COOLING = 2
FURNACE = 7
DRY_AIR = 8
MOIST_AIR = 9
AUTO_CHANGEOVER = 10
ENERGY_SAVE_HEATING = 11
ENERGY_SAVE_COOLING = 12
AWAY_HEATING = 13
AWAY_COOLING = 14
FULL_POWER = 15
THERMOSTAT_MODE_SETPOINT_MAP: dict[int, list[ThermostatSetpointType]] = {
ThermostatMode.OFF: [],
ThermostatMode.HEAT: [ThermostatSetpointType.HEATING],
ThermostatMode.COOL: [ThermostatSetpointType.COOLING],
ThermostatMode.AUTO: [
ThermostatSetpointType.HEATING,
ThermostatSetpointType.COOLING,
],
ThermostatMode.AUXILIARY: [ThermostatSetpointType.HEATING],
ThermostatMode.FURNACE: [ThermostatSetpointType.FURNACE],
ThermostatMode.DRY: [ThermostatSetpointType.DRY_AIR],
ThermostatMode.MOIST: [ThermostatSetpointType.MOIST_AIR],
ThermostatMode.AUTO_CHANGE_OVER: [ThermostatSetpointType.AUTO_CHANGEOVER],
ThermostatMode.HEATING_ECON: [ThermostatSetpointType.ENERGY_SAVE_HEATING],
ThermostatMode.COOLING_ECON: [ThermostatSetpointType.ENERGY_SAVE_COOLING],
ThermostatMode.AWAY: [
ThermostatSetpointType.AWAY_HEATING,
ThermostatSetpointType.AWAY_COOLING,
],
ThermostatMode.FULL_POWER: [ThermostatSetpointType.FULL_POWER],
}

# === end of zwave_js_server/const/command_class/thermostat.py (zwave-js-server-python 0.51.0) ===
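
# Usage sketch (illustrative only, not part of the library): resolve which
# setpoint types apply to the thermostat's current mode, e.g. to decide which
# setpoint values to read or write.
from zwave_js_server.const.command_class.thermostat import (
    THERMOSTAT_MODE_SETPOINT_MAP,
    ThermostatMode,
    ThermostatSetpointType,
)


def setpoints_for_mode(mode: ThermostatMode) -> list[ThermostatSetpointType]:
    # Modes without an entry (e.g. FAN) have no associated setpoints
    return THERMOSTAT_MODE_SETPOINT_MAP.get(mode, [])


assert setpoints_for_mode(ThermostatMode.AUTO) == [
    ThermostatSetpointType.HEATING,
    ThermostatSetpointType.COOLING,
]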
from __future__ import annotations
from enum import IntEnum
VALUE_PROPERTY = "value"
CC_SPECIFIC_SCALE = "scale"
CC_SPECIFIC_METER_TYPE = "meterType"
CC_SPECIFIC_RATE_TYPE = "rateType"
RESET_METER_CC_API = "reset"
# optional attributes when calling the Meter CC reset API.
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/commandclass/MeterCC.ts#L873-L881
RESET_METER_OPTION_TARGET_VALUE = "targetValue"
RESET_METER_OPTION_TYPE = "type"
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/config/config/meters.json
class MeterType(IntEnum):
"""Enum with all known meter types."""
ELECTRIC = 1
GAS = 2
WATER = 3
HEATING = 4
COOLING = 5
class MeterScaleType(IntEnum):
"""Common base class for meter scale enums."""
class ElectricScale(MeterScaleType):
"""Enum with all known electric meter scale values."""
KILOWATT_HOUR = 0
KILOVOLT_AMPERE_HOUR = 1
WATT = 2
PULSE_COUNT = 3
VOLT = 4
AMPERE = 5
POWER_FACTOR = 6
KILOVOLT_AMPERE_REACTIVE = 7
KILOVOLT_AMPERE_REACTIVE_HOUR = 8
class GasScale(MeterScaleType):
"""Enum with all known gas meter scale values."""
CUBIC_METER = 0
CUBIC_FEET = 1
PULSE_COUNT = 3
class WaterScale(MeterScaleType):
"""Enum with all known water meter scale values."""
CUBIC_METER = 0
CUBIC_FEET = 1
US_GALLON = 2
PULSE_COUNT = 3
class HeatingScale(MeterScaleType):
"""Enum with all known heating meter scale values."""
KILOWATT_HOUR = 0
CoolingScale = HeatingScale
METER_TYPE_TO_SCALE_ENUM_MAP: dict[MeterType, type[MeterScaleType]] = {
MeterType.ELECTRIC: ElectricScale,
MeterType.GAS: GasScale,
MeterType.WATER: WaterScale,
MeterType.HEATING: HeatingScale,
MeterType.COOLING: CoolingScale,
}
ENERGY_TOTAL_INCREASING_METER_TYPES: list[MeterScaleType] = [
ElectricScale.KILOWATT_HOUR,
ElectricScale.KILOVOLT_AMPERE_HOUR,
ElectricScale.KILOVOLT_AMPERE_REACTIVE_HOUR,
HeatingScale.KILOWATT_HOUR,
CoolingScale.KILOWATT_HOUR,
ElectricScale.PULSE_COUNT,
]
POWER_METER_TYPES: list[MeterScaleType] = [
ElectricScale.WATT,
ElectricScale.KILOVOLT_AMPERE_REACTIVE,
]
POWER_FACTOR_METER_TYPES: list[MeterScaleType] = [ElectricScale.POWER_FACTOR]
VOLTAGE_METER_TYPES: list[MeterScaleType] = [ElectricScale.VOLT]
CURRENT_METER_TYPES: list[MeterScaleType] = [ElectricScale.AMPERE]
GAS_METER_TYPES: list[MeterScaleType] = [
GasScale.CUBIC_METER,
GasScale.CUBIC_FEET,
GasScale.PULSE_COUNT,
]
WATER_METER_TYPES: list[MeterScaleType] = [
WaterScale.CUBIC_METER,
WaterScale.CUBIC_FEET,
WaterScale.US_GALLON,
WaterScale.PULSE_COUNT,
]
UNIT_KILOWATT_HOUR: list[MeterScaleType] = [
ElectricScale.KILOWATT_HOUR,
HeatingScale.KILOWATT_HOUR,
CoolingScale.KILOWATT_HOUR,
]
UNIT_KILOVOLT_AMPERE_HOUR: list[MeterScaleType] = [ElectricScale.KILOVOLT_AMPERE_HOUR]
UNIT_WATT: list[MeterScaleType] = [ElectricScale.WATT]
UNIT_PULSE_COUNT: list[MeterScaleType] = [
ElectricScale.PULSE_COUNT,
GasScale.PULSE_COUNT,
WaterScale.PULSE_COUNT,
]
UNIT_VOLT: list[MeterScaleType] = [ElectricScale.VOLT]
UNIT_AMPERE: list[MeterScaleType] = [ElectricScale.AMPERE]
UNIT_POWER_FACTOR: list[MeterScaleType] = [ElectricScale.POWER_FACTOR]
UNIT_KILOVOLT_AMPERE_REACTIVE: list[MeterScaleType] = [
ElectricScale.KILOVOLT_AMPERE_REACTIVE
]
UNIT_KILOVOLT_AMPERE_REACTIVE_HOUR: list[MeterScaleType] = [
ElectricScale.KILOVOLT_AMPERE_REACTIVE_HOUR
]
UNIT_CUBIC_METER: list[MeterScaleType] = [GasScale.CUBIC_METER, WaterScale.CUBIC_METER]
UNIT_CUBIC_FEET: list[MeterScaleType] = [GasScale.CUBIC_FEET, WaterScale.CUBIC_FEET]
UNIT_US_GALLON: list[MeterScaleType] = [WaterScale.US_GALLON]

# === end of zwave_js_server/const/command_class/meter.py (zwave-js-server-python 0.51.0) ===
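
# Usage sketch (illustrative only, not part of the library): turn the raw
# ccSpecific meterType and scale integers reported for a meter value into the
# typed enums defined above.
from zwave_js_server.const.command_class.meter import (
    METER_TYPE_TO_SCALE_ENUM_MAP,
    POWER_METER_TYPES,
    ElectricScale,
    MeterScaleType,
    MeterType,
)


def resolve_meter_scale(meter_type: int, scale: int) -> MeterScaleType:
    return METER_TYPE_TO_SCALE_ENUM_MAP[MeterType(meter_type)](scale)


assert resolve_meter_scale(1, 2) is ElectricScale.WATT
assert resolve_meter_scale(1, 2) in POWER_METER_TYPES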
from __future__ import annotations
from enum import Enum, IntEnum
from .. import CommandClass
class DoorLockMode(IntEnum):
"""Enum with all (known/used) Z-Wave lock states for CommandClass.DOOR_LOCK."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/commandclass/DoorLockCC.ts#L56-L65
UNSECURED = 0
UNSECURED_WITH_TIMEOUT = 1
INSIDE_UNSECURED = 2
INSIDE_UNSECURED_WITH_TIMEOUT = 3
OUTSIDE_UNSECURED = 4
OUTSIDE_UNSECURED_WITH_TIMEOUT = 5
UNKNOWN = 254
SECURED = 255
class OperationType(IntEnum):
"""Enum with all (known/used) Z-Wave lock operation types."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/cc/src/lib/_Types.ts#L496
CONSTANT = 1
TIMED = 2
DOOR_LOCK_CC_UNSECURED_MAP = {
OperationType.CONSTANT: {
DoorLockMode.UNSECURED,
DoorLockMode.INSIDE_UNSECURED,
DoorLockMode.OUTSIDE_UNSECURED,
},
OperationType.TIMED: {
DoorLockMode.UNSECURED_WITH_TIMEOUT,
DoorLockMode.INSIDE_UNSECURED_WITH_TIMEOUT,
DoorLockMode.OUTSIDE_UNSECURED_WITH_TIMEOUT,
},
}
class LatchStatus(str, Enum):
"""Enum with all (known/used) Z-Wave latch statuses."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/cc/src/cc/DoorLockCC.ts#L854
OPEN = "open"
CLOSED = "closed"
class BoltStatus(str, Enum):
"""Enum with all (known/used) Z-Wave bolt statuses."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/cc/src/cc/DoorLockCC.ts#L854
LOCKED = "locked"
UNLOCKED = "unlocked"
class DoorStatus(str, Enum):
"""Enum with all (known/used) Z-Wave door statuses."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/cc/src/cc/DoorLockCC.ts#L854
OPEN = "open"
CLOSED = "closed"
class CodeSlotStatus(IntEnum):
"""Enum with all (known/used) Z-Wave code slot statuses."""
AVAILABLE = 0
ENABLED = 1
DISABLED = 2
# Depending on the Command Class being used by the lock, the lock state is
# different so we need a map to track it
LOCK_CMD_CLASS_TO_LOCKED_STATE_MAP = {
CommandClass.DOOR_LOCK: DoorLockMode.SECURED,
CommandClass.LOCK: True,
}
# Door Lock CC value constants
BOLT_STATUS_PROPERTY = "boltStatus"
CURRENT_AUTO_RELOCK_TIME_PROPERTY = "autoRelockTime"
CURRENT_BLOCK_TO_BLOCK_PROPERTY = "blockToBlock"
CURRENT_HOLD_AND_RELEASE_TIME_PROPERTY = "holdAndReleaseTime"
CURRENT_INSIDE_HANDLES_CAN_OPEN_DOOR_PROPERTY = "insideHandlesCanOpenDoor"
CURRENT_LOCK_TIMEOUT_PROPERTY = "lockTimeout"
CURRENT_MODE_PROPERTY = "currentMode"
CURRENT_OPERATION_TYPE_PROPERTY = "operationType"
CURRENT_OUTSIDE_HANDLES_CAN_OPEN_DOOR_PROPERTY = "outsideHandlesCanOpenDoor"
CURRENT_TWIST_ASSIST_PROPERTY = "twistAssist"
DOOR_STATUS_PROPERTY = "doorStatus"
LATCH_STATUS_PROPERTY = "latchStatus"
TARGET_MODE_PROPERTY = "targetMode"
# Door Lock CC configuration constants
TARGET_AUTO_RELOCK_TIME_PROPERTY = CURRENT_AUTO_RELOCK_TIME_PROPERTY
TARGET_BLOCK_TO_BLOCK_PROPERTY = CURRENT_BLOCK_TO_BLOCK_PROPERTY
TARGET_HOLD_AND_RELEASE_TIME_PROPERTY = CURRENT_HOLD_AND_RELEASE_TIME_PROPERTY
TARGET_INSIDE_HANDLES_CAN_OPEN_DOOR_PROPERTY = "insideHandlesCanOpenDoorConfiguration"
TARGET_LOCK_TIMEOUT_PROPERTY = "lockTimeoutConfiguration"
TARGET_OPERATION_TYPE_PROPERTY = CURRENT_OPERATION_TYPE_PROPERTY
TARGET_OUTSIDE_HANDLES_CAN_OPEN_DOOR_PROPERTY = "outsideHandlesCanOpenDoorConfiguration"
TARGET_TWIST_ASSIST_PROPERTY = CURRENT_TWIST_ASSIST_PROPERTY
# Lock CC constants
LOCKED_PROPERTY = "locked"
# User Code CC constants
LOCK_USERCODE_PROPERTY = "userCode"
LOCK_USERCODE_STATUS_PROPERTY = "userIdStatus"
ATTR_CODE_SLOT = "code_slot"
ATTR_IN_USE = "in_use"
ATTR_NAME = "name"
ATTR_USERCODE = "usercode"
# Depending on the Command Class being used by the lock, the locked state property
# is different so we need a map to track it
LOCK_CMD_CLASS_TO_PROPERTY_MAP = {
CommandClass.DOOR_LOCK: TARGET_MODE_PROPERTY,
CommandClass.LOCK: LOCKED_PROPERTY,
}

# === end of zwave_js_server/const/command_class/lock.py (zwave-js-server-python 0.51.0) ===
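
# Usage sketch (illustrative only, not part of the library): pick the target
# property and the "locked" value for a lock, depending on whether it uses the
# Door Lock CC or the legacy Lock CC.
from zwave_js_server.const import CommandClass
from zwave_js_server.const.command_class.lock import (
    LOCK_CMD_CLASS_TO_LOCKED_STATE_MAP,
    LOCK_CMD_CLASS_TO_PROPERTY_MAP,
    TARGET_MODE_PROPERTY,
    DoorLockMode,
)


def lock_target(command_class: CommandClass) -> tuple[str, int | bool]:
    return (
        LOCK_CMD_CLASS_TO_PROPERTY_MAP[command_class],
        LOCK_CMD_CLASS_TO_LOCKED_STATE_MAP[command_class],
    )


assert lock_target(CommandClass.DOOR_LOCK) == (
    TARGET_MODE_PROPERTY,
    DoorLockMode.SECURED,
)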
from __future__ import annotations
import logging
from typing import cast
from ..const import CommandClass, CommandStatus, ConfigurationValueType, SetValueStatus
from ..exceptions import (
BulkSetConfigParameterFailed,
InvalidNewValue,
NotFoundError,
SetValueFailed,
ValueTypeError,
)
from ..model.node import Node
from ..model.value import ConfigurationValue, SetValueResult, get_value_id_str
_LOGGER = logging.getLogger(__name__)
def dump_node_state(node: Node) -> dict:
"""Get state from a node."""
return {
**node.data,
"values": {value_id: value.data for value_id, value in node.values.items()},
"endpoints": {idx: endpoint.data for idx, endpoint in node.endpoints.items()},
}
def partial_param_bit_shift(property_key: int) -> int:
"""Get the number of bits to shift the value for a given property key."""
# We can get the binary representation of the property key, reverse it,
# and find the first 1
return bin(property_key)[::-1].index("1")
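# Illustrative check (not part of the library): a partial parameter whose
# bitmask is 0xE0 (0b1110_0000) starts at bit 5, so its raw value is shifted
# left by 5 when packed into the full parameter value.
def _bit_shift_example() -> None:
    assert partial_param_bit_shift(0xE0) == 5
    assert partial_param_bit_shift(0x01) == 0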
async def async_set_config_parameter(
node: Node,
new_value: int | str,
property_or_property_name: int | str,
property_key: int | str | None = None,
endpoint: int = 0,
) -> tuple[ConfigurationValue, CommandStatus]:
"""
Set a value for a config parameter on this node.
new_value and property_ can be provided as labels, so we need to resolve them to
the appropriate key
"""
config_values = node.get_configuration_values()
# If a property name is provided, we have to search for the correct value since
# we can't use value ID
if isinstance(property_or_property_name, str):
try:
zwave_value = next(
config_value
for config_value in config_values.values()
if config_value.property_name == property_or_property_name
and config_value.endpoint == endpoint
)
except StopIteration:
raise NotFoundError(
"Configuration parameter with parameter name "
f"{property_or_property_name} on node {node} endpoint {endpoint} "
"could not be found"
) from None
else:
value_id = get_value_id_str(
node,
CommandClass.CONFIGURATION,
property_or_property_name,
endpoint=endpoint,
property_key=property_key,
)
if value_id not in config_values:
raise NotFoundError(
f"Configuration parameter with value ID {value_id} could not be "
"found"
) from None
zwave_value = config_values[value_id]
new_value = _validate_and_transform_new_value(zwave_value, new_value)
# Finally attempt to set the value and return the Value object if successful
result = await node.async_set_value(zwave_value, new_value)
if result and result.status not in (
SetValueStatus.WORKING,
SetValueStatus.SUCCESS,
SetValueStatus.SUCCESS_UNSUPERVISED,
):
raise SetValueFailed(str(result))
status = CommandStatus.ACCEPTED if result is not None else CommandStatus.QUEUED
return zwave_value, status
async def async_bulk_set_partial_config_parameters(
node: Node,
property_: int,
new_value: int | dict[int | str, int | str],
endpoint: int = 0,
) -> CommandStatus:
"""Bulk set partial configuration values on this node."""
config_values = node.get_configuration_values()
partial_param_values = {
value_id: value
for value_id, value in config_values.items()
if value.property_ == property_
and value.endpoint == endpoint
and value.property_key is not None
}
if not partial_param_values:
# If we find a value with this property_, we know this value isn't split
# into partial params
if (
get_value_id_str(
node, CommandClass.CONFIGURATION, property_, endpoint=endpoint
)
in config_values
):
# If the new value is provided as a dict, we don't have enough information
# to set the parameter.
if isinstance(new_value, dict):
raise ValueTypeError(
f"Configuration parameter {property_} for node {node.node_id} "
f"endpoint {endpoint} does not have partials"
)
# If the new value is provided as an int, we may as well try to set it
# using the standard utility function
_LOGGER.info(
"Falling back to async_set_config_parameter because no partials "
"were found"
)
_, cmd_status = await async_set_config_parameter(
node, new_value, property_, endpoint=endpoint
)
return cmd_status
# Otherwise if we can't find any values with this property, this config
# parameter does not exist
raise NotFoundError(
f"Configuration parameter {property_} for node {node.node_id} endpoint "
f"{endpoint} not found"
)
# If new_value is a dictionary, we need to calculate the full value to send
if isinstance(new_value, dict):
new_value = _get_int_from_partials_dict(
node, partial_param_values, property_, new_value, endpoint=endpoint
)
else:
_validate_raw_int(partial_param_values, new_value)
cmd_response = await node.async_send_command(
"set_value",
valueId={
"commandClass": CommandClass.CONFIGURATION.value,
"endpoint": endpoint,
"property": property_,
},
value=new_value,
require_schema=29,
)
# If we didn't wait for a response, we assume the command has been queued
if cmd_response is None:
return CommandStatus.QUEUED
result = SetValueResult(cmd_response["result"])
if result.status not in (
SetValueStatus.WORKING,
SetValueStatus.SUCCESS,
SetValueStatus.SUCCESS_UNSUPERVISED,
):
raise SetValueFailed(str(result))
return CommandStatus.ACCEPTED
def _validate_and_transform_new_value(
zwave_value: ConfigurationValue, new_value: int | str
) -> int:
"""Validate a new value and return the integer value to set."""
# If needed, convert a state label to its key. We know the state exists because
# of the validation above.
if isinstance(new_value, str):
try:
new_value = int(
next(
key
for key, label in zwave_value.metadata.states.items()
if label == new_value
)
)
except StopIteration:
raise InvalidNewValue(
f"State '{new_value}' not found for parameter {zwave_value.value_id}"
) from None
if zwave_value.configuration_value_type == ConfigurationValueType.UNDEFINED:
# We need to use the Configuration CC API to set the value for this type
raise NotImplementedError("Configuration values of undefined type can't be set")
return new_value
def _bulk_set_validate_and_transform_new_value(
zwave_value: ConfigurationValue, property_key: int, new_partial_value: int | str
) -> int:
"""
Validate and transform new value for a bulk set function call.
Returns a bulk set friendly error if validation fails.
"""
try:
return _validate_and_transform_new_value(zwave_value, new_partial_value)
except (InvalidNewValue, NotImplementedError) as err:
raise BulkSetConfigParameterFailed(
f"Config parameter {zwave_value.value_id} failed validation on partial "
f"parameter {property_key}"
) from err
def _get_int_from_partials_dict(
node: Node,
partial_param_values: dict[str, ConfigurationValue],
property_: int,
new_value: dict[int | str, int | str],
endpoint: int = 0,
) -> int:
"""Take an input dict for a set of partial values and compute the raw int value."""
int_value = 0
provided_partial_values = []
# For each property key provided, we bit shift the partial value using the
# property_key
for property_key_or_name, partial_value in new_value.items():
# If the dict key is a property key, we can generate the value ID to find the
# partial value
if isinstance(property_key_or_name, int):
value_id = get_value_id_str(
node,
CommandClass.CONFIGURATION,
property_,
property_key=property_key_or_name,
endpoint=endpoint,
)
if value_id not in partial_param_values:
raise NotFoundError(
f"Bitmask {property_key_or_name} ({hex(property_key_or_name)}) "
f"not found for parameter {property_} on node {node} endpoint "
f"{endpoint}"
)
zwave_value = partial_param_values[value_id]
# If the dict key is a property name, we have to find the value from the list
# of partial param values
else:
try:
zwave_value = next(
value
for value in partial_param_values.values()
if value.property_name == property_key_or_name
and value.endpoint == endpoint
)
except StopIteration:
raise NotFoundError(
f"Partial parameter with label '{property_key_or_name}'"
f"not found for parameter {property_} on node {node} endpoint "
f"{endpoint}"
) from None
provided_partial_values.append(zwave_value)
property_key = cast(int, zwave_value.property_key)
partial_value = _bulk_set_validate_and_transform_new_value(
zwave_value, property_key, partial_value
)
int_value += partial_value << partial_param_bit_shift(property_key)
# To set partial parameters in bulk, we also have to include cached values for
# property keys that haven't been specified
missing_values = set(partial_param_values.values()) - set(provided_partial_values)
int_value += sum(
cast(int, property_value.value)
<< partial_param_bit_shift(cast(int, property_value.property_key))
for property_value in missing_values
)
return int_value
def _validate_raw_int(
partial_param_values: dict[str, ConfigurationValue], new_value: int
) -> None:
"""
Validate raw value against all partial values.
Raises if a partial value in the raw value is invalid.
"""
# Break down the bulk value into partial values and validate them against
# each partial parameter's metadata by looping through the property values
# starting with the highest property key
for zwave_value in sorted(
partial_param_values.values(),
key=lambda val: cast(int, val.property_key),
reverse=True,
):
property_key = cast(int, zwave_value.property_key)
multiplication_factor = 2 ** partial_param_bit_shift(property_key)
partial_value = int(new_value / multiplication_factor)
new_value = new_value % multiplication_factor
_bulk_set_validate_and_transform_new_value(
zwave_value, property_key, partial_value
)

# === end of zwave_js_server/util/node.py (zwave-js-server-python 0.51.0) ===
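
# Usage sketch (illustrative only, not part of the library): set a
# configuration parameter on a node by its state label.  The parameter number
# (101) and label ("Enabled") are made-up placeholders.
import logging

from zwave_js_server.const import CommandStatus
from zwave_js_server.model.node import Node
from zwave_js_server.util.node import async_set_config_parameter

_LOGGER = logging.getLogger(__name__)


async def set_parameter_example(node: Node) -> None:
    zwave_value, status = await async_set_config_parameter(node, "Enabled", 101)
    if status == CommandStatus.QUEUED:
        # Sleeping nodes accept the change when they wake up
        _LOGGER.info("Parameter %s queued for node %s", zwave_value, node)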
from __future__ import annotations
from typing import Any, cast
from ..client import Client
from ..const import CommandClass
from ..model.node import Node, _get_value_id_dict_from_value_data
from ..model.value import SetValueResult, ValueDataType
async def _async_send_command(
client: Client,
command: str,
nodes: list[Node] | None = None,
require_schema: int | None = None,
**kwargs: Any,
) -> dict:
"""Send a multicast command."""
if nodes:
cmd = {
"command": f"multicast_group.{command}",
"nodeIDs": [node.node_id for node in nodes],
**kwargs,
}
else:
cmd = {"command": f"broadcast_node.{command}", **kwargs}
return await client.async_send_command(cmd, require_schema)
async def async_multicast_set_value(
client: Client,
new_value: Any,
value_data: ValueDataType,
nodes: list[Node] | None = None,
options: dict | None = None,
) -> SetValueResult:
"""Send a multicast set_value command."""
result = await _async_send_command(
client,
"set_value",
nodes,
valueId=_get_value_id_dict_from_value_data(value_data),
value=new_value,
options=options,
require_schema=29,
)
return SetValueResult(result["result"])
async def async_multicast_get_endpoint_count(
client: Client, nodes: list[Node] | None = None
) -> int:
"""Send a multicast get_endpoint_count command."""
result = await _async_send_command(
client, "get_endpoint_count", nodes, require_schema=5
)
return cast(int, result["count"])
async def async_multicast_endpoint_supports_cc(
client: Client,
endpoint: int,
command_class: CommandClass,
nodes: list[Node] | None = None,
) -> bool:
"""Send a supports_cc command to a multicast endpoint."""
result = await _async_send_command(
client,
"supports_cc",
nodes,
index=endpoint,
commandClass=command_class,
require_schema=5,
)
return cast(bool, result["supported"])
async def async_multicast_endpoint_get_cc_version(
client: Client,
endpoint: int,
command_class: CommandClass,
nodes: list[Node] | None = None,
) -> int:
"""Send a get_cc_version command to a multicast endpoint."""
result = await _async_send_command(
client,
"get_cc_version",
nodes,
index=endpoint,
commandClass=command_class,
require_schema=5,
)
return cast(int, result["version"])
async def async_multicast_endpoint_invoke_cc_api(
client: Client,
endpoint: int,
command_class: CommandClass,
method_name: str,
args: list[Any] | None = None,
nodes: list[Node] | None = None,
) -> Any:
"""Send a invoke_cc_api command to a multicast endpoint."""
result = await _async_send_command(
client,
"invoke_cc_api",
nodes,
index=endpoint,
commandClass=command_class,
methodName=method_name,
args=args,
require_schema=5,
)
return result["response"]
async def async_multicast_endpoint_supports_cc_api(
client: Client,
endpoint: int,
command_class: CommandClass,
nodes: list[Node] | None = None,
) -> bool:
"""Send a supports_cc_api command to a multicast endpoint."""
result = await _async_send_command(
client,
"supports_cc_api",
nodes,
index=endpoint,
commandClass=command_class,
require_schema=5,
)
return cast(bool, result["supported"]) | zwave-js-server-python | /zwave_js_server_python-0.51.0-py3-none-any.whl/zwave_js_server/util/multicast.py | multicast.py |
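
# Usage sketch (illustrative only, not part of the library): multicast a
# Binary Switch targetValue = True ("turn on") to a group of nodes.  The value
# layout follows the ValueDataType fields defined in the value model.
from zwave_js_server.client import Client
from zwave_js_server.const import CommandClass
from zwave_js_server.model.node import Node
from zwave_js_server.model.value import SetValueResult, ValueDataType
from zwave_js_server.util.multicast import async_multicast_set_value


async def multicast_switch_on(client: Client, nodes: list[Node]) -> SetValueResult:
    value_data: ValueDataType = {
        "commandClass": CommandClass.SWITCH_BINARY,
        "endpoint": 0,
        "property": "targetValue",
    }
    return await async_multicast_set_value(client, True, value_data, nodes=nodes)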
from __future__ import annotations
from typing import TypedDict, cast
from ..const import CommandClass
from ..const.command_class.lock import (
ATTR_CODE_SLOT,
ATTR_IN_USE,
ATTR_NAME,
ATTR_USERCODE,
LOCK_USERCODE_PROPERTY,
LOCK_USERCODE_STATUS_PROPERTY,
CodeSlotStatus,
)
from ..exceptions import NotFoundError
from ..model.node import Node
from ..model.value import SetValueResult, Value, get_value_id_str
def get_code_slot_value(node: Node, code_slot: int, property_name: str) -> Value:
"""Get a code slot value."""
value = node.values.get(
get_value_id_str(
node,
CommandClass.USER_CODE,
property_name,
endpoint=0,
property_key=code_slot,
)
)
if not value:
raise NotFoundError(f"{property_name} for code slot {code_slot} not found")
return value
class CodeSlot(TypedDict, total=False):
"""Represent a code slot."""
code_slot: int # required
name: str # required
in_use: bool | None # required
usercode: str | None
def _get_code_slots(node: Node, include_usercode: bool = False) -> list[CodeSlot]:
"""Get all code slots on the lock and optionally include usercode."""
code_slot = 1
slots: list[CodeSlot] = []
# Loop until we can't find a code slot
while True:
try:
value = get_code_slot_value(node, code_slot, LOCK_USERCODE_PROPERTY)
status_value = get_code_slot_value(
node, code_slot, LOCK_USERCODE_STATUS_PROPERTY
)
except NotFoundError:
return slots
code_slot = int(value.property_key) # type: ignore
in_use = (
None
if status_value.value is None
else status_value.value == CodeSlotStatus.ENABLED
)
# we know that code slots will always have a property key
# that is an int, so we can ignore mypy
slot = {
ATTR_CODE_SLOT: code_slot,
ATTR_NAME: value.metadata.label,
ATTR_IN_USE: in_use,
}
if include_usercode:
slot[ATTR_USERCODE] = value.value
slots.append(cast(CodeSlot, slot))
code_slot += 1
def get_code_slots(node: Node) -> list[CodeSlot]:
"""Get all code slots on the lock and whether or not they are used."""
return _get_code_slots(node, False)
def get_usercodes(node: Node) -> list[CodeSlot]:
"""Get all code slots and usercodes on the lock."""
return _get_code_slots(node, True)
def get_usercode(node: Node, code_slot: int) -> CodeSlot:
"""Get usercode from slot X on the lock."""
value = get_code_slot_value(node, code_slot, LOCK_USERCODE_PROPERTY)
status_value = get_code_slot_value(node, code_slot, LOCK_USERCODE_STATUS_PROPERTY)
code_slot = int(value.property_key) # type: ignore
in_use = (
None
if status_value.value is None
else status_value.value == CodeSlotStatus.ENABLED
)
return cast(
CodeSlot,
{
ATTR_CODE_SLOT: code_slot,
ATTR_NAME: value.metadata.label,
ATTR_IN_USE: in_use,
ATTR_USERCODE: value.value,
},
)
async def get_usercode_from_node(node: Node, code_slot: int) -> CodeSlot:
"""
Fetch a usercode directly from a node.
Should be used when Z-Wave JS's ValueDB hasn't been populated for this code slot.
This call will populate the ValueDB and trigger value update events from the
driver.
"""
await node.async_invoke_cc_api(
CommandClass.USER_CODE, "get", code_slot, wait_for_result=True
)
return get_usercode(node, code_slot)
async def set_usercode(
node: Node, code_slot: int, usercode: str
) -> SetValueResult | None:
"""Set the usercode to index X on the lock."""
value = get_code_slot_value(node, code_slot, LOCK_USERCODE_PROPERTY)
if len(str(usercode)) < 4:
raise ValueError("User code must be at least 4 digits")
return await node.async_set_value(value, usercode)
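# Illustrative usage sketch (not part of this module's API); the slot number and code
# below are assumptions for demonstration only.
async def _example_set_and_read_usercode(node: Node) -> CodeSlot:
    """Set a user code in slot 1, then read the cached slot data back."""
    await set_usercode(node, 1, "1234")
    return get_usercode(node, 1)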
async def clear_usercode(node: Node, code_slot: int) -> SetValueResult | None:
"""Clear a code slot on the lock."""
value = get_code_slot_value(node, code_slot, LOCK_USERCODE_STATUS_PROPERTY)
    return await node.async_set_value(value, CodeSlotStatus.AVAILABLE.value)

# End of zwave_js_server/util/lock.py
from __future__ import annotations
from dataclasses import dataclass, field
from enum import StrEnum
from typing import TYPE_CHECKING, Any, TypedDict
from ..const import VALUE_UNKNOWN, CommandClass, ConfigurationValueType, SetValueStatus
from ..event import Event
from ..util.helpers import parse_buffer
from .duration import Duration, DurationDataType
if TYPE_CHECKING:
from .node import Node
class ValueType(StrEnum):
"""Enum with all value types."""
ANY = "any"
BOOLEAN = "boolean"
NUMBER = "number"
STRING = "string"
class MetaDataType(TypedDict, total=False):
"""Represent a metadata data dict type."""
type: str # required
readable: bool # required
writeable: bool # required
description: str
label: str
min: int | None
max: int | None
unit: str
states: dict[str, str]
ccSpecific: dict[str, Any]
valueChangeOptions: list[str]
allowManualEntry: bool
valueSize: int
stateful: bool
secret: bool
class ValueDataType(TypedDict, total=False):
"""Represent a value data dict type."""
commandClass: int # required
commandClassName: str # required
endpoint: int
property: int | str # required
propertyName: str # required
propertyKey: int | str
propertyKeyName: str
value: Any
newValue: Any
prevValue: Any
metadata: MetaDataType # required
ccVersion: int # required
def _init_value(node: "Node", val: ValueDataType) -> "Value" | "ConfigurationValue":
"""Initialize a Value object from ValueDataType."""
if val["commandClass"] == CommandClass.CONFIGURATION:
return ConfigurationValue(node, val)
return Value(node, val)
def _get_value_id_str_from_dict(node: "Node", val: ValueDataType) -> str:
"""Return string ID of value from ValueDataType dict."""
return get_value_id_str(
node,
val["commandClass"],
val["property"],
val.get("endpoint"),
val.get("propertyKey"),
)
def get_value_id_str(
node: "Node",
command_class: int,
property_: int | str,
endpoint: int | None = None,
property_key: int | str | None = None,
) -> str:
"""Return string ID of value."""
# If endpoint is not provided, assume root endpoint
endpoint_ = endpoint or 0
value_id = f"{node.node_id}-{command_class}-{endpoint_}-{property_}"
# Property key is only included when it has a value
if property_key is not None:
value_id += f"-{property_key}"
return value_id
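# Illustrative sketch (not part of this module's API): value IDs follow the pattern
# "<node_id>-<command_class>-<endpoint>-<property>[-<property_key>]". The property
# name and slot number below are assumptions for demonstration only.
def _example_user_code_value_id(node: "Node") -> str:
    """Return the value ID of user code slot 1 for the given node."""
    return get_value_id_str(
        node, CommandClass.USER_CODE, "userCode", endpoint=0, property_key=1
    )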
class ValueMetadata:
"""Represent metadata on a value instance."""
def __init__(self, data: MetaDataType) -> None:
"""Initialize metadata."""
self.data = data
@property
def type(self) -> str:
"""Return type."""
return self.data["type"]
@property
def readable(self) -> bool | None:
"""Return readable."""
return self.data.get("readable")
@property
def writeable(self) -> bool | None:
"""Return writeable."""
return self.data.get("writeable")
@property
def label(self) -> str | None:
"""Return label."""
return self.data.get("label")
@property
def description(self) -> str | None:
"""Return description."""
return self.data.get("description")
@property
def min(self) -> int | None:
"""Return min."""
return self.data.get("min")
@property
def max(self) -> int | None:
"""Return max."""
return self.data.get("max")
@property
def unit(self) -> str | None:
"""Return unit."""
return self.data.get("unit")
@property
def states(self) -> dict:
"""Return (optional) states."""
return self.data.get("states", {})
@property
def cc_specific(self) -> dict[str, Any]:
"""Return ccSpecific."""
return self.data.get("ccSpecific", {})
@property
def value_change_options(self) -> list[str]:
"""Return valueChangeOptions."""
return self.data.get("valueChangeOptions", [])
@property
def allow_manual_entry(self) -> bool | None:
"""Return allowManualEntry."""
return self.data.get("allowManualEntry")
@property
def value_size(self) -> int | None:
"""Return valueSize."""
return self.data.get("valueSize")
@property
def stateful(self) -> bool | None:
"""Return stateful."""
return self.data.get("stateful")
@property
def secret(self) -> bool | None:
"""Return secret."""
return self.data.get("secret")
def update(self, data: MetaDataType) -> None:
"""Update data."""
self.data.update(data)
class Value:
"""Represent a Z-Wave JS value."""
def __init__(self, node: "Node", data: ValueDataType) -> None:
"""Initialize value."""
self.node = node
self.data: ValueDataType = {}
self._value: Any = None
self._metadata = ValueMetadata({"type": "unknown"})
self.update(data)
def __repr__(self) -> str:
"""Return the representation."""
return f"{type(self).__name__}(value_id={self.value_id!r})"
def __hash__(self) -> int:
"""Return the hash."""
return hash((self.node, self.value_id))
def __eq__(self, other: object) -> bool:
"""Return whether this instance equals another."""
if not isinstance(other, Value):
return False
return self.node == other.node and self.value_id == other.value_id
@property
def value_id(self) -> str:
"""Return value ID."""
return _get_value_id_str_from_dict(self.node, self.data)
@property
def metadata(self) -> ValueMetadata:
"""Return value metadata."""
return self._metadata
@property
def value(self) -> Any | None:
"""Return value."""
# Treat unknown values like they are None
if self._value == VALUE_UNKNOWN:
return None
return self._value
@property
def command_class_name(self) -> str:
"""Return commandClassName."""
return self.data["commandClassName"]
@property
def command_class(self) -> int:
"""Return commandClass."""
return self.data["commandClass"]
@property
def cc_version(self) -> int:
"""Return commandClass version."""
return self.data["ccVersion"]
@property
def endpoint(self) -> int | None:
"""Return endpoint."""
return self.data.get("endpoint")
@property
def property_(self) -> int | str:
"""Return property.
Note the underscore in the end of this property name.
That's there to not confuse Python to think it's a property
decorator.
"""
return self.data["property"]
@property
def property_key(self) -> int | str | None:
"""Return propertyKey."""
return self.data.get("propertyKey")
@property
def property_name(self) -> str | None:
"""Return propertyName."""
return self.data.get("propertyName")
@property
def property_key_name(self) -> str | None:
"""Return propertyKeyName."""
return self.data.get("propertyKeyName")
def receive_event(self, event: Event) -> None:
"""Receive an event."""
self.update(event.data["args"])
def update(self, data: ValueDataType) -> None:
"""Update data."""
self.data.update(data)
self.data.pop("prevValue", None)
if "newValue" in self.data:
self.data["value"] = self.data.pop("newValue")
if "metadata" in data:
self._metadata.update(data["metadata"])
self._value = self.data.get("value")
# Handle buffer dict and json string in value.
if self._value is not None and self.metadata.type == "buffer":
self._value = parse_buffer(self._value)
class ValueNotification(Value):
"""
Model for a Value Notification message.
https://zwave-js.github.io/node-zwave-js/#/api/node?id=quotvalue-notificationquot
"""
    # The format is the same as a Value message; subclassed to make identification
    # easier and to allow future extension.
class ConfigurationValue(Value):
"""Model for a Configuration Value."""
@property
def configuration_value_type(self) -> ConfigurationValueType:
"""Return configuration value type."""
min_ = self.metadata.min
max_ = self.metadata.max
states = self.metadata.states
allow_manual_entry = self.metadata.allow_manual_entry
type_ = self.metadata.type
if (max_ == 1 and min_ == 0 or type_ == ValueType.BOOLEAN) and not states:
return ConfigurationValueType.BOOLEAN
if (
allow_manual_entry
and not max_ == min_ == 0
and not (max_ is None and min_ is None)
):
return ConfigurationValueType.MANUAL_ENTRY
if states:
return ConfigurationValueType.ENUMERATED
if (max_ is not None or min_ is not None) and not max_ == min_ == 0:
return ConfigurationValueType.RANGE
return ConfigurationValueType.UNDEFINED
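# Illustrative sketch (not part of this module's API): one way a UI layer might map the
# classification above to a widget type. The widget names are assumptions.
def _example_widget_for_config_value(config_value: ConfigurationValue) -> str:
    """Return an (assumed) widget name for a configuration value."""
    return {
        ConfigurationValueType.BOOLEAN: "toggle",
        ConfigurationValueType.ENUMERATED: "dropdown",
        ConfigurationValueType.MANUAL_ENTRY: "number box",
        ConfigurationValueType.RANGE: "slider",
        ConfigurationValueType.UNDEFINED: "read-only text",
    }[config_value.configuration_value_type]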
class SetValueResultDataType(TypedDict, total=False):
"""Represent a setValue result data dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/v11-dev/packages/cc/src/lib/API.ts#L103
status: int # required
remainingDuration: DurationDataType
message: str
@dataclass
class SetValueResult:
"""Result from setValue command."""
data: SetValueResultDataType
status: SetValueStatus = field(init=False)
remaining_duration: Duration | None = field(init=False)
message: str | None = field(init=False)
def __post_init__(self) -> None:
"""Post init."""
self.status = SetValueStatus(self.data["status"])
self.remaining_duration = (
Duration(duration_data)
if (duration_data := self.data.get("remainingDuration"))
else None
)
self.message = self.data.get("message")
def __repr__(self) -> str:
"""Return the representation."""
status = self.status.name.replace("_", " ").title()
if self.status == SetValueStatus.WORKING:
assert self.remaining_duration
return f"{status} ({self.remaining_duration})"
if self.status in (
SetValueStatus.ENDPOINT_NOT_FOUND,
SetValueStatus.INVALID_VALUE,
SetValueStatus.NOT_IMPLEMENTED,
):
assert self.message
return f"{status}: {self.message}"
        return status

# End of zwave_js_server/model/value.py
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Literal, TypedDict
from ..const import CommandClass
class LogMessageContextDataType(TypedDict, total=False):
"""Represent a log message context data dict type."""
source: Literal["config", "serial", "controller", "driver"] # required
type: Literal["controller", "value", "node"]
nodeId: int
header: str
direction: Literal["inbound", "outbound", "none"]
change: Literal["added", "removed", "updated", "notification"]
internal: bool
endpoint: int
commandClass: int
property: int | str
propertyKey: int | str
@dataclass
class LogMessageContext:
"""Represent log message context information."""
data: LogMessageContextDataType
source: Literal["config", "serial", "controller", "driver"] = field(init=False)
type: Literal["controller", "value", "node"] | None = field(init=False)
node_id: int | None = field(init=False)
header: str | None = field(init=False)
direction: Literal["inbound", "outbound", "none"] | None = field(init=False)
change: Literal["added", "removed", "updated", "notification"] | None = field(
init=False
)
internal: bool | None = field(init=False)
endpoint: int | None = field(init=False)
command_class: CommandClass | None = field(init=False, default=None)
property_: int | str | None = field(init=False)
property_key: int | str | None = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.source = self.data["source"]
self.type = self.data.get("type")
self.node_id = self.data.get("nodeId")
self.header = self.data.get("header")
self.direction = self.data.get("direction")
self.change = self.data.get("change")
self.internal = self.data.get("internal")
self.endpoint = self.data.get("endpoint")
if (command_class := self.data.get("commandClass")) is not None:
self.command_class = CommandClass(command_class)
self.property_ = self.data.get("property")
self.property_key = self.data.get("propertyKey")
class LogMessageDataType(TypedDict, total=False):
"""Represent a log message data dict type."""
source: Literal["driver"] # required
event: Literal["logging"] # required
message: str | list[str] # required
formattedMessage: str | list[str] # required
direction: str # required
level: str # required
context: LogMessageContextDataType # required
primaryTags: str
secondaryTags: str
secondaryTagPadding: int
multiline: bool
timestamp: str
label: str
def _process_message(message: str | list[str]) -> list[str]:
"""Process a message and always return a list."""
    if isinstance(message, str):
        return message.splitlines()
    # We will assume each item in the array is on a separate line so we can
    # remove trailing line breaks
    return [line.rstrip("\n") for line in message]
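# Illustrative sketch (not part of this module's API): both accepted message shapes
# normalize to a list of lines without trailing line breaks.
def _example_process_message() -> list[str]:
    """Show the two accepted message shapes producing the same result."""
    assert _process_message("line 1\nline 2") == ["line 1", "line 2"]
    assert _process_message(["line 1\n", "line 2"]) == ["line 1", "line 2"]
    return _process_message("line 1\nline 2")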
@dataclass
class LogMessage:
"""Represent a log message."""
data: LogMessageDataType
message: list[str] = field(init=False)
formatted_message: list[str] = field(init=False)
direction: str = field(init=False)
level: str = field(init=False)
context: LogMessageContext = field(init=False)
primary_tags: str | None = field(init=False)
secondary_tags: str | None = field(init=False)
secondary_tag_padding: int | None = field(init=False)
multiline: bool | None = field(init=False)
timestamp: str | None = field(init=False)
label: str | None = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.message = _process_message(self.data["message"])
self.formatted_message = _process_message(self.data["formattedMessage"])
self.direction = self.data["direction"]
self.level = self.data["level"]
self.context = LogMessageContext(self.data["context"])
self.primary_tags = self.data.get("primaryTags")
self.secondary_tags = self.data.get("secondaryTags")
self.secondary_tag_padding = self.data.get("secondaryTagPadding")
self.multiline = self.data.get("multiline")
self.timestamp = self.data.get("timestamp")
self.label = self.data.get("label") | zwave-js-server-python | /zwave_js_server_python-0.51.0-py3-none-any.whl/zwave_js_server/model/log_message.py | log_message.py |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, TypedDict, cast
from ..const import NodeStatus
from ..event import EventBase
from ..exceptions import FailedCommand, NotFoundError
from .command_class import CommandClass, CommandClassInfo, CommandClassInfoDataType
from .device_class import DeviceClass, DeviceClassDataType
from .value import ConfigurationValue, Value
if TYPE_CHECKING:
from ..client import Client
from .node.data_model import NodeDataType
class EndpointDataType(TypedDict, total=False):
"""Represent an endpoint data dict type."""
nodeId: int # required
index: int # required
deviceClass: DeviceClassDataType | None
installerIcon: int
userIcon: int
endpointLabel: str
commandClasses: list[CommandClassInfoDataType] # required
class Endpoint(EventBase):
"""Model for a Zwave Node's endpoint."""
def __init__(
self,
client: "Client",
data: EndpointDataType,
values: dict[str, ConfigurationValue | Value],
) -> None:
"""Initialize."""
super().__init__()
self.client = client
self.data: EndpointDataType = data
self.values: dict[str, ConfigurationValue | Value] = {}
self.update(data, values)
def __repr__(self) -> str:
"""Return the representation."""
return f"{type(self).__name__}(node_id={self.node_id}, index={self.index})"
def __hash__(self) -> int:
"""Return the hash."""
return hash((self.client.driver, self.node_id, self.index))
def __eq__(self, other: object) -> bool:
"""Return whether this instance equals another."""
if not isinstance(other, Endpoint):
return False
return (
self.client.driver == other.client.driver
and self.node_id == other.node_id
and self.index == other.index
)
@property
def node_id(self) -> int:
"""Return node ID property."""
return self.data["nodeId"]
@property
def index(self) -> int:
"""Return index property."""
return self.data["index"]
@property
def device_class(self) -> DeviceClass | None:
"""Return the device_class."""
if (device_class := self.data.get("deviceClass")) is None:
return None
return DeviceClass(device_class)
@property
def installer_icon(self) -> int | None:
"""Return installer icon property."""
return self.data.get("installerIcon")
@property
def user_icon(self) -> int | None:
"""Return user icon property."""
return self.data.get("userIcon")
@property
def command_classes(self) -> list[CommandClassInfo]:
"""Return all CommandClasses supported on this node."""
return [CommandClassInfo(cc) for cc in self.data["commandClasses"]]
@property
def endpoint_label(self) -> str | None:
"""Return endpoint label property."""
return self.data.get("endpointLabel")
def update(
self, data: EndpointDataType, values: dict[str, ConfigurationValue | Value]
) -> None:
"""Update the endpoint data."""
self.data = data
# Remove stale values
self.values = {
value_id: val for value_id, val in self.values.items() if value_id in values
}
# Populate new values
for value_id, value in values.items():
if value_id not in self.values:
self.values[value_id] = value
async def async_send_command(
self,
cmd: str,
require_schema: int | None = None,
wait_for_result: bool | None = None,
**cmd_kwargs: Any,
) -> dict[str, Any] | None:
"""
Send an endpoint command. For internal use only.
If wait_for_result is not None, it will take precedence, otherwise we will decide
to wait or not based on the node status.
"""
if self.client.driver is None:
raise FailedCommand(
"Command failed", "failed_command", "The client is not connected"
)
node = self.client.driver.controller.nodes[self.node_id]
kwargs = {}
message = {
"command": f"endpoint.{cmd}",
"nodeId": self.node_id,
"endpoint": self.index,
**cmd_kwargs,
}
if require_schema is not None:
kwargs["require_schema"] = require_schema
if wait_for_result or (
wait_for_result is None and node.status != NodeStatus.ASLEEP
):
result = await self.client.async_send_command(message, **kwargs)
return result
await self.client.async_send_command_no_wait(message, **kwargs)
return None
async def async_invoke_cc_api(
self,
command_class: CommandClass,
method_name: str,
*args: Any,
wait_for_result: bool | None = None,
) -> Any:
"""Call endpoint.invoke_cc_api command."""
if not any(cc.id == command_class.value for cc in self.command_classes):
raise NotFoundError(
f"Command class {command_class} not found on endpoint {self}"
)
result = await self.async_send_command(
"invoke_cc_api",
commandClass=command_class.value,
methodName=method_name,
args=list(args),
require_schema=7,
wait_for_result=wait_for_result,
)
if not result:
return None
return result["response"]
async def async_supports_cc_api(self, command_class: CommandClass) -> bool:
"""Call endpoint.supports_cc_api command."""
result = await self.async_send_command(
"supports_cc_api",
commandClass=command_class.value,
require_schema=7,
wait_for_result=True,
)
assert result
return cast(bool, result["supported"])
async def async_supports_cc(self, command_class: CommandClass) -> bool:
"""Call endpoint.supports_cc command."""
result = await self.async_send_command(
"supports_cc",
commandClass=command_class.value,
require_schema=23,
wait_for_result=True,
)
assert result
return cast(bool, result["supported"])
async def async_controls_cc(self, command_class: CommandClass) -> bool:
"""Call endpoint.controls_cc command."""
result = await self.async_send_command(
"controls_cc",
commandClass=command_class.value,
require_schema=23,
wait_for_result=True,
)
assert result
return cast(bool, result["controlled"])
async def async_is_cc_secure(self, command_class: CommandClass) -> bool:
"""Call endpoint.is_cc_secure command."""
result = await self.async_send_command(
"is_cc_secure",
commandClass=command_class.value,
require_schema=23,
wait_for_result=True,
)
assert result
return cast(bool, result["secure"])
    async def async_get_cc_version(self, command_class: CommandClass) -> int:
"""Call endpoint.get_cc_version command."""
result = await self.async_send_command(
"get_cc_version",
commandClass=command_class.value,
require_schema=23,
wait_for_result=True,
)
assert result
return cast(bool, result["version"])
async def async_get_node_unsafe(self) -> "NodeDataType":
"""Call endpoint.get_node_unsafe command."""
result = await self.async_send_command(
"get_node_unsafe",
require_schema=23,
wait_for_result=True,
)
assert result
return cast("NodeDataType", result["node"]) | zwave-js-server-python | /zwave_js_server_python-0.51.0-py3-none-any.whl/zwave_js_server/model/endpoint.py | endpoint.py |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal, cast
from ..event import BaseEventModel, Event, EventBase
from .controller import Controller
from .log_config import LogConfig, LogConfigDataType
from .log_message import LogMessage, LogMessageDataType
try:
from pydantic.v1 import create_model_from_typeddict
except ImportError:
from pydantic import create_model_from_typeddict
if TYPE_CHECKING:
from ..client import Client
class BaseDriverEventModel(BaseEventModel):
"""Base model for a driver event."""
source: Literal["driver"]
class LogConfigUpdatedEventModel(BaseDriverEventModel):
"""Model for `log config updated` event data."""
event: Literal["log config updated"]
config: LogConfigDataType
class AllNodesReadyEventModel(BaseDriverEventModel):
"""Model for `all nodes ready` event data."""
event: Literal["all nodes ready"]
LoggingEventModel = create_model_from_typeddict(
LogMessageDataType, __base__=BaseDriverEventModel
)
DRIVER_EVENT_MODEL_MAP: dict[str, type["BaseDriverEventModel"]] = {
"all nodes ready": AllNodesReadyEventModel,
"log config updated": LogConfigUpdatedEventModel,
"logging": LoggingEventModel,
}
class CheckConfigUpdates:
"""Represent config updates check."""
def __init__(self, data: dict) -> None:
"""Initialize class."""
self.installed_version: str = data["installedVersion"]
self.update_available: bool = data["updateAvailable"]
self.new_version: str | None = data.get("newVersion")
class Driver(EventBase):
"""Represent a Z-Wave JS driver."""
def __init__(
self, client: "Client", state: dict, log_config: LogConfigDataType
) -> None:
"""Initialize driver."""
super().__init__()
self.client = client
self.controller = Controller(client, state)
self.log_config = LogConfig.from_dict(log_config)
def __hash__(self) -> int:
"""Return the hash."""
return hash(self.controller)
def __eq__(self, other: object) -> bool:
"""Return whether this instance equals another."""
if not isinstance(other, Driver):
return False
return self.controller == other.controller
def receive_event(self, event: Event) -> None:
"""Receive an event."""
if event.data["source"] != "driver":
self.controller.receive_event(event)
return
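        # Instantiating the matching pydantic model validates the event payload; the
        # instance itself is intentionally discarded.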
DRIVER_EVENT_MODEL_MAP[event.type](**event.data)
self._handle_event_protocol(event)
self.emit(event.type, event.data)
async def _async_send_command(
self, command: str, require_schema: int | None = None, **kwargs: Any
) -> dict:
"""Send a driver command. For internal use only."""
return await self.client.async_send_command(
{
"command": f"driver.{command}",
**kwargs,
},
require_schema,
)
async def async_update_log_config(self, log_config: LogConfig) -> None:
"""Update log config for driver."""
await self._async_send_command(
"update_log_config", config=log_config.to_dict(), require_schema=4
)
async def async_get_log_config(self) -> LogConfig:
"""Return current log config for driver."""
result = await self._async_send_command("get_log_config", require_schema=4)
return LogConfig.from_dict(result["config"])
async def async_enable_statistics(
self, application_name: str, application_version: str
) -> None:
"""Send command to enable data collection."""
await self._async_send_command(
"enable_statistics",
applicationName=application_name,
applicationVersion=application_version,
require_schema=4,
)
async def async_disable_statistics(self) -> None:
"""Send command to stop listening to log events."""
await self._async_send_command("disable_statistics", require_schema=4)
async def async_is_statistics_enabled(self) -> bool:
"""Send command to start listening to log events."""
result = await self._async_send_command(
"is_statistics_enabled", require_schema=4
)
return cast(bool, result["statisticsEnabled"])
async def async_check_for_config_updates(self) -> CheckConfigUpdates:
"""Send command to check for config updates."""
result = await self._async_send_command(
"check_for_config_updates", require_schema=5
)
return CheckConfigUpdates(result)
async def async_install_config_update(self) -> bool:
"""Send command to install config update."""
result = await self._async_send_command(
"install_config_update", require_schema=5
)
return cast(bool, result["success"])
async def async_set_preferred_scales(
self, scales: dict[str | int, str | int]
) -> None:
"""Send command to set preferred sensor scales."""
await self._async_send_command(
"set_preferred_scales", scales=scales, require_schema=6
)
async def async_enable_error_reporting(self) -> None:
"""Send command to enable Sentry error reporting."""
await self._async_send_command("enable_error_reporting", require_schema=16)
async def async_hard_reset(self) -> None:
"""Send command to hard reset controller."""
await self._async_send_command("hard_reset", require_schema=25)
async def async_try_soft_reset(self) -> None:
"""Send command to try to soft reset controller."""
await self._async_send_command("try_soft_reset", require_schema=25)
async def async_soft_reset(self) -> None:
"""Send command to soft reset controller."""
await self._async_send_command("soft_reset", require_schema=25)
async def async_shutdown(self) -> bool:
"""Send command to shutdown controller."""
data = await self._async_send_command("shutdown", require_schema=27)
return cast(bool, data["success"])
def handle_logging(self, event: Event) -> None:
"""Process a driver logging event."""
event.data["log_message"] = LogMessage(cast(LogMessageDataType, event.data))
def handle_log_config_updated(self, event: Event) -> None:
"""Process a driver log config updated event."""
event.data["log_config"] = self.log_config = LogConfig.from_dict(
event.data["config"]
)
def handle_all_nodes_ready(self, event: Event) -> None:
"""Process a driver all nodes ready event.""" | zwave-js-server-python | /zwave_js_server_python-0.51.0-py3-none-any.whl/zwave_js_server/model/driver.py | driver.py |
from __future__ import annotations
from typing import Any, Literal, TypedDict
class DeviceDeviceDataType(TypedDict, total=False):
"""Represent a device device data dict type."""
productType: str
productId: str
class DeviceDevice:
"""Model for a Zwave Node's device config's device."""
def __init__(self, data: DeviceDeviceDataType) -> None:
"""Initialize."""
self.data = data
@property
def product_type(self) -> str | None:
"""Return product type."""
return self.data.get("productType")
@property
def product_id(self) -> str | None:
"""Return product id."""
return self.data.get("productId")
class DeviceFirmwareVersionRangeDataType(TypedDict, total=False):
"""Represent a device firmware version range data dict type."""
min: str
max: str
class DeviceFirmwareVersionRange:
"""Model for a Zwave Node's device config's firmware version range."""
def __init__(self, data: DeviceFirmwareVersionRangeDataType) -> None:
"""Initialize."""
self.data = data
@property
def min(self) -> str | None:
"""Return min version."""
return self.data.get("min")
@property
def max(self) -> str | None:
"""Return max version."""
return self.data.get("max")
class CommentDataType(TypedDict):
"""Represent a device config's comment data dict type."""
# See PR for suggested meanings of each level:
# https://github.com/zwave-js/node-zwave-js/pull/3947
level: Literal["info", "warning", "error"]
text: str
class DeviceMetadataDataType(TypedDict, total=False):
"""Represent a device metadata data dict type."""
wakeup: str
inclusion: str
exclusion: str
reset: str
manual: str
comments: CommentDataType | list[CommentDataType]
class DeviceMetadata:
"""Model for a Zwave Node's device config's metadata."""
def __init__(self, data: DeviceMetadataDataType) -> None:
"""Initialize."""
self.data = data
@property
def wakeup(self) -> str | None:
"""Return wakeup instructions."""
return self.data.get("wakeup")
@property
def inclusion(self) -> str | None:
"""Return inclusion instructions."""
return self.data.get("inclusion")
@property
def exclusion(self) -> str | None:
"""Return exclusion instructions."""
return self.data.get("exclusion")
@property
def reset(self) -> str | None:
"""Return reset instructions."""
return self.data.get("reset")
@property
def manual(self) -> str | None:
"""Return manual instructions."""
return self.data.get("manual")
@property
def comments(self) -> list[CommentDataType]:
"""Return list of comments about device."""
comments = self.data.get("comments", [])
if isinstance(comments, dict):
return [comments]
return comments
class DeviceConfigDataType(TypedDict, total=False):
"""Represent a device config data dict type."""
filename: str
manufacturer: str
manufacturerId: str
label: str
description: str
devices: list[DeviceDeviceDataType]
firmwareVersion: DeviceFirmwareVersionRangeDataType
associations: dict[str, dict]
paramInformation: dict[str, dict]
supportsZWavePlus: bool
proprietary: dict
compat: dict[str, Any]
metadata: DeviceMetadataDataType
isEmbedded: bool
class DeviceConfig:
"""Model for a Zwave Node's device config."""
def __init__(self, data: DeviceConfigDataType) -> None:
"""Initialize."""
self.data = data
self._devices = [
DeviceDevice(device) for device in self.data.get("devices", [])
]
self._firmware_version = DeviceFirmwareVersionRange(
self.data.get("firmwareVersion", {})
)
self._metadata = DeviceMetadata(self.data.get("metadata", {}))
@property
def filename(self) -> str | None:
"""Return config filename."""
return self.data.get("filename")
@property
def manufacturer(self) -> str | None:
"""Return name of the manufacturer."""
return self.data.get("manufacturer")
@property
def manufacturer_id(self) -> str | None: # TODO: In the dump this is an int.
"""Return manufacturer id (as defined in the specs) as a 4-digit hexadecimal string."""
return self.data.get("manufacturerId")
@property
def label(self) -> str | None:
"""Return short label for the device."""
return self.data.get("label")
@property
def description(self) -> str | None:
"""Return longer description of the device, usually the full name."""
return self.data.get("description")
@property
def devices(self) -> list[DeviceDevice]:
"""Return list of product type and product ID combinations."""
return self._devices
@property
def firmware_version(self) -> DeviceFirmwareVersionRange:
"""Return firmware version range this config is valid for."""
return self._firmware_version
@property
def associations(self) -> dict[str, dict]:
"""Return dict of association groups the device supports."""
return self.data.get("associations", {})
@property
def param_information(self) -> dict[str, dict]:
"""Return dictionary of configuration parameters the device supports."""
return self.data.get("paramInformation", {})
@property
def supports_zwave_plus(self) -> bool | None:
"""Return if the device complies with the Z-Wave+ standard."""
return self.data.get("supportsZWavePlus")
@property
def proprietary(self) -> dict:
"""Return dictionary of settings for the proprietary CC."""
return self.data.get("proprietary", {})
@property
def compat(self) -> dict[str, dict]:
"""Return compatibility flags."""
return self.data.get("compat", {})
@property
def metadata(self) -> DeviceMetadata:
"""Return metadata."""
return self._metadata
@property
def is_embedded(self) -> bool | None:
"""Return whether device config is embedded in zwave-js-server."""
return self.data.get("isEmbedded") | zwave-js-server-python | /zwave_js_server_python-0.51.0-py3-none-any.whl/zwave_js_server/model/device_config.py | device_config.py |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal, TypedDict
from ..const.command_class.multilevel_switch import MultilevelSwitchCommand
from ..const.command_class.power_level import PowerLevelTestStatus
from ..util.helpers import parse_buffer
if TYPE_CHECKING:
from .node import Node
class BaseNotificationDataType(TypedDict):
"""Represent a base notification event data dict type."""
source: Literal["node"] # required
event: Literal["notification"] # required
nodeId: int # required
ccId: int # required
class EntryControlNotificationArgsDataType(TypedDict, total=False):
"""Represent args for a Entry Control CC notification event data dict type."""
eventType: int # required
eventTypeLabel: str # required
dataType: int # required
dataTypeLabel: str # required
eventData: str | dict[str, Any]
class EntryControlNotificationDataType(BaseNotificationDataType):
"""Represent an Entry Control CC notification event data dict type."""
args: EntryControlNotificationArgsDataType # required
class EntryControlNotification:
"""Model for a Zwave Node's Entry Control CC notification event."""
def __init__(self, node: "Node", data: EntryControlNotificationDataType) -> None:
"""Initialize."""
self.node = node
self.data = data
@property
def node_id(self) -> int:
"""Return node ID property."""
return self.data["nodeId"]
@property
def command_class(self) -> int:
"""Return command class."""
return self.data["ccId"]
@property
def event_type(self) -> int:
"""Return event type property."""
return self.data["args"]["eventType"]
@property
def event_type_label(self) -> str:
"""Return event type label property."""
return self.data["args"]["eventTypeLabel"]
@property
def data_type(self) -> int:
"""Return data type property."""
return self.data["args"]["dataType"]
@property
def data_type_label(self) -> str:
"""Return data type label property."""
return self.data["args"]["dataTypeLabel"]
@property
def event_data(self) -> str | None:
"""Return event data property."""
if event_data := self.data["args"].get("eventData"):
return parse_buffer(event_data)
return None
class NotificationNotificationArgsDataType(TypedDict, total=False):
"""Represent args for a Notification CC notification event data dict type."""
type: int # required
label: str # required
event: int # required
eventLabel: str # required
parameters: dict[str, Any]
class NotificationNotificationDataType(BaseNotificationDataType):
"""Represent a Notification CC notification event data dict type."""
args: NotificationNotificationArgsDataType # required
class NotificationNotification:
"""Model for a Zwave Node's Notification CC notification event."""
def __init__(self, node: "Node", data: NotificationNotificationDataType) -> None:
"""Initialize."""
self.node = node
self.data = data
@property
def node_id(self) -> int:
"""Return node ID property."""
return self.data["nodeId"]
@property
def command_class(self) -> int:
"""Return command class."""
return self.data["ccId"]
@property
def type_(self) -> int:
"""Return type property."""
return self.data["args"]["type"]
@property
def label(self) -> str:
"""Return label property."""
return self.data["args"]["label"]
@property
def event(self) -> int:
"""Return event property."""
return self.data["args"]["event"]
@property
def event_label(self) -> str:
"""Return notification label property."""
return self.data["args"]["eventLabel"]
@property
def parameters(self) -> dict[str, Any]:
"""Return installer icon property."""
return self.data["args"].get("parameters", {})
class PowerLevelNotificationArgsDataType(TypedDict):
"""Represent args for a Power Level CC notification event data dict type."""
testNodeId: int
status: int
acknowledgedFrames: int
class PowerLevelNotificationDataType(BaseNotificationDataType):
"""Represent a Power Level CC notification event data dict type."""
args: PowerLevelNotificationArgsDataType # required
class PowerLevelNotification:
"""Model for a Zwave Node's Power Level CC notification event."""
def __init__(self, node: "Node", data: PowerLevelNotificationDataType) -> None:
"""Initialize."""
self.node = node
self.data = data
@property
def node_id(self) -> int:
"""Return node ID property."""
return self.data["nodeId"]
@property
def command_class(self) -> int:
"""Return command class."""
return self.data["ccId"]
@property
def test_node_id(self) -> int:
"""Return test node ID property."""
return self.data["args"]["testNodeId"]
@property
def status(self) -> PowerLevelTestStatus:
"""Return status."""
return PowerLevelTestStatus(self.data["args"]["status"])
@property
def acknowledged_frames(self) -> int:
"""Return acknowledged frames property."""
return self.data["args"]["acknowledgedFrames"]
class MultilevelSwitchNotificationArgsDataType(TypedDict, total=False):
"""Represent args for a Multi Level Switch CC notification event data dict type."""
eventType: int # required
eventTypeLabel: str # required
direction: str
class MultilevelSwitchNotificationDataType(BaseNotificationDataType):
"""Represent a Multi Level Switch CC notification event data dict type."""
args: MultilevelSwitchNotificationArgsDataType # required
class MultilevelSwitchNotification:
"""Model for a Zwave Node's Multi Level CC notification event."""
def __init__(
self, node: "Node", data: MultilevelSwitchNotificationDataType
) -> None:
"""Initialize."""
self.node = node
self.data = data
@property
def node_id(self) -> int:
"""Return node ID property."""
return self.data["nodeId"]
@property
def command_class(self) -> int:
"""Return command class."""
return self.data["ccId"]
@property
def event_type(self) -> MultilevelSwitchCommand:
"""Return event type property."""
return MultilevelSwitchCommand(self.data["args"]["eventType"])
@property
def event_type_label(self) -> str:
"""Return event type label property."""
return self.data["args"]["eventTypeLabel"]
@property
def direction(self) -> str | None:
"""Return direction property."""
if direction := self.data["args"].get("direction"):
return direction
        return None

# End of zwave_js_server/model/notification.py
from __future__ import annotations
from dataclasses import dataclass, field
from functools import cached_property
from typing import TYPE_CHECKING, TypedDict
from zwave_js_server.exceptions import RepeaterRssiErrorReceived, RssiErrorReceived
from ..const import ProtocolDataRate, RssiError
if TYPE_CHECKING:
from ..client import Client
from .node import Node
class RouteStatisticsDataType(TypedDict, total=False):
"""Represent a route statistics data dict type."""
protocolDataRate: int
repeaters: list[int]
rssi: int
repeaterRSSI: list[int]
routeFailedBetween: list[int]
class RouteStatisticsDict(TypedDict):
"""Represent a route statistics data dict type."""
protocol_data_rate: int
repeaters: list["Node"]
rssi: int | None
repeater_rssi: list[int]
route_failed_between: tuple["Node", "Node"] | None
@dataclass
class RouteStatistics:
"""Represent route statistics."""
client: "Client"
data: RouteStatisticsDataType
protocol_data_rate: ProtocolDataRate = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.protocol_data_rate = ProtocolDataRate(self.data["protocolDataRate"])
@cached_property
def repeaters(self) -> list["Node"]:
"""Return repeaters."""
assert self.client.driver
return [
self.client.driver.controller.nodes[node_id]
for node_id in self.data["repeaters"]
]
@property
def rssi(self) -> int | None:
"""Return RSSI."""
if (rssi := self.data.get("rssi")) is None:
return None
if rssi in [item.value for item in RssiError]:
raise RssiErrorReceived(RssiError(rssi))
return rssi
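    # Usage note (illustrative): reads of `rssi` are typically wrapped in
    # try/except RssiErrorReceived, since sentinel RssiError codes raise instead of
    # being returned as numbers.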
@property
def repeater_rssi(self) -> list[int]:
"""Return repeater RSSI."""
repeater_rssi = self.data.get("repeaterRSSI", [])
rssi_errors = [item.value for item in RssiError]
if any(rssi_ in rssi_errors for rssi_ in repeater_rssi):
raise RepeaterRssiErrorReceived(repeater_rssi)
return repeater_rssi
@cached_property
def route_failed_between(self) -> tuple["Node", "Node"] | None:
"""Return route failed between."""
if (node_ids := self.data.get("routeFailedBetween")) is None:
return None
assert self.client.driver
assert len(node_ids) == 2
return (
self.client.driver.controller.nodes[node_ids[0]],
self.client.driver.controller.nodes[node_ids[1]],
)
def as_dict(self) -> RouteStatisticsDict:
"""Return route statistics as dict."""
return {
"protocol_data_rate": self.protocol_data_rate.value,
"repeaters": self.repeaters,
"rssi": self.data.get("rssi"),
"repeater_rssi": self.data.get("repeaterRSSI", []),
"route_failed_between": (
self.route_failed_between[0],
self.route_failed_between[1],
)
if self.route_failed_between
else None,
        }

# End of zwave_js_server/model/statistics.py
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TypedDict
from ...const import PowerLevel
class LifelineHealthCheckResultDataType(TypedDict, total=False):
"""Represent a lifeline health check result data dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/node/Types.ts#L171
latency: int # required
numNeighbors: int # required
failedPingsNode: int # required
rating: int # required
routeChanges: int
minPowerlevel: int
failedPingsController: int
snrMargin: int
class LifelineHealthCheckSummaryDataType(TypedDict):
"""Represent a lifeline health check summary data dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/node/_Types.ts#L254
results: list[LifelineHealthCheckResultDataType]
rating: int
@dataclass
class LifelineHealthCheckResult:
"""Represent a lifeline health check result."""
data: LifelineHealthCheckResultDataType
latency: int = field(init=False)
num_neighbors: int = field(init=False)
failed_pings_node: int = field(init=False)
rating: int = field(init=False)
route_changes: int | None = field(init=False)
min_power_level: PowerLevel | None = field(init=False, default=None)
failed_pings_controller: int | None = field(init=False)
snr_margin: int | None = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.latency = self.data["latency"]
self.num_neighbors = self.data["numNeighbors"]
self.failed_pings_node = self.data["failedPingsNode"]
self.rating = self.data["rating"]
self.route_changes = self.data.get("routeChanges")
if (min_power_level := self.data.get("minPowerlevel")) is not None:
self.min_power_level = PowerLevel(min_power_level)
self.failed_pings_controller = self.data.get("failedPingsController")
self.snr_margin = self.data.get("snrMargin")
@dataclass
class LifelineHealthCheckSummary:
"""Represent a lifeline health check summary update."""
data: LifelineHealthCheckSummaryDataType
rating: int = field(init=False)
results: list[LifelineHealthCheckResult] = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.rating = self.data["rating"]
self.results = [
LifelineHealthCheckResult(r) for r in self.data.get("results", [])
]
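# Illustrative sketch (not part of this module's API); the numbers below are made up
# purely to show the raw dict shape this dataclass consumes.
def _example_lifeline_summary() -> LifelineHealthCheckSummary:
    """Build a summary from an (assumed) raw result dict."""
    return LifelineHealthCheckSummary(
        {
            "rating": 10,
            "results": [
                {"latency": 50, "numNeighbors": 3, "failedPingsNode": 0, "rating": 10}
            ],
        }
    )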
class RouteHealthCheckResultDataType(TypedDict, total=False):
"""Represent a route health check result data dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/node/_Types.ts#L285
numNeighbors: int # required
rating: int # required
failedPingsToTarget: int
failedPingsToSource: int
minPowerlevelSource: int
minPowerlevelTarget: int
class RouteHealthCheckSummaryDataType(TypedDict):
"""Represent a route health check summary data dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/node/_Types.ts#L317
results: list[RouteHealthCheckResultDataType]
rating: int
@dataclass
class RouteHealthCheckResult:
"""Represent a route health check result."""
data: RouteHealthCheckResultDataType
num_neighbors: int = field(init=False)
rating: int = field(init=False)
failed_pings_to_target: int | None = field(init=False)
failed_pings_to_source: int | None = field(init=False)
min_power_level_source: PowerLevel | None = field(init=False, default=None)
min_power_level_target: PowerLevel | None = field(init=False, default=None)
def __post_init__(self) -> None:
"""Post initialize."""
self.num_neighbors = self.data["numNeighbors"]
self.rating = self.data["rating"]
self.failed_pings_to_target = self.data.get("failedPingsToTarget")
self.failed_pings_to_source = self.data.get("failedPingsToSource")
if (min_power_level_source := self.data.get("minPowerlevelSource")) is not None:
self.min_power_level_source = PowerLevel(min_power_level_source)
if (min_power_level_target := self.data.get("minPowerlevelTarget")) is not None:
self.min_power_level_target = PowerLevel(min_power_level_target)
@dataclass
class RouteHealthCheckSummary:
"""Represent a route health check summary update."""
data: RouteHealthCheckSummaryDataType
rating: int = field(init=False)
results: list[RouteHealthCheckResult] = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.rating = self.data["rating"]
self.results = [RouteHealthCheckResult(r) for r in self.data.get("results", [])]
@dataclass
class TestPowerLevelProgress:
"""Class to represent a test power level progress update."""
acknowledged: int
total: int
@dataclass
class CheckHealthProgress:
"""Represent a check lifeline/route health progress update."""
rounds: int
total_rounds: int
last_rating: int
    last_result: LifelineHealthCheckResult | RouteHealthCheckResult

# End of zwave_js_server/model/node/health_check.py
from __future__ import annotations
from dataclasses import asdict, dataclass, field
from enum import IntEnum
from typing import TYPE_CHECKING, TypedDict, cast
from ...const import VALUE_UNKNOWN
from ...util.helpers import convert_bytes_to_base64
if TYPE_CHECKING:
from . import Node
class NodeFirmwareUpdateDataDataType(TypedDict, total=False):
"""Represent a firmware update data dict type."""
filename: str # required
file: str # required
fileFormat: str
firmwareTarget: int
@dataclass
class NodeFirmwareUpdateData:
"""Firmware update data."""
filename: str
file: bytes
file_format: str | None = None
firmware_target: int | None = None
def to_dict(self) -> NodeFirmwareUpdateDataDataType:
"""Convert firmware update data to dict."""
data: NodeFirmwareUpdateDataDataType = {
"filename": self.filename,
"file": convert_bytes_to_base64(self.file),
}
if self.file_format is not None:
data["fileFormat"] = self.file_format
if self.firmware_target is not None:
data["firmwareTarget"] = self.firmware_target
return data
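# Illustrative sketch (not part of this module's API); the filename, payload, and file
# format below are assumptions for demonstration only.
def _example_firmware_update_payload() -> NodeFirmwareUpdateDataDataType:
    """Serialize raw firmware file bytes into the dict shape the server expects."""
    update = NodeFirmwareUpdateData(
        filename="firmware_v1_2.hex",
        file=b"\x00\x01\x02",
        file_format="hex",
        firmware_target=0,
    )
    return update.to_dict()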
class NodeFirmwareUpdateCapabilitiesDataType(TypedDict, total=False):
"""Represent a firmware update capabilities dict type."""
firmwareUpgradable: bool # required
firmwareTargets: list[int]
continuesToFunction: bool | str
supportsActivation: bool | str
class NodeFirmwareUpdateCapabilitiesDict(TypedDict, total=False):
"""Represent a dict from FirmwareUpdateCapabilities."""
firmware_upgradable: bool # required
firmware_targets: list[int]
continues_to_function: bool | None
supports_activation: bool | None
@dataclass
class NodeFirmwareUpdateCapabilities:
"""Model for firmware update capabilities."""
data: NodeFirmwareUpdateCapabilitiesDataType
firmware_upgradable: bool = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.firmware_upgradable = self.data["firmwareUpgradable"]
@property
def firmware_targets(self) -> list[int]:
"""Return firmware targets."""
if not self.firmware_upgradable:
raise TypeError("Firmware is not upgradeable.")
return self.data["firmwareTargets"]
@property
def continues_to_function(self) -> bool | None:
"""Return whether node continues to function during update."""
if not self.firmware_upgradable:
raise TypeError("Firmware is not upgradeable.")
if (val := self.data["continuesToFunction"]) == VALUE_UNKNOWN:
return None
assert isinstance(val, bool)
return val
@property
def supports_activation(self) -> bool | None:
"""Return whether node supports delayed activation of the new firmware."""
if not self.firmware_upgradable:
raise TypeError("Firmware is not upgradeable.")
if (val := self.data["supportsActivation"]) == VALUE_UNKNOWN:
return None
assert isinstance(val, bool)
return val
def to_dict(self) -> NodeFirmwareUpdateCapabilitiesDict:
"""Return dict representation of the object."""
if not self.firmware_upgradable:
return {"firmware_upgradable": self.firmware_upgradable}
return {
"firmware_upgradable": self.firmware_upgradable,
"firmware_targets": self.firmware_targets,
"continues_to_function": self.continues_to_function,
"supports_activation": self.supports_activation,
}
class NodeFirmwareUpdateStatus(IntEnum):
"""Enum with all node firmware update status values.
https://zwave-js.github.io/node-zwave-js/#/api/node?id=quotfirmware-update-finishedquot
"""
ERROR_TIMEOUT = -1
ERROR_CHECKSUM = 0
ERROR_TRANSMISSION_FAILED = 1
ERROR_INVALID_MANUFACTURER_ID = 2
ERROR_INVALID_FIRMWARE_ID = 3
ERROR_INVALID_FIRMWARE_TARGET = 4
ERROR_INVALID_HEADER_INFORMATION = 5
ERROR_INVALID_HEADER_FORMAT = 6
ERROR_INSUFFICIENT_MEMORY = 7
ERROR_INVALID_HARDWARE_VERSION = 8
OK_WAITING_FOR_ACTIVATION = 253
OK_NO_RESTART = 254
OK_RESTART_PENDING = 255
class NodeFirmwareUpdateProgressDataType(TypedDict):
"""Represent a node firmware update progress dict type."""
currentFile: int
totalFiles: int
sentFragments: int
totalFragments: int
progress: float
@dataclass
class NodeFirmwareUpdateProgress:
"""Model for a node firmware update progress data."""
node: "Node"
data: NodeFirmwareUpdateProgressDataType
current_file: int = field(init=False)
total_files: int = field(init=False)
sent_fragments: int = field(init=False)
total_fragments: int = field(init=False)
progress: float = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.current_file = self.data["currentFile"]
self.total_files = self.data["totalFiles"]
self.sent_fragments = self.data["sentFragments"]
self.total_fragments = self.data["totalFragments"]
self.progress = float(self.data["progress"])
class NodeFirmwareUpdateResultDataType(TypedDict, total=False):
"""Represent a node firmware update result dict type."""
status: int # required
success: bool # required
waitTime: int
reInterview: bool # required
@dataclass
class NodeFirmwareUpdateResult:
"""Model for node firmware update result data."""
node: "Node"
data: NodeFirmwareUpdateResultDataType
status: NodeFirmwareUpdateStatus = field(init=False)
success: bool = field(init=False)
wait_time: int | None = field(init=False)
reinterview: bool = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.status = NodeFirmwareUpdateStatus(self.data["status"])
self.success = self.data["success"]
self.wait_time = self.data.get("waitTime")
self.reinterview = self.data["reInterview"]
class NodeFirmwareUpdateFileInfoDataType(TypedDict):
"""Represent a firmware update file info data dict type."""
target: int
url: str
integrity: str # sha256
@dataclass
class NodeFirmwareUpdateFileInfo:
"""Represent a firmware update file info."""
target: int
url: str
integrity: str
@classmethod
def from_dict(
cls, data: NodeFirmwareUpdateFileInfoDataType
) -> "NodeFirmwareUpdateFileInfo":
"""Initialize from dict."""
return cls(**data)
def to_dict(self) -> NodeFirmwareUpdateFileInfoDataType:
"""Return dict representation of the object."""
return cast(NodeFirmwareUpdateFileInfoDataType, asdict(self))
class NodeFirmwareUpdateInfoDataType(TypedDict):
"""Represent a firmware update info data dict type."""
version: str
changelog: str
files: list[NodeFirmwareUpdateFileInfoDataType]
@dataclass
class NodeFirmwareUpdateInfo:
"""Represent a firmware update info."""
version: str
changelog: str
files: list[NodeFirmwareUpdateFileInfo]
@classmethod
def from_dict(
cls, data: NodeFirmwareUpdateInfoDataType
) -> "NodeFirmwareUpdateInfo":
"""Initialize from dict."""
return cls(
version=data["version"],
changelog=data["changelog"],
files=[
NodeFirmwareUpdateFileInfo.from_dict(file) for file in data["files"]
],
)
def to_dict(self) -> NodeFirmwareUpdateInfoDataType:
"""Return dict representation of the object."""
return cast(
NodeFirmwareUpdateInfoDataType,
{
"version": self.version,
"changelog": self.changelog,
"files": [file.to_dict() for file in self.files],
},
        )

# End of zwave_js_server/model/node/firmware.py
from __future__ import annotations
from typing import Literal
from ...const import CommandClass
from ...event import BaseEventModel
from ..notification import (
EntryControlNotificationArgsDataType,
NotificationNotificationArgsDataType,
PowerLevelNotificationArgsDataType,
)
from ..value import ValueDataType
from .data_model import NodeDataType
from .firmware import (
NodeFirmwareUpdateProgressDataType,
NodeFirmwareUpdateResultDataType,
)
from .statistics import NodeStatisticsDataType
try:
from pydantic.v1 import BaseModel
except ImportError:
from pydantic import BaseModel
class BaseNodeEventModel(BaseEventModel):
"""Base model for a node event."""
source: Literal["node"]
nodeId: int
class AliveEventModel(BaseNodeEventModel):
"""Model for `alive` event data."""
event: Literal["alive"]
class CheckHealthProgressEventModel(BaseNodeEventModel):
"""
Model for `check health progress` type events data.
Includes `check lifeline health progress` and `check route health progress` events.
"""
rounds: int
totalRounds: int
lastRating: int
class CheckLifelineHealthProgressEventModel(CheckHealthProgressEventModel):
"""Model for `check lifeline health progress` event data."""
event: Literal["check lifeline health progress"]
class CheckRouteHealthProgressEventModel(CheckHealthProgressEventModel):
"""Model for `check route health progress` event data."""
event: Literal["check route health progress"]
class DeadEventModel(BaseNodeEventModel):
"""Model for `dead` event data."""
event: Literal["dead"]
class InterviewCompletedEventModel(BaseNodeEventModel):
"""Model for `interview completed` event data."""
event: Literal["interview completed"]
class InterviewFailedEventArgsModel(BaseModel):
"""Model for `interview failed` event args."""
errorMessage: str
isFinal: bool
attempt: int | None
maxAttempts: int | None
class InterviewFailedEventModel(BaseNodeEventModel):
"""Model for `interview failed` event data."""
event: Literal["interview failed"]
args: InterviewFailedEventArgsModel
class InterviewStageCompletedEventModel(BaseNodeEventModel):
"""Model for `interview stage completed` event data."""
event: Literal["interview stage completed"]
stageName: str
class InterviewStartedEventModel(BaseNodeEventModel):
"""Model for `interview started` event data."""
event: Literal["interview started"]
class NotificationEventModel(BaseNodeEventModel):
"""Model for `notification` event data."""
event: Literal["notification"]
ccId: CommandClass
args: (
NotificationNotificationArgsDataType
| EntryControlNotificationArgsDataType
| PowerLevelNotificationArgsDataType
)
class ReadyEventModel(BaseNodeEventModel):
"""Model for `ready` event data."""
event: Literal["ready"]
nodeState: NodeDataType
class SleepEventModel(BaseNodeEventModel):
"""Model for `sleep` event data."""
event: Literal["sleep"]
class StatisticsUpdatedEventModel(BaseNodeEventModel):
"""Model for `statistics updated` event data."""
event: Literal["statistics updated"]
statistics: NodeStatisticsDataType
class TestPowerLevelProgressEventModel(BaseNodeEventModel):
"""Model for `test powerlevel progress` event data."""
event: Literal["test powerlevel progress"]
acknowledged: int
total: int
class ValueEventModel(BaseNodeEventModel):
"""
Model for `value` events data.
Subclass for event models for `metadata updated`, `value added`,
`value notification`, `value removed`, and `value updated`.
"""
args: ValueDataType
class MetadataUpdatedEventModel(ValueEventModel):
"""Model for `metadata updated` event data."""
event: Literal["metadata updated"]
class ValueAddedEventModel(ValueEventModel):
"""Model for `value added` event data."""
event: Literal["value added"]
class ValueNotificationEventModel(ValueEventModel):
"""Model for `value notification` event data."""
event: Literal["value notification"]
class ValueRemovedEventModel(ValueEventModel):
"""Model for `value removed` event data."""
event: Literal["value removed"]
class ValueUpdatedEventModel(ValueEventModel):
"""Model for `value updated` event data."""
event: Literal["value updated"]
class WakeUpEventModel(BaseNodeEventModel):
"""Model for `wake up` event data."""
event: Literal["wake up"]
class FirmwareUpdateFinishedEventModel(BaseNodeEventModel):
"""Model for `firmware update finished` event data."""
event: Literal["firmware update finished"]
result: NodeFirmwareUpdateResultDataType
class FirmwareUpdateProgressEventModel(BaseNodeEventModel):
"""Model for `firmware update progress` event data."""
event: Literal["firmware update progress"]
progress: NodeFirmwareUpdateProgressDataType
NODE_EVENT_MODEL_MAP: dict[str, type["BaseNodeEventModel"]] = {
"alive": AliveEventModel,
"check lifeline health progress": CheckLifelineHealthProgressEventModel,
"check route health progress": CheckRouteHealthProgressEventModel,
"dead": DeadEventModel,
"firmware update finished": FirmwareUpdateFinishedEventModel,
"firmware update progress": FirmwareUpdateProgressEventModel,
"interview completed": InterviewCompletedEventModel,
"interview failed": InterviewFailedEventModel,
"interview stage completed": InterviewStageCompletedEventModel,
"interview started": InterviewStartedEventModel,
"metadata updated": MetadataUpdatedEventModel,
"notification": NotificationEventModel,
"ready": ReadyEventModel,
"sleep": SleepEventModel,
"statistics updated": StatisticsUpdatedEventModel,
"test powerlevel progress": TestPowerLevelProgressEventModel,
"value added": ValueAddedEventModel,
"value notification": ValueNotificationEventModel,
"value removed": ValueRemovedEventModel,
"value updated": ValueUpdatedEventModel,
"wake up": WakeUpEventModel,
}
# ---- end of file: zwave_js_server/model/node/event_model.py ----
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass, field
from datetime import datetime
from typing import TYPE_CHECKING, TypedDict
from zwave_js_server.exceptions import RssiErrorReceived
from ...const import RssiError
from ..statistics import RouteStatistics, RouteStatisticsDataType
if TYPE_CHECKING:
from ...client import Client
class NodeStatisticsDataType(TypedDict, total=False):
"""Represent a node statistics data dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/node/NodeStatistics.ts#L21-L33
commandsTX: int # required
commandsRX: int # required
commandsDroppedTX: int # required
commandsDroppedRX: int # required
timeoutResponse: int # required
rtt: int
rssi: int
lwr: RouteStatisticsDataType
nlwr: RouteStatisticsDataType
lastSeen: str
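# Illustrative sketch (not part of the library): a minimal NodeStatisticsDataType
# payload as the server might send it. Only the first five counters are required;
# the remaining keys are optional. All numbers below are made up.
#
#     stats: NodeStatisticsDataType = {
#         "commandsTX": 15,
#         "commandsRX": 14,
#         "commandsDroppedTX": 0,
#         "commandsDroppedRX": 1,
#         "timeoutResponse": 0,
#         "rtt": 27,
#         "lastSeen": "2023-07-18T15:42:34+00:00",
#     }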
@dataclass
class NodeStatistics:
"""Represent a node statistics update."""
client: "Client"
data: NodeStatisticsDataType
commands_tx: int = field(init=False)
commands_rx: int = field(init=False)
commands_dropped_rx: int = field(init=False)
commands_dropped_tx: int = field(init=False)
timeout_response: int = field(init=False)
rtt: int | None = field(init=False)
lwr: RouteStatistics | None = field(init=False, default=None)
nlwr: RouteStatistics | None = field(init=False, default=None)
last_seen: datetime | None = field(init=False, default=None)
def __post_init__(self) -> None:
"""Post initialize."""
self.commands_tx = self.data["commandsTX"]
self.commands_rx = self.data["commandsRX"]
self.commands_dropped_rx = self.data["commandsDroppedRX"]
self.commands_dropped_tx = self.data["commandsDroppedTX"]
self.timeout_response = self.data["timeoutResponse"]
self.rtt = self.data.get("rtt")
if last_seen := self.data.get("lastSeen"):
self.last_seen = datetime.fromisoformat(last_seen)
if lwr := self.data.get("lwr"):
with suppress(ValueError):
self.lwr = RouteStatistics(self.client, lwr)
if nlwr := self.data.get("nlwr"):
with suppress(ValueError):
self.nlwr = RouteStatistics(self.client, nlwr)
@property
def rssi(self) -> int | None:
"""
Return average RSSI of frames received by this node.
Consecutive non-error measurements are combined using an exponential moving
average.
"""
if not self.data or (rssi_ := self.data.get("rssi")) is None:
return None
if rssi_ in [item.value for item in RssiError]:
raise RssiErrorReceived(RssiError(rssi_))
return rssi_
# ---- end of file: zwave_js_server/model/node/statistics.py ----
from __future__ import annotations
import asyncio
import copy
import logging
from datetime import datetime
from typing import TYPE_CHECKING, Any, cast
from ...const import (
INTERVIEW_FAILED,
NOT_INTERVIEWED,
CommandClass,
DateAndTime,
NodeStatus,
PowerLevel,
SecurityClass,
)
from ...event import Event, EventBase
from ...exceptions import (
FailedCommand,
NotFoundError,
UnparseableValue,
UnwriteableValue,
)
from ..command_class import CommandClassInfo
from ..device_class import DeviceClass
from ..device_config import DeviceConfig
from ..endpoint import Endpoint
from ..notification import (
EntryControlNotification,
EntryControlNotificationDataType,
MultilevelSwitchNotification,
MultilevelSwitchNotificationDataType,
NotificationNotification,
NotificationNotificationDataType,
PowerLevelNotification,
PowerLevelNotificationDataType,
)
from ..value import (
ConfigurationValue,
MetaDataType,
SetValueResult,
Value,
ValueDataType,
ValueMetadata,
ValueNotification,
_get_value_id_str_from_dict,
_init_value,
)
from .data_model import NodeDataType
from .event_model import NODE_EVENT_MODEL_MAP
from .firmware import (
NodeFirmwareUpdateCapabilities,
NodeFirmwareUpdateCapabilitiesDataType,
NodeFirmwareUpdateProgress,
NodeFirmwareUpdateProgressDataType,
NodeFirmwareUpdateResult,
NodeFirmwareUpdateResultDataType,
)
from .health_check import (
CheckHealthProgress,
LifelineHealthCheckResult,
LifelineHealthCheckSummary,
RouteHealthCheckResult,
RouteHealthCheckSummary,
TestPowerLevelProgress,
)
from .statistics import NodeStatistics, NodeStatisticsDataType
if TYPE_CHECKING:
from ...client import Client
# pylint: disable=too-many-lines
_LOGGER = logging.getLogger(__package__)
DEFAULT_NODE_STATISTICS = NodeStatisticsDataType(
commandsTX=0,
commandsRX=0,
commandsDroppedTX=0,
commandsDroppedRX=0,
timeoutResponse=0,
)
def _get_value_id_dict_from_value_data(value_data: ValueDataType) -> dict[str, Any]:
"""Return a value ID dict from ValueDataType."""
data = {
"commandClass": value_data["commandClass"],
"property": value_data["property"],
}
if (endpoint := value_data.get("endpoint")) is not None:
data["endpoint"] = endpoint
if (property_key := value_data.get("propertyKey")) is not None:
data["propertyKey"] = property_key
return data
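# Illustrative sketch (not part of the library): given a ValueDataType such as
#     {"commandClass": 38, "endpoint": 0, "property": "targetValue", "newValue": 99}
# the helper above returns only the value-ID fields:
#     {"commandClass": 38, "property": "targetValue", "endpoint": 0}
# i.e. the optional "endpoint"/"propertyKey" keys are copied when present and all
# other keys are dropped.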
class Node(EventBase):
"""Represent a Z-Wave JS node."""
def __init__(self, client: "Client", data: NodeDataType) -> None:
"""Initialize the node."""
super().__init__()
self.client = client
self.data: NodeDataType = {}
self._device_config = DeviceConfig({})
self._statistics = NodeStatistics(
client, data.get("statistics", DEFAULT_NODE_STATISTICS)
)
self._firmware_update_progress: NodeFirmwareUpdateProgress | None = None
self.values: dict[str, ConfigurationValue | Value] = {}
self.endpoints: dict[int, Endpoint] = {}
self._status_event = asyncio.Event()
self.update(data)
def __repr__(self) -> str:
"""Return the representation."""
return f"{type(self).__name__}(node_id={self.node_id})"
def __hash__(self) -> int:
"""Return the hash."""
return hash((self.client.driver, self.node_id))
def __eq__(self, other: object) -> bool:
"""Return whether this instance equals another."""
if not isinstance(other, Node):
return False
return (
self.client.driver == other.client.driver and self.node_id == other.node_id
)
@property
def node_id(self) -> int:
"""Return node ID property."""
return self.data["nodeId"]
@property
def index(self) -> int:
"""Return index property."""
return self.data["index"]
@property
def device_class(self) -> DeviceClass | None:
"""Return the device_class."""
if (device_class := self.data.get("deviceClass")) is None:
return None
return DeviceClass(device_class)
@property
def installer_icon(self) -> int | None:
"""Return installer icon property."""
return self.data.get("installerIcon")
@property
def user_icon(self) -> int | None:
"""Return user icon property."""
return self.data.get("userIcon")
@property
def status(self) -> NodeStatus:
"""Return the status."""
return NodeStatus(self.data["status"])
@property
def ready(self) -> bool | None:
"""Return the ready."""
return self.data.get("ready")
@property
def is_listening(self) -> bool | None:
"""Return the is_listening."""
return self.data.get("isListening")
@property
def is_frequent_listening(self) -> bool | str | None:
"""Return the is_frequent_listening."""
return self.data.get("isFrequentListening")
@property
def is_routing(self) -> bool | None:
"""Return the is_routing."""
return self.data.get("isRouting")
@property
def max_data_rate(self) -> int | None:
"""Return the max_data_rate."""
return self.data.get("maxDataRate")
@property
def supported_data_rates(self) -> list[int]:
"""Return the supported_data_rates."""
return self.data.get("supportedDataRates", [])
@property
def is_secure(self) -> bool | None:
"""Return the is_secure."""
if (is_secure := self.data.get("isSecure")) == "unknown":
return None
return is_secure
@property
def protocol_version(self) -> int | None:
"""Return the protocol_version."""
return self.data.get("protocolVersion")
@property
def supports_beaming(self) -> bool | None:
"""Return the supports_beaming."""
return self.data.get("supportsBeaming")
@property
def supports_security(self) -> bool | None:
"""Return the supports_security."""
return self.data.get("supportsSecurity")
@property
def manufacturer_id(self) -> int | None:
"""Return the manufacturer_id."""
return self.data.get("manufacturerId")
@property
def product_id(self) -> int | None:
"""Return the product_id."""
return self.data.get("productId")
@property
def product_type(self) -> int | None:
"""Return the product_type."""
return self.data.get("productType")
@property
def firmware_version(self) -> str | None:
"""Return the firmware_version."""
return self.data.get("firmwareVersion")
@property
def zwave_plus_version(self) -> int | None:
"""Return the zwave_plus_version."""
return self.data.get("zwavePlusVersion")
@property
def zwave_plus_node_type(self) -> int | None:
"""Return the zwave_plus_node_type."""
return self.data.get("zwavePlusNodeType")
@property
def zwave_plus_role_type(self) -> int | None:
"""Return the zwave_plus_role_type."""
return self.data.get("zwavePlusRoleType")
@property
def name(self) -> str | None:
"""Return the name."""
return self.data.get("name")
@property
def location(self) -> str | None:
"""Return the location."""
return self.data.get("location")
@property
def device_config(self) -> DeviceConfig:
"""Return the device_config."""
return self._device_config
@property
def label(self) -> str | None:
"""Return the label."""
return self.data.get("label")
@property
def device_database_url(self) -> str | None:
"""Return the device database URL."""
return self.data.get("deviceDatabaseUrl")
@property
def endpoint_count_is_dynamic(self) -> bool | None:
"""Return the endpoint_count_is_dynamic."""
return self.data.get("endpointCountIsDynamic")
@property
def endpoints_have_identical_capabilities(self) -> bool | None:
"""Return the endpoints_have_identical_capabilities."""
return self.data.get("endpointsHaveIdenticalCapabilities")
@property
def individual_endpoint_count(self) -> int | None:
"""Return the individual_endpoint_count."""
return self.data.get("individualEndpointCount")
@property
def aggregated_endpoint_count(self) -> int | None:
"""Return the aggregated_endpoint_count."""
return self.data.get("aggregatedEndpointCount")
@property
def interview_attempts(self) -> int | None:
"""Return the interview_attempts."""
return self.data.get("interviewAttempts")
@property
def interview_stage(self) -> int | str | None:
"""Return the interview_stage."""
return self.data.get("interviewStage")
@property
def in_interview(self) -> bool:
"""Return whether node is currently being interviewed."""
return (
not self.ready
and not self.awaiting_manual_interview
and self.interview_stage != INTERVIEW_FAILED
)
@property
def awaiting_manual_interview(self) -> bool:
"""Return whether node requires a manual interview."""
return self.interview_stage in (None, NOT_INTERVIEWED)
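# Illustrative sketch (not part of the library) of how the two interview
# properties above combine, assuming `node` is a Node instance:
#
#     if node.awaiting_manual_interview:
#         # interview_stage is None or NOT_INTERVIEWED: nothing has run yet
#         await node.async_interview()
#     elif node.in_interview:
#         # not ready, the interview has started and has not failed
#         pass  # wait for the `ready` event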
@property
def command_classes(self) -> list[CommandClassInfo]:
"""Return all CommandClasses supported on this node."""
return self.endpoints[0].command_classes
@property
def statistics(self) -> NodeStatistics:
"""Return statistics property."""
return self._statistics
@property
def firmware_update_progress(self) -> NodeFirmwareUpdateProgress | None:
"""Return firmware update progress."""
return self._firmware_update_progress
@property
def highest_security_class(self) -> SecurityClass | None:
"""Return highest security class configured on the node."""
if (security_class := self.data.get("highestSecurityClass")) is None:
return None
return SecurityClass(security_class)
@property
def is_controller_node(self) -> bool:
"""Return whether the node is a controller node."""
return self.data["isControllerNode"]
@property
def keep_awake(self) -> bool:
"""Return whether the node is set to keep awake."""
return self.data["keepAwake"]
@property
def last_seen(self) -> datetime | None:
"""Return when the node was last seen."""
if last_seen := self.data.get("lastSeen"):
return datetime.fromisoformat(last_seen)
return None
@property
def default_volume(self) -> int | float | None:
"""Return the default volume."""
return self.data.get("defaultVolume")
@property
def default_transition_duration(self) -> int | float | None:
"""Return the default transition duration."""
return self.data.get("defaultTransitionDuration")
def update(self, data: NodeDataType) -> None:
"""Update the internal state data."""
self.data = copy.deepcopy(data)
self._device_config = DeviceConfig(self.data.get("deviceConfig", {}))
self._statistics = NodeStatistics(
self.client, self.data.get("statistics", DEFAULT_NODE_STATISTICS)
)
new_values_data = {
_get_value_id_str_from_dict(self, val): val
for val in self.data.pop("values")
}
new_value_ids = set(new_values_data)
stale_value_ids = set(self.values) - new_value_ids
# Remove stale values
for value_id in stale_value_ids:
self.values.pop(value_id)
# Update existing values and populate new values. Preserve value order when
# initializing values for the node for the first time by using the key order,
# which is deterministic
for value_id in (
new_value_ids - stale_value_ids
if stale_value_ids
else list(new_values_data)
):
val = new_values_data[value_id]
try:
if value_id in self.values:
self.values[value_id].update(val)
else:
self.values[value_id] = _init_value(self, val)
except UnparseableValue:
# If we can't parse the value, don't store it
pass
new_endpoints_data = {
endpoint["index"]: endpoint for endpoint in self.data.pop("endpoints")
}
new_endpoint_idxs = set(new_endpoints_data)
stale_endpoint_idxs = set(self.endpoints) - new_endpoint_idxs
# Remove stale endpoints
for endpoint_idx in stale_endpoint_idxs:
self.endpoints.pop(endpoint_idx)
# Add new endpoints or update existing ones
for endpoint_idx in new_endpoint_idxs - stale_endpoint_idxs:
endpoint = new_endpoints_data[endpoint_idx]
values = {
value_id: value
for value_id, value in self.values.items()
if self.index == value.endpoint
}
if endpoint_idx in self.endpoints:
self.endpoints[endpoint_idx].update(endpoint, values)
else:
self.endpoints[endpoint_idx] = Endpoint(self.client, endpoint, values)
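# Illustrative sketch (not part of the library): calling `node.update(new_state)`
# with a fresh NodeDataType dump reconciles the caches built above:
#
#     node.update(new_state)
#     # values present only in the old cache are dropped,
#     # values present in both are updated in place,
#     # values only in `new_state` are created via _init_value,
#     # and endpoints are reconciled the same way.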
def get_command_class_values(
self, command_class: CommandClass, endpoint: int | None = None
) -> dict[str, ConfigurationValue | Value]:
"""Return all values for a given command class."""
return {
value_id: value
for value_id, value in self.values.items()
if value.command_class == command_class
and (endpoint is None or value.endpoint == endpoint)
}
def get_configuration_values(self) -> dict[str, ConfigurationValue]:
"""Return all configuration values for a node."""
return cast(
dict[str, ConfigurationValue],
self.get_command_class_values(CommandClass.CONFIGURATION),
)
def receive_event(self, event: Event) -> None:
"""Receive an event."""
NODE_EVENT_MODEL_MAP[event.type](**event.data)
self._handle_event_protocol(event)
event.data["node"] = self
self.emit(event.type, event.data)
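# Illustrative sketch (not part of the library): how an incoming node event is
# processed by `receive_event` above. The payload keys mirror the event models in
# NODE_EVENT_MODEL_MAP; the node ID is made up.
#
#     event = Event(
#         type="wake up",
#         data={"source": "node", "event": "wake up", "nodeId": 7},
#     )
#     node.receive_event(event)  # validates against WakeUpEventModel,
#                                # updates node.data["status"], then emits
#                                # "wake up" to subscribed callbacks with
#                                # event.data["node"] set to the node itself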
async def async_send_command(
self,
cmd: str,
require_schema: int | None = None,
wait_for_result: bool | None = None,
**cmd_kwargs: Any,
) -> dict[str, Any] | None:
"""
Send a node command. For internal use only.
If wait_for_result is not None, it takes precedence; otherwise we decide whether
to wait based on the node status.
"""
kwargs = {}
message = {"command": f"node.{cmd}", "nodeId": self.node_id, **cmd_kwargs}
if require_schema is not None:
kwargs["require_schema"] = require_schema
if wait_for_result:
result = await self.client.async_send_command(message, **kwargs)
return result
if wait_for_result is None and self.status not in (
NodeStatus.ASLEEP,
NodeStatus.DEAD,
):
result_task = asyncio.create_task(
self.client.async_send_command(message, **kwargs)
)
status_task = asyncio.create_task(self._status_event.wait())
await asyncio.wait(
[result_task, status_task],
return_when=asyncio.FIRST_COMPLETED,
)
status_task.cancel()
if self._status_event.is_set() and not result_task.done():
result_task.cancel()
return None
return result_task.result()
await self.client.async_send_command_no_wait(message, **kwargs)
return None
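# Illustrative sketch (not part of the library) of the wait behaviour implemented
# above, assuming `node` is a Node instance:
#
#     await node.async_send_command("refresh_info", wait_for_result=False)
#         # fire-and-forget via async_send_command_no_wait, returns None
#     await node.async_send_command("ping", wait_for_result=True)
#         # always awaits the server response
#     await node.async_send_command("set_value", ...)  # wait_for_result=None
#         # awaits the response unless the node is asleep or dead; if the node
#         # falls asleep or dies while waiting, the pending request is cancelled
#         # and None is returned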
async def async_set_value(
self,
val: Value | str,
new_value: Any,
options: dict | None = None,
wait_for_result: bool | None = None,
) -> SetValueResult | None:
"""Send setValue command to Node for given value (or value_id)."""
# a value may be specified as value_id or the value itself
if not isinstance(val, Value):
if val not in self.values:
raise NotFoundError(f"Value {val} not found on node {self}")
val = self.values[val]
if val.metadata.writeable is False:
raise UnwriteableValue
cmd_args = {
"valueId": _get_value_id_dict_from_value_data(val.data),
"value": new_value,
}
if options:
option = next(
(
option
for option in options
if option not in val.metadata.value_change_options
),
None,
)
if option is not None:
raise NotFoundError(
f"Option {option} not found on value {val} on node {self}"
)
cmd_args["options"] = options
# the value object needs to be sent to the server
result = await self.async_send_command(
"set_value", **cmd_args, require_schema=29, wait_for_result=wait_for_result
)
if result is None:
return None
return SetValueResult(result["result"])
async def async_refresh_info(self) -> None:
"""Send refreshInfo command to Node."""
await self.async_send_command("refresh_info", wait_for_result=False)
async def async_refresh_values(self) -> None:
"""Send refreshValues command to Node."""
await self.async_send_command(
"refresh_values", wait_for_result=False, require_schema=4
)
async def async_refresh_cc_values(self, command_class: CommandClass) -> None:
"""Send refreshCCValues command to Node."""
await self.async_send_command(
"refresh_cc_values",
commandClass=command_class,
wait_for_result=False,
require_schema=4,
)
async def async_get_defined_value_ids(self) -> list[Value]:
"""Send getDefinedValueIDs command to Node."""
data = await self.async_send_command(
"get_defined_value_ids", wait_for_result=True
)
if data is None:
# We should never reach this code
raise FailedCommand("Command failed", "failed_command")
return [
_init_value(self, cast(ValueDataType, value_id))
for value_id in data["valueIds"]
]
async def async_get_value_metadata(self, val: Value | str) -> ValueMetadata:
"""Send getValueMetadata command to Node."""
# a value may be specified as value_id or the value itself
if not isinstance(val, Value):
val = self.values[val]
# the value object needs to be sent to the server
data = await self.async_send_command(
"get_value_metadata",
valueId=_get_value_id_dict_from_value_data(val.data),
wait_for_result=True,
)
return ValueMetadata(cast(MetaDataType, data))
async def async_get_firmware_update_capabilities(
self,
) -> NodeFirmwareUpdateCapabilities:
"""Send getFirmwareUpdateCapabilities command to Node."""
data = await self.async_send_command(
"get_firmware_update_capabilities",
require_schema=7,
wait_for_result=True,
)
assert data
return NodeFirmwareUpdateCapabilities(
cast(NodeFirmwareUpdateCapabilitiesDataType, data["capabilities"])
)
async def async_get_firmware_update_capabilities_cached(
self,
) -> NodeFirmwareUpdateCapabilities:
"""Send getFirmwareUpdateCapabilitiesCached command to Node."""
data = await self.async_send_command(
"get_firmware_update_capabilities_cached",
require_schema=21,
wait_for_result=True,
)
assert data
return NodeFirmwareUpdateCapabilities(
cast(NodeFirmwareUpdateCapabilitiesDataType, data["capabilities"])
)
async def async_abort_firmware_update(self) -> None:
"""Send abortFirmwareUpdate command to Node."""
await self.async_send_command("abort_firmware_update", wait_for_result=True)
async def async_poll_value(self, val: Value | str) -> None:
"""Send pollValue command to Node for given value (or value_id)."""
# a value may be specified as value_id or the value itself
if not isinstance(val, Value):
val = self.values[val]
await self.async_send_command(
"poll_value",
valueId=_get_value_id_dict_from_value_data(val.data),
require_schema=1,
)
async def async_ping(self) -> bool:
"""Send ping command to Node."""
data = (
await self.async_send_command(
"ping", require_schema=5, wait_for_result=True
)
or {}
)
return cast(bool, data.get("responded", False))
async def async_invoke_cc_api(
self,
command_class: CommandClass,
method_name: str,
*args: Any,
wait_for_result: bool | None = None,
) -> Any:
"""Call endpoint.invoke_cc_api command."""
return await self.endpoints[0].async_invoke_cc_api(
command_class, method_name, *args, wait_for_result=wait_for_result
)
async def async_supports_cc_api(self, command_class: CommandClass) -> bool:
"""Call endpoint.supports_cc_api command."""
return await self.endpoints[0].async_supports_cc_api(command_class)
async def async_supports_cc(self, command_class: CommandClass) -> bool:
"""Call endpoint.supports_cc command."""
return await self.endpoints[0].async_supports_cc(command_class)
async def async_controls_cc(self, command_class: CommandClass) -> bool:
"""Call endpoint.controls_cc command."""
return await self.endpoints[0].async_controls_cc(command_class)
async def async_is_cc_secure(self, command_class: CommandClass) -> bool:
"""Call endpoint.is_cc_secure command."""
return await self.endpoints[0].async_is_cc_secure(command_class)
async def async_get_cc_version(self, command_class: CommandClass) -> bool:
"""Call endpoint.get_cc_version command."""
return await self.endpoints[0].async_get_cc_version(command_class)
async def async_get_node_unsafe(self) -> NodeDataType:
"""Call endpoint.get_node_unsafe command."""
return await self.endpoints[0].async_get_node_unsafe()
async def async_has_security_class(self, security_class: SecurityClass) -> bool:
"""Return whether node has the given security class."""
data = await self.async_send_command(
"has_security_class",
securityClass=security_class,
require_schema=8,
wait_for_result=True,
)
assert data
return cast(bool, data["hasSecurityClass"])
async def async_get_highest_security_class(self) -> SecurityClass:
"""Get the highest security class that a node supports."""
data = await self.async_send_command(
"get_highest_security_class", require_schema=8, wait_for_result=True
)
assert data
return SecurityClass(data["highestSecurityClass"])
async def async_test_power_level(
self, test_node: "Node", power_level: PowerLevel, test_frame_count: int
) -> int:
"""Send testPowerLevel command to Node."""
data = await self.async_send_command(
"test_powerlevel",
testNodeId=test_node.node_id,
powerlevel=power_level,
testFrameCount=test_frame_count,
require_schema=13,
wait_for_result=True,
)
assert data
return cast(int, data["framesAcked"])
async def async_check_lifeline_health(
self, rounds: int | None = None
) -> LifelineHealthCheckSummary:
"""Send checkLifelineHealth command to Node."""
kwargs = {}
if rounds is not None:
kwargs["rounds"] = rounds
data = await self.async_send_command(
"check_lifeline_health",
require_schema=13,
wait_for_result=True,
**kwargs,
)
assert data
return LifelineHealthCheckSummary(data["summary"])
async def async_check_route_health(
self, target_node: "Node", rounds: int | None = None
) -> RouteHealthCheckSummary:
"""Send checkRouteHealth command to Node."""
kwargs = {"targetNodeId": target_node.node_id}
if rounds is not None:
kwargs["rounds"] = rounds
data = await self.async_send_command(
"check_route_health",
require_schema=13,
wait_for_result=True,
**kwargs,
)
assert data
return RouteHealthCheckSummary(data["summary"])
async def async_get_state(self) -> NodeDataType:
"""Get node state."""
data = await self.async_send_command(
"get_state", require_schema=14, wait_for_result=True
)
assert data
return cast(NodeDataType, data["state"])
async def async_set_name(
self, name: str, update_cc: bool = True, wait_for_result: bool | None = None
) -> None:
"""Set node name."""
# If we are not going to update the name CC, just wait for the result because
# the change is local to the driver
if not update_cc:
wait_for_result = True
await self.async_send_command(
"set_name",
name=name,
updateCC=update_cc,
wait_for_result=wait_for_result,
require_schema=14,
)
self.data["name"] = name
async def async_set_location(
self,
location: str,
update_cc: bool = True,
wait_for_result: bool | None = None,
) -> None:
"""Set node location."""
# If we are not going to update the location CC, just wait for the result
# because the change is local to the driver
if not update_cc:
wait_for_result = True
await self.async_send_command(
"set_location",
location=location,
updateCC=update_cc,
wait_for_result=wait_for_result,
require_schema=14,
)
self.data["location"] = location
async def async_is_firmware_update_in_progress(self) -> bool:
"""
Send isFirmwareUpdateInProgress command to Node.
If `True`, a firmware update for this node is in progress.
"""
data = await self.async_send_command(
"is_firmware_update_in_progress", require_schema=21, wait_for_result=True
)
assert data
return cast(bool, data["progress"])
async def async_set_keep_awake(self, keep_awake: bool) -> None:
"""Set node keep awake state."""
await self.async_send_command(
"set_keep_awake",
keepAwake=keep_awake,
wait_for_result=True,
require_schema=14,
)
self.data["keepAwake"] = keep_awake
async def async_interview(self) -> None:
"""Interview node."""
await self.async_send_command(
"interview",
wait_for_result=False,
require_schema=22,
)
async def async_get_value_timestamp(self, val: Value | str) -> int:
"""Send getValueTimestamp command to Node for given value (or value_id)."""
# a value may be specified as value_id or the value itself
if not isinstance(val, Value):
val = self.values[val]
data = await self.async_send_command(
"get_value_timestamp",
valueId=_get_value_id_dict_from_value_data(val.data),
require_schema=27,
wait_for_result=True,
)
assert data
return cast(int, data["timestamp"])
async def async_manually_idle_notification_value(self, val: Value | str) -> None:
"""Send manuallyIdleNotificationValue command to Node for given value (or value_id)."""
# a value may be specified as value_id or the value itself
if not isinstance(val, Value):
val = self.values[val]
if val.command_class != CommandClass.NOTIFICATION:
raise ValueError(
"Value must be of CommandClass.NOTIFICATION to manually idle it"
)
await self.async_send_command(
"manually_idle_notification_value",
valueId=_get_value_id_dict_from_value_data(val.data),
require_schema=28,
wait_for_result=False,
)
async def async_set_date_and_time(self, datetime_: datetime | None = None) -> bool:
"""Send setDateAndTime command to Node."""
args = {}
if datetime_:
args["date"] = datetime_.isoformat()
data = await self.async_send_command(
"set_date_and_time",
**args,
require_schema=28,
wait_for_result=True,
)
assert data
return cast(bool, data["success"])
async def async_get_date_and_time(self) -> DateAndTime:
"""Send getDateAndTime command to Node."""
data = await self.async_send_command(
"get_date_and_time",
require_schema=31,
wait_for_result=True,
)
assert data
return DateAndTime(data["dateAndTime"])
async def async_is_health_check_in_progress(self) -> bool:
"""Send isHealthCheckInProgress command to Node."""
data = await self.async_send_command(
"is_health_check_in_progress",
require_schema=31,
wait_for_result=True,
)
assert data
return cast(bool, data["progress"])
async def async_abort_health_check(self) -> None:
"""Send abortHealthCheck command to Node."""
await self.async_send_command(
"abort_health_check",
require_schema=31,
wait_for_result=True,
)
async def async_set_default_volume(
self, default_volume: int | float | None
) -> None:
"""Send setDefaultVolume command to Node."""
cmd_kwargs = {}
self.data["defaultVolume"] = default_volume
if default_volume is not None:
cmd_kwargs["defaultVolume"] = default_volume
await self.async_send_command(
"set_default_volume",
require_schema=31,
wait_for_result=None,
**cmd_kwargs,
)
async def async_set_default_transition_duration(
self, default_duration_transition: int | float | None
) -> None:
"""Send setDefaultTransitionDuration command to Node."""
cmd_kwargs = {}
self.data["defaultTransitionDuration"] = default_duration_transition
if default_duration_transition is not None:
cmd_kwargs["defaultTransitionDuration"] = default_duration_transition
await self.async_send_command(
"set_default_transition_duration",
require_schema=31,
wait_for_result=None,
**cmd_kwargs,
)
async def async_has_device_config_changed(self) -> bool:
"""Send hasDeviceConfigChanged command to Node."""
data = await self.async_send_command(
"has_device_config_changed",
require_schema=31,
wait_for_result=True,
)
assert data
return cast(bool, data["changed"])
def handle_test_powerlevel_progress(self, event: Event) -> None:
"""Process a test power level progress event."""
event.data["test_power_level_progress"] = TestPowerLevelProgress(
event.data["acknowledged"], event.data["total"]
)
def handle_check_lifeline_health_progress(self, event: Event) -> None:
"""Process a check lifeline health progress event."""
event.data["check_lifeline_health_progress"] = CheckHealthProgress(
event.data["rounds"],
event.data["totalRounds"],
event.data["lastRating"],
LifelineHealthCheckResult(event.data["lastResult"]),
)
def handle_check_route_health_progress(self, event: Event) -> None:
"""Process a check route health progress event."""
event.data["check_route_health_progress"] = CheckHealthProgress(
event.data["rounds"],
event.data["totalRounds"],
event.data["lastRating"],
RouteHealthCheckResult(event.data["lastResult"]),
)
def handle_wake_up(self, event: Event) -> None:
"""Process a node wake up event."""
# pylint: disable=unused-argument
self._status_event.clear()
self.data["status"] = NodeStatus.AWAKE
def handle_sleep(self, event: Event) -> None:
"""Process a node sleep event."""
# pylint: disable=unused-argument
self._status_event.set()
self.data["status"] = NodeStatus.ASLEEP
def handle_dead(self, event: Event) -> None:
"""Process a node dead event."""
# pylint: disable=unused-argument
self._status_event.set()
self.data["status"] = NodeStatus.DEAD
def handle_alive(self, event: Event) -> None:
"""Process a node alive event."""
# pylint: disable=unused-argument
self._status_event.clear()
self.data["status"] = NodeStatus.ALIVE
def handle_interview_started(self, event: Event) -> None:
"""Process a node interview started event."""
# pylint: disable=unused-argument
self.data["ready"] = False
self.data["interviewStage"] = None
def handle_interview_stage_completed(self, event: Event) -> None:
"""Process a node interview stage completed event."""
self.data["interviewStage"] = event.data["stageName"]
def handle_interview_failed(self, event: Event) -> None:
"""Process a node interview failed event."""
# pylint: disable=unused-argument
self.data["interviewStage"] = INTERVIEW_FAILED
def handle_interview_completed(self, event: Event) -> None:
"""Process a node interview completed event."""
# pylint: disable=unused-argument
self.data["ready"] = True
def handle_ready(self, event: Event) -> None:
"""Process a node ready event."""
# the event contains a full dump of the node
self.update(event.data["nodeState"])
def handle_value_added(self, event: Event) -> None:
"""Process a node value added event."""
self.handle_value_updated(event)
def handle_value_updated(self, event: Event) -> None:
"""Process a node value updated event."""
evt_val_data: ValueDataType = event.data["args"]
value_id = _get_value_id_str_from_dict(self, evt_val_data)
value = self.values.get(value_id)
if value is None:
value = _init_value(self, evt_val_data)
self.values[value.value_id] = event.data["value"] = value
else:
value.receive_event(event)
event.data["value"] = value
def handle_value_removed(self, event: Event) -> None:
"""Process a node value removed event."""
value_id = _get_value_id_str_from_dict(self, event.data["args"])
event.data["value"] = self.values.pop(value_id)
def handle_value_notification(self, event: Event) -> None:
"""Process a node value notification event."""
# if value is found, use value data as base and update what is provided
# in the event, otherwise use the event data
event_data = event.data["args"]
if value := self.values.get(_get_value_id_str_from_dict(self, event_data)):
value_notification = ValueNotification(
self, cast(ValueDataType, dict(value.data))
)
value_notification.update(event_data)
else:
value_notification = ValueNotification(self, event_data)
event.data["value_notification"] = value_notification
def handle_metadata_updated(self, event: Event) -> None:
"""Process a node metadata updated event."""
# handle metadata updated as value updated (as it's a value object with
# included metadata)
self.handle_value_updated(event)
def handle_notification(self, event: Event) -> None:
"""Process a node notification event."""
command_class = CommandClass(event.data["ccId"])
if command_class == CommandClass.NOTIFICATION:
event.data["notification"] = NotificationNotification(
self, cast(NotificationNotificationDataType, event.data)
)
elif command_class == CommandClass.SWITCH_MULTILEVEL:
event.data["notification"] = MultilevelSwitchNotification(
self, cast(MultilevelSwitchNotificationDataType, event.data)
)
elif command_class == CommandClass.ENTRY_CONTROL:
event.data["notification"] = EntryControlNotification(
self, cast(EntryControlNotificationDataType, event.data)
)
elif command_class == CommandClass.POWERLEVEL:
event.data["notification"] = PowerLevelNotification(
self, cast(PowerLevelNotificationDataType, event.data)
)
else:
_LOGGER.info("Unhandled notification command class: %s", command_class.name)
def handle_firmware_update_progress(self, event: Event) -> None:
"""Process a node firmware update progress event."""
self._firmware_update_progress = event.data[
"firmware_update_progress"
] = NodeFirmwareUpdateProgress(
self, cast(NodeFirmwareUpdateProgressDataType, event.data["progress"])
)
def handle_firmware_update_finished(self, event: Event) -> None:
"""Process a node firmware update finished event."""
self._firmware_update_progress = None
event.data["firmware_update_finished"] = NodeFirmwareUpdateResult(
self, cast(NodeFirmwareUpdateResultDataType, event.data["result"])
)
def handle_statistics_updated(self, event: Event) -> None:
"""Process a statistics updated event."""
self.data["statistics"] = statistics = event.data["statistics"]
event.data["statistics_updated"] = self._statistics = NodeStatistics(
self.client, statistics
)
if last_seen := statistics.get("lastSeen"):
self.data["lastSeen"] = last_seen | zwave-js-server-python | /zwave_js_server_python-0.51.0-py3-none-any.whl/zwave_js_server/model/node/__init__.py | __init__.py |
from __future__ import annotations
from dataclasses import dataclass, field
from enum import IntEnum
from typing import TypedDict
from ...util.helpers import convert_bytes_to_base64
class ControllerFirmwareUpdateDataDataType(TypedDict, total=False):
"""Represent a controller firmware update data dict type."""
filename: str # required
file: str # required
fileFormat: str
@dataclass
class ControllerFirmwareUpdateData:
"""Controller firmware update data."""
filename: str
file: bytes
file_format: str | None = None
def to_dict(self) -> ControllerFirmwareUpdateDataDataType:
"""Convert firmware update data to dict."""
data: ControllerFirmwareUpdateDataDataType = {
"filename": self.filename,
"file": convert_bytes_to_base64(self.file),
}
if self.file_format is not None:
data["fileFormat"] = self.file_format
return data
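# Illustrative sketch (not part of the library): building the payload for a
# controller firmware update. The filename, bytes, and format below are made up.
#
#     update_data = ControllerFirmwareUpdateData(
#         filename="controller_fw_7_19.gbl",
#         file=b"\x00\x01\x02",
#         file_format="gbl",
#     )
#     update_data.to_dict()
#     # -> {"filename": "controller_fw_7_19.gbl",
#     #     "file": "<base64 of the raw bytes>",
#     #     "fileFormat": "gbl"}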
class ControllerFirmwareUpdateStatus(IntEnum):
"""Enum with all controller firmware update status values.
https://zwave-js.github.io/node-zwave-js/#/api/controller?id=quotfirmware-update-finishedquot
"""
ERROR_TIMEOUT = 0
# The maximum number of retry attempts for a firmware fragment was reached
ERROR_RETRY_LIMIT_REACHED = 1
# The update was aborted by the bootloader
ERROR_ABORTED = 2
# This controller does not support firmware updates
ERROR_NOT_SUPPORTED = 3
OK = 255
class ControllerFirmwareUpdateProgressDataType(TypedDict):
"""Represent a controller firmware update progress dict type."""
sentFragments: int
totalFragments: int
progress: float
@dataclass
class ControllerFirmwareUpdateProgress:
"""Model for a controller firmware update progress data."""
data: ControllerFirmwareUpdateProgressDataType
sent_fragments: int = field(init=False)
total_fragments: int = field(init=False)
progress: float = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.sent_fragments = self.data["sentFragments"]
self.total_fragments = self.data["totalFragments"]
self.progress = float(self.data["progress"])
class ControllerFirmwareUpdateResultDataType(TypedDict):
"""Represent a controller firmware update result dict type."""
status: int
success: bool
@dataclass
class ControllerFirmwareUpdateResult:
"""Model for controller firmware update result data."""
data: ControllerFirmwareUpdateResultDataType
status: ControllerFirmwareUpdateStatus = field(init=False)
success: bool = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.status = ControllerFirmwareUpdateStatus(self.data["status"])
self.success = self.data["success"]
# ---- end of file: zwave_js_server/model/controller/firmware.py ----
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, TypedDict
from ...const import Protocols, ProvisioningEntryStatus, QRCodeVersion, SecurityClass
class InclusionGrantDataType(TypedDict):
"""Representation of an inclusion grant data dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/Inclusion.ts#L48-L56
securityClasses: list[int]
clientSideAuth: bool
@dataclass
class InclusionGrant:
"""Representation of an inclusion grant."""
security_classes: list[SecurityClass]
client_side_auth: bool
def to_dict(self) -> InclusionGrantDataType:
"""Return InclusionGrantDataType dict from self."""
return {
"securityClasses": [sec_cls.value for sec_cls in self.security_classes],
"clientSideAuth": self.client_side_auth,
}
@classmethod
def from_dict(cls, data: InclusionGrantDataType) -> "InclusionGrant":
"""Return InclusionGrant from InclusionGrantDataType dict."""
return cls(
security_classes=[
SecurityClass(sec_cls) for sec_cls in data["securityClasses"]
],
client_side_auth=data["clientSideAuth"],
)
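# Illustrative sketch (not part of the library): InclusionGrant round-trips
# between the dataclass and the wire format used by the server. The enum member
# name S2_UNAUTHENTICATED is assumed here.
#
#     grant = InclusionGrant(
#         security_classes=[SecurityClass.S2_UNAUTHENTICATED],
#         client_side_auth=False,
#     )
#     grant.to_dict()
#     # -> {"securityClasses": [SecurityClass.S2_UNAUTHENTICATED.value],
#     #     "clientSideAuth": False}
#     InclusionGrant.from_dict(grant.to_dict()) == grant
#     # -> True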
@dataclass
class ProvisioningEntry:
"""Class to represent the base fields of a provisioning entry."""
dsk: str
security_classes: list[SecurityClass]
requested_security_classes: list[SecurityClass] | None = None
status: ProvisioningEntryStatus = ProvisioningEntryStatus.ACTIVE
additional_properties: dict[str, Any] | None = None
def to_dict(self) -> dict[str, Any]:
"""Return PlannedProvisioning data dict from self."""
data = {
"dsk": self.dsk,
"securityClasses": [sec_cls.value for sec_cls in self.security_classes],
"status": self.status.value,
**(self.additional_properties or {}),
}
if self.requested_security_classes:
data["requestedSecurityClasses"] = [
sec_cls.value for sec_cls in self.requested_security_classes
]
return data
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "ProvisioningEntry":
"""Return ProvisioningEntry from data dict."""
cls_instance = cls(
dsk=data["dsk"],
security_classes=[
SecurityClass(sec_cls) for sec_cls in data["securityClasses"]
],
additional_properties={
k: v
for k, v in data.items()
if k
not in {"dsk", "securityClasses", "requestedSecurityClasses", "status"}
},
)
if "requestedSecurityClasses" in data:
cls_instance.requested_security_classes = [
SecurityClass(sec_cls) for sec_cls in data["requestedSecurityClasses"]
]
if "status" in data:
cls_instance.status = ProvisioningEntryStatus(data["status"])
return cls_instance
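# Illustrative sketch (not part of the library): a provisioning entry round-trip.
# The DSK string and "label" key are made up; unknown keys survive the round-trip
# via `additional_properties`.
#
#     entry = ProvisioningEntry.from_dict({
#         "dsk": "12345-12345-12345-12345-12345-12345-12345-12345",
#         "securityClasses": [SecurityClass.S2_UNAUTHENTICATED.value],
#         "status": ProvisioningEntryStatus.ACTIVE.value,
#         "label": "Front door lock",
#     })
#     entry.additional_properties
#     # -> {"label": "Front door lock"}
#     entry.to_dict()["label"]
#     # -> "Front door lock"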
@dataclass
class QRProvisioningInformationMixin:
"""Mixin class to represent the base fields of a QR provisioning information."""
version: QRCodeVersion
generic_device_class: int
specific_device_class: int
installer_icon_type: int
manufacturer_id: int
product_type: int
product_id: int
application_version: str
max_inclusion_request_interval: int | None
uuid: str | None
supported_protocols: list[Protocols] | None
@dataclass
class QRProvisioningInformation(ProvisioningEntry, QRProvisioningInformationMixin):
"""Representation of provisioning information retrieved from a QR code."""
def to_dict(self) -> dict[str, Any]:
"""Return QRProvisioningInformation data dict from self."""
data = {
"version": self.version.value,
"securityClasses": [sec_cls.value for sec_cls in self.security_classes],
"dsk": self.dsk,
"status": self.status.value,
"genericDeviceClass": self.generic_device_class,
"specificDeviceClass": self.specific_device_class,
"installerIconType": self.installer_icon_type,
"manufacturerId": self.manufacturer_id,
"productType": self.product_type,
"productId": self.product_id,
"applicationVersion": self.application_version,
**(self.additional_properties or {}),
}
if self.requested_security_classes:
data["requestedSecurityClasses"] = [
sec_cls.value for sec_cls in self.requested_security_classes
]
if self.max_inclusion_request_interval is not None:
data["maxInclusionRequestInterval"] = self.max_inclusion_request_interval
if self.uuid is not None:
data["uuid"] = self.uuid
if self.supported_protocols is not None:
data["supportedProtocols"] = [
protocol.value for protocol in self.supported_protocols
]
return data
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "QRProvisioningInformation":
"""Return QRProvisioningInformation from data dict."""
cls_instance = cls(
version=QRCodeVersion(data["version"]),
security_classes=[
SecurityClass(sec_cls) for sec_cls in data["securityClasses"]
],
dsk=data["dsk"],
generic_device_class=data["genericDeviceClass"],
specific_device_class=data["specificDeviceClass"],
installer_icon_type=data["installerIconType"],
manufacturer_id=data["manufacturerId"],
product_type=data["productType"],
product_id=data["productId"],
application_version=data["applicationVersion"],
max_inclusion_request_interval=data.get("maxInclusionRequestInterval"),
uuid=data.get("uuid"),
supported_protocols=[
Protocols(supported_protocol)
for supported_protocol in data.get("supportedProtocols", [])
],
additional_properties={
k: v
for k, v in data.items()
if k
not in {
"version",
"securityClasses",
"requestedSecurityClasses",
"dsk",
"genericDeviceClass",
"specificDeviceClass",
"installerIconType",
"manufacturerId",
"productType",
"productId",
"applicationVersion",
"maxInclusionRequestInterval",
"uuid",
"supportedProtocols",
"status",
}
},
)
if "requestedSecurityClasses" in data:
cls_instance.requested_security_classes = [
SecurityClass(sec_cls) for sec_cls in data["requestedSecurityClasses"]
]
if "status" in data:
cls_instance.status = ProvisioningEntryStatus(data["status"])
return cls_instance
# ---- end of file: zwave_js_server/model/controller/inclusion_and_provisioning.py ----
from __future__ import annotations
from typing import Literal, TypedDict
from ...const import RemoveNodeReason
from ...event import BaseEventModel
from ..node.data_model import FoundNodeDataType, NodeDataType
from .firmware import (
ControllerFirmwareUpdateProgressDataType,
ControllerFirmwareUpdateResultDataType,
)
from .inclusion_and_provisioning import InclusionGrantDataType
from .statistics import ControllerStatisticsDataType
class InclusionResultDataType(TypedDict, total=False):
"""Represent an inclusion result data dict type."""
lowSecurity: bool # required
lowSecurityReason: int
class BaseControllerEventModel(BaseEventModel):
"""Base model for a controller event."""
source: Literal["controller"]
class ExclusionFailedEventModel(BaseControllerEventModel):
"""Model for `exclusion failed` event data."""
event: Literal["exclusion failed"]
class ExclusionStartedEventModel(BaseControllerEventModel):
"""Model for `exclusion started` event data."""
event: Literal["exclusion started"]
class ExclusionStoppedEventModel(BaseControllerEventModel):
"""Model for `exclusion stopped` event data."""
event: Literal["exclusion stopped"]
class FirmwareUpdateFinishedEventModel(BaseControllerEventModel):
"""Model for `firmware update finished` event data."""
event: Literal["firmware update finished"]
result: ControllerFirmwareUpdateResultDataType
class FirmwareUpdateProgressEventModel(BaseControllerEventModel):
"""Model for `firmware update progress` event data."""
event: Literal["firmware update progress"]
progress: ControllerFirmwareUpdateProgressDataType
class GrantSecurityClassesEventModel(BaseControllerEventModel):
"""Model for `grant security classes` event data."""
event: Literal["grant security classes"]
requested: InclusionGrantDataType
class HealNetworkDoneEventModel(BaseControllerEventModel):
"""Model for `heal network done` event data."""
event: Literal["heal network done"]
result: dict[int, str]
class HealNetworkProgressEventModel(BaseControllerEventModel):
"""Model for `heal network progress` event data."""
event: Literal["heal network progress"]
progress: dict[int, str]
class InclusionAbortedEventModel(BaseControllerEventModel):
"""Model for `inclusion aborted` event data."""
event: Literal["inclusion aborted"]
class InclusionFailedEventModel(BaseControllerEventModel):
"""Model for `inclusion failed` event data."""
event: Literal["inclusion failed"]
class InclusionStartedEventModel(BaseControllerEventModel):
"""Model for `inclusion started` event data."""
event: Literal["inclusion started"]
secure: bool
class InclusionStoppedEventModel(BaseControllerEventModel):
"""Model for `inclusion stopped` event data."""
event: Literal["inclusion stopped"]
class NodeAddedEventModel(BaseControllerEventModel):
"""Model for `node added` event data."""
event: Literal["node added"]
node: NodeDataType
result: InclusionResultDataType
class NodeFoundEventModel(BaseControllerEventModel):
"""Model for `node found` event data."""
event: Literal["node found"]
node: FoundNodeDataType
class NodeRemovedEventModel(BaseControllerEventModel):
"""Model for `node removed` event data."""
event: Literal["node removed"]
node: NodeDataType
reason: RemoveNodeReason
class NVMBackupAndConvertProgressEventModel(BaseControllerEventModel):
"""Base model for `nvm backup progress` and `nvm convert progress` event data."""
bytesRead: int
total: int
class NVMBackupProgressEventModel(NVMBackupAndConvertProgressEventModel):
"""Model for `nvm backup progress` event data."""
event: Literal["nvm backup progress"]
class NVMConvertProgressEventModel(NVMBackupAndConvertProgressEventModel):
"""Model for `nvm convert progress` event data."""
event: Literal["nvm convert progress"]
class NVMRestoreProgressEventModel(BaseControllerEventModel):
"""Model for `nvm restore progress` event data."""
event: Literal["nvm restore progress"]
bytesWritten: int
total: int
class StatisticsUpdatedEventModel(BaseControllerEventModel):
"""Model for `statistics updated` event data."""
event: Literal["statistics updated"]
statistics: ControllerStatisticsDataType
class ValidateDSKAndEnterPINEventModel(BaseControllerEventModel):
"""Model for `validate dsk and enter pin` event data."""
event: Literal["validate dsk and enter pin"]
dsk: str
class IdentifyEventModel(BaseControllerEventModel):
"""Model for `identify` event data."""
event: Literal["identify"]
nodeId: int
class StatusChangedEventModel(BaseControllerEventModel):
"""Model for `status changed` event data."""
event: Literal["status changed"]
status: int
CONTROLLER_EVENT_MODEL_MAP: dict[str, type["BaseControllerEventModel"]] = {
"exclusion failed": ExclusionFailedEventModel,
"exclusion started": ExclusionStartedEventModel,
"exclusion stopped": ExclusionStoppedEventModel,
"firmware update finished": FirmwareUpdateFinishedEventModel,
"firmware update progress": FirmwareUpdateProgressEventModel,
"grant security classes": GrantSecurityClassesEventModel,
"heal network done": HealNetworkDoneEventModel,
"heal network progress": HealNetworkProgressEventModel,
"identify": IdentifyEventModel,
"inclusion aborted": InclusionAbortedEventModel,
"inclusion failed": InclusionFailedEventModel,
"inclusion started": InclusionStartedEventModel,
"inclusion stopped": InclusionStoppedEventModel,
"node added": NodeAddedEventModel,
"node found": NodeFoundEventModel,
"node removed": NodeRemovedEventModel,
"nvm backup progress": NVMBackupProgressEventModel,
"nvm convert progress": NVMConvertProgressEventModel,
"nvm restore progress": NVMRestoreProgressEventModel,
"statistics updated": StatisticsUpdatedEventModel,
"status changed": StatusChangedEventModel,
"validate dsk and enter pin": ValidateDSKAndEnterPINEventModel,
}
# ---- end of file: zwave_js_server/model/controller/event_model.py ----
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, TypedDict
from ..statistics import RouteStatistics, RouteStatisticsDataType
if TYPE_CHECKING:
from ...client import Client
class ControllerLifelineRoutesDataType(TypedDict):
"""Represent a controller lifeline routes data dict type."""
lwr: RouteStatisticsDataType
nlwr: RouteStatisticsDataType
@dataclass
class ControllerLifelineRoutes:
"""Represent controller lifeline routes."""
client: "Client"
data: ControllerLifelineRoutesDataType
lwr: RouteStatistics | None = field(init=False, default=None)
nlwr: RouteStatistics | None = field(init=False, default=None)
def __post_init__(self) -> None:
"""Post initialize."""
if lwr := self.data.get("lwr"):
with suppress(ValueError):
self.lwr = RouteStatistics(self.client, lwr)
if nlwr := self.data.get("nlwr"):
with suppress(ValueError):
self.nlwr = RouteStatistics(self.client, nlwr)
class ChannelRSSIDataType(TypedDict):
"""Represent a channel RSSI data dict type."""
average: int
current: int
class BackgroundRSSIDataType(TypedDict, total=False):
"""Represent a background RSSI data dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/ControllerStatistics.ts#L40
timestamp: int # required
channel0: ChannelRSSIDataType # required
channel1: ChannelRSSIDataType # required
channel2: ChannelRSSIDataType
class ControllerStatisticsDataType(TypedDict, total=False):
"""Represent a controller statistics data dict type."""
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/zwave-js/src/lib/controller/ControllerStatistics.ts#L20-L39
messagesTX: int # required
messagesRX: int # required
messagesDroppedTX: int # required
messagesDroppedRX: int # required
NAK: int # required
CAN: int # required
timeoutACK: int # required
timeoutResponse: int # required
timeoutCallback: int # required
backgroundRSSI: BackgroundRSSIDataType
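# Illustrative sketch (not part of the library): a minimal controller statistics
# payload. All counters are required; backgroundRSSI is optional. Numbers are
# made up.
#
#     stats: ControllerStatisticsDataType = {
#         "messagesTX": 110, "messagesRX": 108,
#         "messagesDroppedTX": 0, "messagesDroppedRX": 2,
#         "NAK": 0, "CAN": 1,
#         "timeoutACK": 0, "timeoutResponse": 1, "timeoutCallback": 0,
#     }
#     ControllerStatistics(stats).messages_tx
#     # -> 110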
@dataclass
class ChannelRSSI:
"""Represent a channel RSSI."""
data: ChannelRSSIDataType
average: int = field(init=False)
current: int = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.average = self.data["average"]
self.current = self.data["current"]
@dataclass
class BackgroundRSSI:
"""Represent a background RSSI update."""
data: BackgroundRSSIDataType
timestamp: int = field(init=False)
channel_0: ChannelRSSI = field(init=False)
channel_1: ChannelRSSI = field(init=False)
channel_2: ChannelRSSI | None = field(init=False)
def __post_init__(self) -> None:
"""Post initialize."""
self.timestamp = self.data["timestamp"]
self.channel_0 = ChannelRSSI(self.data["channel0"])
self.channel_1 = ChannelRSSI(self.data["channel1"])
if not (channel_2 := self.data.get("channel2")):
self.channel_2 = None
return
self.channel_2 = ChannelRSSI(channel_2)
@dataclass
class ControllerStatistics:
"""Represent a controller statistics update."""
data: ControllerStatisticsDataType
messages_tx: int = field(init=False)
messages_rx: int = field(init=False)
messages_dropped_rx: int = field(init=False)
messages_dropped_tx: int = field(init=False)
nak: int = field(init=False)
can: int = field(init=False)
timeout_ack: int = field(init=False)
timeout_response: int = field(init=False)
timeout_callback: int = field(init=False)
background_rssi: BackgroundRSSI | None = field(init=False, default=None)
def __post_init__(self) -> None:
"""Post initialize."""
self.messages_tx = self.data["messagesTX"]
self.messages_rx = self.data["messagesRX"]
self.messages_dropped_rx = self.data["messagesDroppedRX"]
self.messages_dropped_tx = self.data["messagesDroppedTX"]
self.nak = self.data["NAK"]
self.can = self.data["CAN"]
self.timeout_ack = self.data["timeoutACK"]
self.timeout_response = self.data["timeoutResponse"]
self.timeout_callback = self.data["timeoutCallback"]
if background_rssi := self.data.get("backgroundRSSI"):
self.background_rssi = BackgroundRSSI(background_rssi)
# ---- end of file: zwave_js_server/model/controller/statistics.py ----
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Literal, cast
from zwave_js_server.model.node.firmware import (
NodeFirmwareUpdateFileInfo,
NodeFirmwareUpdateInfo,
)
from ...const import (
MINIMUM_QR_STRING_LENGTH,
ControllerStatus,
ExclusionStrategy,
InclusionState,
InclusionStrategy,
NodeType,
QRCodeVersion,
RemoveNodeReason,
RFRegion,
ZwaveFeature,
)
from ...event import Event, EventBase
from ...util.helpers import convert_base64_to_bytes, convert_bytes_to_base64
from ..association import AssociationAddress, AssociationGroup
from ..node import Node
from ..node.firmware import NodeFirmwareUpdateResult
from .data_model import ControllerDataType
from .event_model import CONTROLLER_EVENT_MODEL_MAP
from .firmware import ControllerFirmwareUpdateProgress, ControllerFirmwareUpdateResult
from .inclusion_and_provisioning import (
InclusionGrant,
ProvisioningEntry,
QRProvisioningInformation,
)
from .statistics import (
ControllerLifelineRoutes,
ControllerStatistics,
ControllerStatisticsDataType,
)
if TYPE_CHECKING:
from ...client import Client
DEFAULT_CONTROLLER_STATISTICS = ControllerStatisticsDataType(
messagesTX=0,
messagesRX=0,
messagesDroppedTX=0,
messagesDroppedRX=0,
NAK=0,
CAN=0,
timeoutACK=0,
timeoutResponse=0,
timeoutCallback=0,
)
@dataclass
class NVMProgress:
"""Class to represent an NVM backup/restore progress event."""
bytes_read_or_written: int
total_bytes: int
class Controller(EventBase):
"""Represent a Z-Wave JS controller."""
def __init__(self, client: "Client", state: dict) -> None:
"""Initialize controller."""
super().__init__()
self.client = client
self.nodes: dict[int, Node] = {}
self._heal_network_progress: dict[int, str] | None = None
self._statistics = ControllerStatistics(DEFAULT_CONTROLLER_STATISTICS)
self._firmware_update_progress: ControllerFirmwareUpdateProgress | None = None
for node_state in state["nodes"]:
node = Node(client, node_state)
self.nodes[node.node_id] = node
self.update(state["controller"])
def __repr__(self) -> str:
"""Return the representation."""
return f"{type(self).__name__}(home_id={self.home_id})"
def __hash__(self) -> int:
"""Return the hash."""
return hash(self.home_id)
def __eq__(self, other: object) -> bool:
"""Return whether this instance equals another."""
if not isinstance(other, Controller):
return False
return self.home_id == other.home_id
@property
def sdk_version(self) -> str | None:
"""Return sdk_version."""
return self.data.get("sdkVersion")
@property
def controller_type(self) -> int | None:
"""Return controller_type."""
return self.data.get("type")
@property
def home_id(self) -> int | None:
"""Return home_id."""
return self.data.get("homeId")
@property
def own_node_id(self) -> int | None:
"""Return own_node_id."""
return self.data.get("ownNodeId")
@property
def own_node(self) -> Node | None:
"""Return own_node."""
if self.own_node_id is None:
return None
return self.nodes.get(self.own_node_id)
@property
def is_primary(self) -> bool | None:
"""Return is_primary."""
return self.data.get("isPrimary")
@property
def is_using_home_id_from_other_network(self) -> bool | None:
"""Return is_using_home_id_from_other_network."""
return self.data.get("isUsingHomeIdFromOtherNetwork")
@property
def is_SIS_present(self) -> bool | None: # pylint: disable=invalid-name
"""Return is_SIS_present."""
return self.data.get("isSISPresent")
@property
def was_real_primary(self) -> bool | None:
"""Return was_real_primary."""
return self.data.get("wasRealPrimary")
@property
def is_suc(self) -> bool | None:
"""Return is_suc."""
return self.data.get("isSUC")
@property
def node_type(self) -> NodeType | None:
"""Return node_type."""
if (node_type := self.data.get("nodeType")) is not None:
return NodeType(node_type)
return None
@property
def firmware_version(self) -> str | None:
"""Return firmware_version."""
return self.data.get("firmwareVersion")
@property
def manufacturer_id(self) -> int | None:
"""Return manufacturer_id."""
return self.data.get("manufacturerId")
@property
def product_type(self) -> int | None:
"""Return product_type."""
return self.data.get("productType")
@property
def product_id(self) -> int | None:
"""Return product_id."""
return self.data.get("productId")
@property
def supported_function_types(self) -> list[int]:
"""Return supported_function_types."""
return self.data.get("supportedFunctionTypes", [])
@property
def suc_node_id(self) -> int | None:
"""Return suc_node_id."""
return self.data.get("sucNodeId")
@property
def supports_timers(self) -> bool | None:
"""Return supports_timers."""
return self.data.get("supportsTimers")
@property
def is_heal_network_active(self) -> bool | None:
"""Return is_heal_network_active."""
return self.data.get("isHealNetworkActive")
@property
def statistics(self) -> ControllerStatistics:
"""Return statistics property."""
return self._statistics
@property
def heal_network_progress(self) -> dict[int, str] | None:
"""Return heal network progress state."""
return self._heal_network_progress
@property
def inclusion_state(self) -> InclusionState:
"""Return inclusion state."""
return InclusionState(self.data["inclusionState"])
@property
def rf_region(self) -> RFRegion | None:
"""Return RF region of controller."""
if (rf_region := self.data.get("rfRegion")) is None:
return None
return RFRegion(rf_region)
@property
def firmware_update_progress(self) -> ControllerFirmwareUpdateProgress | None:
"""Return firmware update progress."""
return self._firmware_update_progress
@property
def status(self) -> ControllerStatus:
"""Return status."""
return ControllerStatus(self.data["status"])
def update(self, data: ControllerDataType) -> None:
"""Update controller data."""
self.data = data
self._statistics = ControllerStatistics(
self.data.get("statistics", DEFAULT_CONTROLLER_STATISTICS)
)
async def async_begin_inclusion(
self,
inclusion_strategy: Literal[
InclusionStrategy.DEFAULT,
InclusionStrategy.SECURITY_S0,
InclusionStrategy.SECURITY_S2,
InclusionStrategy.INSECURE,
],
force_security: bool | None = None,
provisioning: str | ProvisioningEntry | QRProvisioningInformation | None = None,
dsk: str | None = None,
) -> bool:
"""Send beginInclusion command to Controller."""
# Most functionality was introduced in Schema 8
require_schema = 8
options: dict[str, Any] = {"strategy": inclusion_strategy}
# forceSecurity can only be used with the default inclusion strategy
if force_security is not None:
if inclusion_strategy != InclusionStrategy.DEFAULT:
raise ValueError(
"`forceSecurity` option is only supported with inclusion_strategy=DEFAULT"
)
options["forceSecurity"] = force_security
# provisioning can only be used with the S2 inclusion strategy and may need
# additional processing
if provisioning is not None:
if inclusion_strategy != InclusionStrategy.SECURITY_S2:
raise ValueError(
"`provisioning` option is only supported with inclusion_strategy=SECURITY_S2"
)
if dsk is not None:
raise ValueError("Only one of `provisioning` and `dsk` can be provided")
# Provisioning option was introduced in Schema 11
require_schema = 11
# String is assumed to be the QR code string so we can pass as is
if isinstance(provisioning, str):
if len(
provisioning
) < MINIMUM_QR_STRING_LENGTH or not provisioning.startswith("90"):
raise ValueError(
f"QR code string must be at least {MINIMUM_QR_STRING_LENGTH} characters "
"long and start with `90`"
)
options["provisioning"] = provisioning
            # Smart Start QR codes cannot be used with the normal inclusion
            # process, so reject them here
elif (
isinstance(provisioning, QRProvisioningInformation)
and provisioning.version == QRCodeVersion.SMART_START
):
raise ValueError(
"Smart Start QR codes can't use the normal inclusion process. Use the "
"provision_smart_start_node command to provision this device."
)
# Otherwise we assume the data is ProvisioningEntry or
# QRProvisioningInformation that is not a Smart Start QR code
else:
options["provisioning"] = provisioning.to_dict()
if dsk is not None:
if inclusion_strategy != InclusionStrategy.SECURITY_S2:
raise ValueError(
"`dsk` option is only supported with inclusion_strategy=SECURITY_S2"
)
require_schema = 25
options["dsk"] = dsk
data = await self.client.async_send_command(
{
"command": "controller.begin_inclusion",
"options": options,
},
require_schema=require_schema,
)
return cast(bool, data["success"])
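    # Illustrative call on a Controller instance (the strategy and DSK values
    # below are placeholders, not prescribed values):
    #
    #     success = await controller.async_begin_inclusion(
    #         InclusionStrategy.SECURITY_S2, dsk="12345-..."
    #     )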
async def async_provision_smart_start_node(
self,
provisioning_info: ProvisioningEntry | QRProvisioningInformation | str,
) -> None:
"""Send provisionSmartStartNode command to Controller."""
if (
isinstance(provisioning_info, QRProvisioningInformation)
and provisioning_info.version == QRCodeVersion.S2
):
raise ValueError(
"An S2 QR Code can't be used to pre-provision a Smart Start node"
)
await self.client.async_send_command(
{
"command": "controller.provision_smart_start_node",
"entry": provisioning_info
if isinstance(provisioning_info, str)
else provisioning_info.to_dict(),
},
require_schema=11,
)
async def async_unprovision_smart_start_node(
self, dsk_or_node_id: int | str
) -> None:
"""Send unprovisionSmartStartNode command to Controller."""
await self.client.async_send_command(
{
"command": "controller.unprovision_smart_start_node",
"dskOrNodeId": dsk_or_node_id,
},
require_schema=11,
)
async def async_get_provisioning_entry(
self, dsk_or_node_id: int | str
) -> ProvisioningEntry | None:
"""Send getProvisioningEntry command to Controller."""
data = await self.client.async_send_command(
{
"command": "controller.get_provisioning_entry",
"dskOrNodeId": dsk_or_node_id,
},
require_schema=17,
)
if "entry" in data:
return ProvisioningEntry.from_dict(data["entry"])
return None
async def async_get_provisioning_entries(self) -> list[ProvisioningEntry]:
"""Send getProvisioningEntries command to Controller."""
data = await self.client.async_send_command(
{
"command": "controller.get_provisioning_entries",
},
require_schema=11,
)
return [ProvisioningEntry.from_dict(entry) for entry in data.get("entries", [])]
async def async_stop_inclusion(self) -> bool:
"""Send stopInclusion command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.stop_inclusion"}
)
return cast(bool, data["success"])
async def async_begin_exclusion(
self, strategy: ExclusionStrategy | None = None
) -> bool:
"""Send beginExclusion command to Controller."""
payload: dict[str, str | dict[str, ExclusionStrategy]] = {
"command": "controller.begin_exclusion"
}
if strategy is not None:
payload["options"] = {"strategy": strategy}
data = await self.client.async_send_command(payload, require_schema=22)
return cast(bool, data["success"])
async def async_stop_exclusion(self) -> bool:
"""Send stopExclusion command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.stop_exclusion"}
)
return cast(bool, data["success"])
async def async_remove_failed_node(self, node: Node) -> None:
"""Send removeFailedNode command to Controller."""
await self.client.async_send_command(
{"command": "controller.remove_failed_node", "nodeId": node.node_id}
)
async def async_replace_failed_node(
self,
node: Node,
inclusion_strategy: Literal[
InclusionStrategy.DEFAULT,
InclusionStrategy.SECURITY_S0,
InclusionStrategy.SECURITY_S2,
InclusionStrategy.INSECURE,
],
force_security: bool | None = None,
provisioning: str | ProvisioningEntry | QRProvisioningInformation | None = None,
) -> bool:
"""Send replaceFailedNode command to Controller."""
# Most functionality was introduced in Schema 8
require_schema = 8
options: dict[str, Any] = {"strategy": inclusion_strategy}
# forceSecurity can only be used with the default inclusion strategy
if force_security is not None:
if inclusion_strategy != InclusionStrategy.DEFAULT:
raise ValueError(
"`forceSecurity` option is only supported with inclusion_strategy=DEFAULT"
)
options["forceSecurity"] = force_security
# provisioning can only be used with the S2 inclusion strategy and may need
# additional processing
if provisioning is not None:
if inclusion_strategy != InclusionStrategy.SECURITY_S2:
raise ValueError(
"`provisioning` option is only supported with inclusion_strategy=SECURITY_S2"
)
# Provisioning option was introduced in Schema 11
require_schema = 11
# String is assumed to be the QR code string so we can pass as is
if isinstance(provisioning, str):
if len(
provisioning
) < MINIMUM_QR_STRING_LENGTH or not provisioning.startswith("90"):
raise ValueError(
f"QR code string must be at least {MINIMUM_QR_STRING_LENGTH} characters "
"long and start with `90`"
)
options["provisioning"] = provisioning
# Otherwise we assume the data is ProvisioningEntry or
# QRProvisioningInformation
else:
options["provisioning"] = provisioning.to_dict()
data = await self.client.async_send_command(
{
"command": "controller.replace_failed_node",
"nodeId": node.node_id,
"options": options,
},
require_schema=require_schema,
)
return cast(bool, data["success"])
async def async_heal_node(self, node: Node) -> bool:
"""Send healNode command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.heal_node", "nodeId": node.node_id}
)
return cast(bool, data["success"])
async def async_begin_healing_network(self) -> bool:
"""Send beginHealingNetwork command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.begin_healing_network"}
)
return cast(bool, data["success"])
async def async_stop_healing_network(self) -> bool:
"""Send stopHealingNetwork command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.stop_healing_network"}
)
success = cast(bool, data["success"])
if success:
self._heal_network_progress = None
self.data["isHealNetworkActive"] = False
return success
async def async_is_failed_node(self, node: Node) -> bool:
"""Send isFailedNode command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.is_failed_node", "nodeId": node.node_id}
)
return cast(bool, data["failed"])
async def async_get_association_groups(
self, source: AssociationAddress
) -> dict[int, AssociationGroup]:
"""Send getAssociationGroups command to Controller."""
source_data = {"nodeId": source.node_id}
if source.endpoint is not None:
source_data["endpoint"] = source.endpoint
data = await self.client.async_send_command(
{
"command": "controller.get_association_groups",
**source_data,
}
)
groups = {}
for key, group in data["groups"].items():
groups[int(key)] = AssociationGroup(
max_nodes=group["maxNodes"],
is_lifeline=group["isLifeline"],
multi_channel=group["multiChannel"],
label=group["label"],
profile=group.get("profile"),
issued_commands=group.get("issuedCommands", {}),
)
return groups
async def async_get_associations(
self, source: AssociationAddress
) -> dict[int, list[AssociationAddress]]:
"""Send getAssociations command to Controller."""
source_data = {"nodeId": source.node_id}
if source.endpoint is not None:
source_data["endpoint"] = source.endpoint
data = await self.client.async_send_command(
{
"command": "controller.get_associations",
**source_data,
}
)
associations = {}
for key, association_addresses in data["associations"].items():
associations[int(key)] = [
AssociationAddress(
node_id=association_address["nodeId"],
endpoint=association_address.get("endpoint"),
)
for association_address in association_addresses
]
return associations
async def async_is_association_allowed(
self, source: AssociationAddress, group: int, association: AssociationAddress
) -> bool:
"""Send isAssociationAllowed command to Controller."""
source_data = {"nodeId": source.node_id}
if source.endpoint is not None:
source_data["endpoint"] = source.endpoint
association_data = {"nodeId": association.node_id}
if association.endpoint is not None:
association_data["endpoint"] = association.endpoint
data = await self.client.async_send_command(
{
"command": "controller.is_association_allowed",
**source_data,
"group": group,
"association": association_data,
}
)
return cast(bool, data["allowed"])
async def async_add_associations(
self,
source: AssociationAddress,
group: int,
associations: list[AssociationAddress],
wait_for_result: bool = False,
) -> None:
"""Send addAssociations command to Controller."""
source_data = {"nodeId": source.node_id}
if source.endpoint is not None:
source_data["endpoint"] = source.endpoint
associations_data = []
for association in associations:
association_data = {"nodeId": association.node_id}
if association.endpoint is not None:
association_data["endpoint"] = association.endpoint
associations_data.append(association_data)
cmd = {
"command": "controller.add_associations",
**source_data,
"group": group,
"associations": associations_data,
}
if wait_for_result:
await self.client.async_send_command(cmd)
else:
await self.client.async_send_command_no_wait(cmd)
async def async_remove_associations(
self,
source: AssociationAddress,
group: int,
associations: list[AssociationAddress],
wait_for_result: bool = False,
) -> None:
"""Send removeAssociations command to Controller."""
source_data = {"nodeId": source.node_id}
if source.endpoint is not None:
source_data["endpoint"] = source.endpoint
associations_data = []
for association in associations:
association_data = {"nodeId": association.node_id}
if association.endpoint is not None:
association_data["endpoint"] = association.endpoint
associations_data.append(association_data)
cmd = {
"command": "controller.remove_associations",
**source_data,
"group": group,
"associations": associations_data,
}
if wait_for_result:
await self.client.async_send_command(cmd)
else:
await self.client.async_send_command_no_wait(cmd)
async def async_remove_node_from_all_associations(
self,
node: Node,
wait_for_result: bool = False,
) -> None:
"""Send removeNodeFromAllAssociations command to Controller."""
cmd = {
"command": "controller.remove_node_from_all_associations",
"nodeId": node.node_id,
}
if wait_for_result:
await self.client.async_send_command(cmd)
else:
await self.client.async_send_command_no_wait(cmd)
async def async_get_node_neighbors(self, node: Node) -> list[int]:
"""Send getNodeNeighbors command to Controller to get node's neighbors."""
data = await self.client.async_send_command(
{
"command": "controller.get_node_neighbors",
"nodeId": node.node_id,
}
)
return cast(list[int], data["neighbors"])
async def async_grant_security_classes(
self, inclusion_grant: InclusionGrant
) -> None:
"""Send grantSecurityClasses command to Controller."""
await self.client.async_send_command(
{
"command": "controller.grant_security_classes",
"inclusionGrant": inclusion_grant.to_dict(),
}
)
async def async_validate_dsk_and_enter_pin(self, pin: str) -> None:
"""Send validateDSKAndEnterPIN command to Controller."""
await self.client.async_send_command(
{
"command": "controller.validate_dsk_and_enter_pin",
"pin": pin,
}
)
async def async_supports_feature(self, feature: ZwaveFeature) -> bool | None:
"""
Send supportsFeature command to Controller.
When None is returned it means the driver does not yet know whether the
controller supports the input feature.
"""
data = await self.client.async_send_command(
{"command": "controller.supports_feature", "feature": feature.value},
require_schema=12,
)
return cast(bool | None, data.get("supported"))
async def async_get_state(self) -> ControllerDataType:
"""Get controller state."""
data = await self.client.async_send_command(
{"command": "controller.get_state"}, require_schema=14
)
return cast(ControllerDataType, data["state"])
async def async_backup_nvm_raw(self) -> bytes:
"""Send backupNVMRaw command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.backup_nvm_raw"}, require_schema=14
)
return convert_base64_to_bytes(data["nvmData"])
async def async_restore_nvm(self, file: bytes) -> None:
"""Send restoreNVM command to Controller."""
await self.client.async_send_command(
{
"command": "controller.restore_nvm",
"nvmData": convert_bytes_to_base64(file),
},
require_schema=14,
)
async def async_get_power_level(self) -> dict[str, int]:
"""Send getPowerlevel command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.get_powerlevel"}, require_schema=14
)
return {
"power_level": data["powerlevel"],
"measured_0_dbm": data["measured0dBm"],
}
async def async_set_power_level(
self, power_level: int, measured_0_dbm: int
) -> bool:
"""Send setPowerlevel command to Controller."""
data = await self.client.async_send_command(
{
"command": "controller.set_powerlevel",
"powerlevel": power_level,
"measured0dBm": measured_0_dbm,
},
require_schema=14,
)
return cast(bool, data["success"])
async def async_get_rf_region(self) -> RFRegion:
"""Send getRFRegion command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.get_rf_region"}, require_schema=14
)
return RFRegion(data["region"])
async def async_set_rf_region(self, rf_region: RFRegion) -> bool:
"""Send setRFRegion command to Controller."""
data = await self.client.async_send_command(
{
"command": "controller.set_rf_region",
"region": rf_region.value,
},
require_schema=14,
)
return cast(bool, data["success"])
async def async_get_known_lifeline_routes(
self,
) -> dict[Node, ControllerLifelineRoutes]:
"""Send getKnownLifelineRoutes command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.get_known_lifeline_routes"}, require_schema=16
)
return {
self.nodes[node_id]: ControllerLifelineRoutes(self.client, lifeline_routes)
for node_id, lifeline_routes in data["routes"].items()
}
async def async_is_any_ota_firmware_update_in_progress(self) -> bool:
"""
Send isAnyOTAFirmwareUpdateInProgress command to Controller.
If `True`, a firmware update is in progress on at least one node.
"""
data = await self.client.async_send_command(
{"command": "controller.is_any_ota_firmware_update_in_progress"},
require_schema=21,
)
assert data
return cast(bool, data["progress"])
async def async_get_available_firmware_updates(
self, node: Node, api_key: str, include_prereleases: bool = False
) -> list[NodeFirmwareUpdateInfo]:
"""Send getAvailableFirmwareUpdates command to Controller."""
data = await self.client.async_send_command(
{
"command": "controller.get_available_firmware_updates",
"nodeId": node.node_id,
"apiKey": api_key,
"includePrereleases": include_prereleases,
},
require_schema=24,
)
assert data
return [NodeFirmwareUpdateInfo.from_dict(update) for update in data["updates"]]
async def async_firmware_update_ota(
self, node: Node, updates: list[NodeFirmwareUpdateFileInfo]
) -> NodeFirmwareUpdateResult:
"""Send firmwareUpdateOTA command to Controller."""
data = await self.client.async_send_command(
{
"command": "controller.firmware_update_ota",
"nodeId": node.node_id,
"updates": [update.to_dict() for update in updates],
},
require_schema=29,
)
return NodeFirmwareUpdateResult(node, data["result"])
async def async_is_firmware_update_in_progress(self) -> bool:
"""Send isFirmwareUpdateInProgress command to Controller."""
data = await self.client.async_send_command(
{"command": "controller.is_firmware_update_in_progress"}, require_schema=26
)
return cast(bool, data["progress"])
def receive_event(self, event: Event) -> None:
"""Receive an event."""
if event.data["source"] == "node":
node = self.nodes.get(event.data["nodeId"])
if node is None:
# TODO handle event for unknown node
pass
else:
node.receive_event(event)
return
if event.data["source"] != "controller":
# TODO decide what to do here
print(
f"Controller doesn't know how to handle/forward this event: {event.data}"
)
CONTROLLER_EVENT_MODEL_MAP[event.type](**event.data)
self._handle_event_protocol(event)
event.data["controller"] = self
self.emit(event.type, event.data)
def handle_firmware_update_progress(self, event: Event) -> None:
"""Process a firmware update progress event."""
self._firmware_update_progress = event.data[
"firmware_update_progress"
] = ControllerFirmwareUpdateProgress(event.data["progress"])
def handle_firmware_update_finished(self, event: Event) -> None:
"""Process a firmware update finished event."""
self._firmware_update_progress = None
event.data["firmware_update_finished"] = ControllerFirmwareUpdateResult(
event.data["result"]
)
def handle_inclusion_failed(self, event: Event) -> None:
"""Process an inclusion failed event."""
def handle_exclusion_failed(self, event: Event) -> None:
"""Process an exclusion failed event."""
def handle_inclusion_started(self, event: Event) -> None:
"""Process an inclusion started event."""
def handle_exclusion_started(self, event: Event) -> None:
"""Process an exclusion started event."""
def handle_inclusion_stopped(self, event: Event) -> None:
"""Process an inclusion stopped event."""
def handle_exclusion_stopped(self, event: Event) -> None:
"""Process an exclusion stopped event."""
def handle_node_found(self, event: Event) -> None:
"""Process a node found event."""
def handle_node_added(self, event: Event) -> None:
"""Process a node added event."""
node = event.data["node"] = Node(self.client, event.data["node"])
self.nodes[node.node_id] = node
def handle_node_removed(self, event: Event) -> None:
"""Process a node removed event."""
event.data["reason"] = RemoveNodeReason(event.data["reason"])
event.data["node"] = self.nodes.pop(event.data["node"]["nodeId"])
# Remove client from node since it's no longer connected to the controller
event.data["node"].client = None
def handle_heal_network_progress(self, event: Event) -> None:
"""Process a heal network progress event."""
self._heal_network_progress = event.data["progress"].copy()
self.data["isHealNetworkActive"] = True
def handle_heal_network_done(self, event: Event) -> None:
"""Process a heal network done event."""
# pylint: disable=unused-argument
self._heal_network_progress = None
self.data["isHealNetworkActive"] = False
def handle_statistics_updated(self, event: Event) -> None:
"""Process a statistics updated event."""
self.data["statistics"] = statistics = event.data["statistics"]
self._statistics = event.data["statistics_updated"] = ControllerStatistics(
statistics
)
def handle_grant_security_classes(self, event: Event) -> None:
"""Process a grant security classes event."""
event.data["requested_grant"] = InclusionGrant.from_dict(
event.data["requested"]
)
def handle_validate_dsk_and_enter_pin(self, event: Event) -> None:
"""Process a validate dsk and enter pin event."""
def handle_inclusion_aborted(self, event: Event) -> None:
"""Process an inclusion aborted event."""
def handle_nvm_backup_progress(self, event: Event) -> None:
"""Process a nvm backup progress event."""
event.data["nvm_backup_progress"] = NVMProgress(
event.data["bytesRead"], event.data["total"]
)
def handle_nvm_convert_progress(self, event: Event) -> None:
"""Process a nvm convert progress event."""
event.data["nvm_convert_progress"] = NVMProgress(
event.data["bytesRead"], event.data["total"]
)
def handle_nvm_restore_progress(self, event: Event) -> None:
"""Process a nvm restore progress event."""
event.data["nvm_restore_progress"] = NVMProgress(
event.data["bytesWritten"], event.data["total"]
)
def handle_identify(self, event: Event) -> None:
"""Process an identify event."""
# TODO handle event for unknown node
if node := self.nodes.get(event.data["nodeId"]):
event.data["node"] = node
def handle_status_changed(self, event: Event) -> None:
"""Process a status changed event."""
self.data["status"] = event.data["status"]
event.data["status"] = ControllerStatus(event.data["status"]) | zwave-js-server-python | /zwave_js_server_python-0.51.0-py3-none-any.whl/zwave_js_server/model/controller/__init__.py | __init__.py |
import asyncio
import json
import threading
import time
from .helpers import prepare_devices
from .WebsocketListener import WebsocketListener
class ZWaveMe:
"""Main controller class"""
def __init__(
self,
url,
token=None,
on_device_create=None,
on_device_update=None,
on_device_remove=None,
on_device_destroy=None,
on_new_device=None,
platforms=None,
):
self.on_device_create = on_device_create
self.on_device_update = on_device_update
self.on_device_remove = on_device_remove
self.on_device_destroy = on_device_destroy
self.on_new_device = on_new_device
self.url = url
self.token = token
self.platforms = platforms
self._ws = None
self._wshost = None
self.thread = None
self.devices = []
self.uuid = None
self.is_closed = False
def start_ws(self):
"""Launch thread."""
self.thread = threading.Thread(target=self.init_websocket)
self.thread.daemon = True
self.thread.start()
async def get_connection(self):
"""verify connection"""
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, self.start_ws)
try:
await asyncio.wait_for(self._ws.connect(), timeout=10.0)
return True
except asyncio.TimeoutError:
await self.close_ws()
return False
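    # Illustrative consumer flow (URL, token and platform list are placeholders):
    #
    #     controller = ZWaveMe("ws://192.168.1.100:8083", token="...",
    #                          platforms=["switchBinary"])
    #     if await controller.get_connection():
    #         controller.get_devices()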
async def wait_for_info(self):
while not self.uuid:
await asyncio.sleep(0.1)
return self.uuid
async def close_ws(self):
loop = asyncio.get_event_loop()
self.is_closed = True
blocking_tasks = [
loop.run_in_executor(None, self.thread.join),
loop.run_in_executor(None, self._ws.close),
]
await asyncio.wait(blocking_tasks)
async def get_uuid(self):
"""Get uuid info"""
loop = asyncio.get_event_loop()
loop.run_in_executor(None, self.get_info)
try:
await asyncio.wait_for(self.wait_for_info(), timeout=5.0)
return self.uuid
except asyncio.TimeoutError:
return
def send_command(self, device_id, command):
self._ws.send(
json.dumps(
{
"event": "httpEncapsulatedRequest",
"data": {
"method": "GET",
"url": "/ZAutomation/api/v1/devices/{}/command/{}".format(
device_id, command
),
},
}
)
)
def get_devices(self):
self._ws.send(
json.dumps(
{
"event": "httpEncapsulatedRequest",
"responseEvent": "get_devices",
"data": {"method": "GET", "url": "/ZAutomation/api/v1/devices"},
}
)
)
def get_device_info(self, device_id):
self._ws.send(
json.dumps(
{
"event": "httpEncapsulatedRequest",
"responseEvent": "get_device_info",
"data": {
"method": "GET",
"url": "/ZAutomation/api/v1/devices/{}".format(device_id),
},
}
)
)
def get_info(self):
self._ws.send(
json.dumps(
{
"event": "httpEncapsulatedRequest",
"responseEvent": "get_info",
"data": {
"method": "GET",
"url": "/ZAutomation/api/v1/system/first-access",
},
}
)
)
def init_websocket(self):
# keep websocket open indefinitely
while True:
if self.is_closed:
return
self._ws = WebsocketListener(
ZWaveMe=self,
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
token=self.token,
url=self.url,
)
try:
self._ws.run_forever(ping_interval=5)
finally:
self._ws.close()
time.sleep(5)
def on_message(self, _, utf):
if utf:
dict_data = json.loads(utf)
if "type" not in dict_data.keys():
return
try:
if dict_data["type"] == "get_devices":
if "data" not in dict_data or "body" not in dict_data["data"]:
return
body = json.loads(dict_data["data"]["body"])
if "devices" in body["data"]:
self.devices = prepare_devices([
device
for device in body["data"]["devices"]
if device["deviceType"] in self.platforms
])
if self.on_device_create:
self.on_device_create(self.devices)
elif dict_data["type"] == "get_device_info":
if "data" not in dict_data or "body" not in dict_data["data"]:
return
body = json.loads(dict_data["data"]["body"])
if "id" in body["data"]:
new_device = prepare_devices(
[
body["data"],
]
)[0]
if self.on_new_device:
self.on_new_device(new_device)
elif dict_data["type"] == "me.z-wave.devices.level":
device = prepare_devices(
[
dict_data["data"],
]
)[0]
if device.deviceType == "sensorMultilevel":
device.level = str(
round(float(dict_data["data"]["metrics"]["level"]), 1)
)
if self.on_device_update:
self.on_device_update(device)
elif dict_data["type"] == "me.z-wave.namespaces.update":
for data in dict_data["data"]:
if data["id"] == "devices_all":
new_devices = [x["deviceId"] for x in data["params"]]
devices_to_install = set(new_devices) - set(
[x["id"] for x in self.devices]
)
for device in devices_to_install:
self.get_device_info(device)
elif dict_data["type"] == "get_info":
uuid = json.loads(dict_data["data"]["body"])["data"]["uuid"]
                    if uuid:
self.uuid = uuid
elif dict_data["type"] == "me.z-wave.devices.remove":
if self.on_device_remove:
self.on_device_remove(dict_data["data"])
elif dict_data["type"] == "me.z-wave.devices.wipe":
if self.on_device_destroy:
self.on_device_destroy(dict_data["data"])
            except Exception:
                # Any error while handling the message is silently ignored.
                pass
def on_error(self, *args, **kwargs):
error = args[-1]
def on_close(self, _, *args):
self._ws.connected = False
def get_ws(self):
return self._ws
def get_wshost(self):
        return self._wshost

| zwave-me-ws | /zwave_me_ws-0.4.3-py3-none-any.whl/zwave_me_ws/ZWaveMe.py | ZWaveMe.py |
from dataclasses import dataclass, field
from typing import Union
FIELDS = [
"id",
"deviceType",
"probeType",
"locationName",
"manufacturer",
"firmware",
"tags",
"creatorId",
"nodeId",
]
METRICS_SCALE = ["title", "level", "scaleTitle", "min", "max", "color", "isFailed"]
TYPE_TAGS = {
'type-sensor-binary': "sensorBinary",
'type-light': "lightMultilevel",
'type-button': "toggleButton",
'type-thermostat': "thermostat",
'type-motor': "motor",
'type-fan': "fan",
'type-doorlock': "doorlock",
'type-number': "switchMultilevel",
'type-switch': "switchBinary",
'type-sensor': "sensorMultilevel",
'type-siren': "siren",
}
@dataclass
class ZWaveMeData:
id: str
deviceType: str
title: str
level: Union[str, int, float]
deviceIdentifier: str
probeType: str = ""
scaleTitle: str = ""
min: str = ""
max: str = ""
color: dict = field(default_factory=dict)
isFailed: bool = False
locationName: str = ""
manufacturer: str = ""
firmware: str = ""
tags: list[str] = field(default_factory=list)
nodeId: str = ""
creatorId: str = ""
def prepare_devices(devices: list[dict]) -> list[ZWaveMeData]:
prepared_devices = []
for device in devices:
if device['permanently_hidden']:
continue
prepared_device = {
**{key: device[key] for key in FIELDS if key in device},
**{
key: device["metrics"][key]
for key in METRICS_SCALE
if key in device["metrics"]
},
}
prepared_device = set_device_type(prepared_device)
if prepared_device["deviceType"] == "motor":
if prepared_device["level"] == "off":
prepared_device["level"] = 0
if prepared_device["level"] == "on":
prepared_device["level"] = 99.0
prepared_device["level"] = float(prepared_device["level"])
if "creatorId" in prepared_device and "nodeId" in prepared_device:
prepared_device[
"deviceIdentifier"
] = f"{prepared_device['creatorId']}_{prepared_device['nodeId']}"
else:
prepared_device["deviceIdentifier"] = prepared_device["id"]
prepared_devices.append(prepared_device)
return [ZWaveMeData(**d) for d in prepared_devices]
def set_device_type(prepared_device):
if prepared_device["probeType"] == "siren":
prepared_device["deviceType"] = "siren"
if prepared_device["tags"]:
for tag in prepared_device["tags"]:
if tag in TYPE_TAGS:
prepared_device["deviceType"] = TYPE_TAGS[tag]
prepared_device = set_value_by_device_type(prepared_device)
return prepared_device
if prepared_device["probeType"] == "motor":
prepared_device["deviceType"] = "motor"
elif prepared_device["probeType"] == "fan":
prepared_device["deviceType"] = "fan"
elif prepared_device['deviceType'] == 'sensorMultilevel':
if prepared_device["probeType"] == "light":
prepared_device['deviceType'] = 'lightMultilevel'
prepared_device = set_light_level(prepared_device)
elif prepared_device['deviceType'] == 'switchMultilevel':
prepared_device['deviceType'] = 'lightMultilevel'
prepared_device = set_light_level(prepared_device)
elif 'alarm' in prepared_device["probeType"]:
prepared_device["deviceType"] = "sensorBinary"
prepared_device = set_value_by_device_type(prepared_device)
return prepared_device
def set_value_by_device_type(prepared_device) -> dict:
    """Normalize the reported level to the vocabulary of the device type."""
    device_type = prepared_device['deviceType']
    level = prepared_device['level']
    if device_type in ('sensorBinary', 'switchBinary', 'siren'):
        # Binary devices use "on"/"off"; map "open"/"close" and other values onto that.
        if level in ('on', 'off'):
            return prepared_device
        if level in ('open', 'close'):
            prepared_device['level'] = {'open': 'off', 'close': 'on'}[level]
        else:
            prepared_device['level'] = 'on' if bool(level) else 'off'
    elif device_type == 'lightMultilevel':
        prepared_device = set_light_level(prepared_device)
    elif device_type == 'toggleButton':
        return prepared_device
    elif device_type in (
        'thermostat', 'motor', 'fan', 'switchMultilevel', 'sensorMultilevel'
    ):
        # Numeric devices use a 0-99 level; map "on"/"off" and other values onto that.
        if str(level).replace('.', '', 1).isdigit():
            return prepared_device
        if level == 'on':
            prepared_device['level'] = 99
        elif level == 'off':
            prepared_device['level'] = 0
        else:
            prepared_device['level'] = 99 if bool(level) else 0
    elif device_type == 'doorlock':
        # Door locks use "open"/"close"; map "on"/"off" and other values onto that.
        if level in ('open', 'close'):
            return prepared_device
        if level in ('on', 'off'):
            prepared_device['level'] = {'off': 'open', 'on': 'close'}[level]
        else:
            prepared_device['level'] = 'close' if bool(level) else 'open'
    return prepared_device
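# Illustrative behaviour of the normalization above (device dicts abbreviated):
#
#     >>> set_value_by_device_type({'deviceType': 'switchBinary', 'level': True})['level']
#     'on'
#     >>> set_value_by_device_type({'deviceType': 'thermostat', 'level': 'off'})['level']
#     0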
def set_light_level(prepared_device):
if str(prepared_device['level']).replace('.', '', 1).isdigit():
prepared_device["color"] = {
"r": round(2.55 * float(prepared_device["level"])),
"g": round(2.55 * float(prepared_device["level"])),
"b": round(2.55 * float(prepared_device["level"])),
}
prepared_device["level"] = (
"on" if float(prepared_device["level"]) > 0 else "off"
)
elif prepared_device['level'] == 'on':
prepared_device["color"] = {
"r": 255,
"g": 255,
"b": 255,
}
elif prepared_device['level'] == 'off':
prepared_device["color"] = {
"r": 0,
"g": 0,
"b": 0,
}
else:
prepared_device['level'] = 'on' if bool(prepared_device['level']) else 'off'
if prepared_device['level'] == 'on':
prepared_device["color"] = {
"r": 255,
"g": 255,
"b": 255,
}
elif prepared_device['level'] == 'off':
prepared_device["color"] = {
"r": 0,
"g": 0,
"b": 0,
}
    return prepared_device

| zwave-me-ws | /zwave_me_ws-0.4.3-py3-none-any.whl/zwave_me_ws/helpers.py | helpers.py |
import json
import threading
from .WebsocketListener import WebsocketListener
import time
class ZWaveMe:
"""Main controller class"""
def __init__(self, on_device_create, on_device_update, on_new_device,
url, token, platforms=None):
self.on_device_create = on_device_create
self.on_device_update = on_device_update
self.on_new_device = on_new_device
self.url = url
self.token = token
self.platforms = platforms
self._ws = None
self._wshost = None
self.start_ws()
self.thread = None
self.devices = []
def start_ws(self):
"""get/find the websocket host"""
self.thread = threading.Thread(target=self.init_websocket)
self.thread.daemon = True
self.thread.start()
def send_command(self, device_id, command):
self._ws.send(
json.dumps(
{
"event": "httpEncapsulatedRequest",
"data": {
"method": "GET",
"url": "/ZAutomation/api/v1/devices/{}/command/{}".format(
device_id, command
)
}
}
)
)
def get_device_info(self, device_id):
self._ws.send(
json.dumps(
{
"event": "httpEncapsulatedRequest",
"data": {
"method": "GET",
"url": "/ZAutomation/api/v1/devices/{}".format(
device_id
)
}
}
)
)
def init_websocket(self):
# keep websocket open indefinitely
while True:
self._ws = WebsocketListener(
ZWaveMe=self,
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
token=self.token,
url=self.url,
)
try:
self._ws.run_forever(ping_interval=5)
finally:
self._ws.close()
time.sleep(5)
def on_message(self, _, utf):
if utf:
dict_data = json.loads(utf)
if "type" not in dict_data.keys():
return
try:
if dict_data["type"] == "ws-reply":
body = json.loads(dict_data["data"]["body"])
if "devices" in body["data"]:
self.devices = [
device
for device in body["data"]["devices"]
if device["deviceType"] in self.platforms
]
self.on_device_create(self.devices)
elif "id" in body['data']:
self.on_new_device(body['data'])
elif dict_data["type"] == "me.z-wave.devices.level":
self.on_device_update(dict_data["data"])
elif dict_data["type"] == "me.z-wave.namespaces.update":
for data in dict_data['data']:
if data['id'] == 'devices_all':
new_devices = [x['deviceId'] for x in data['params']]
devices_to_install = set(new_devices)-set([x['id'] for x in self.devices])
for device in devices_to_install:
self.get_device_info(device)
            except Exception:
                # Any error while handling the message is silently ignored.
                pass
def on_error(self, *args, **kwargs):
error = args[-1]
def on_close(self, _, *args):
self._ws.connected = False
def get_ws(self):
return self._ws
def get_wshost(self):
        return self._wshost

| zwave-ws | /zwave_ws-0.1.15-py3-none-any.whl/zwave_ws/ZWaveMe.py | ZWaveMe.py |
Zweifach
========
Zweifach (German for "two times") is an app to make integration of django-otp a bit more opinionated.
Integration of two-factor auth is enforced by a middleware which ensures two things:
- a user who is required to enable 2FA for their account is redirected to the setup view until setup is done.
- a user who has 2FA enabled is redirected to the verify view for token input after login until verified.
Quickstart
----------
- Install packages by running 'pip install zweifach django-otp qrcode'
- Add 'zweifach' to INSTALLED_APPS.
- Add 'zweifach.middleware.ZweifachMiddleware' to MIDDLEWARE, *after* AuthenticationMiddleware.
- Include 'zweifach.urls' somewhere in your URL config.
- Configure django-otp as described further down below.
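
Put together, a minimal configuration sketch (the project layout and the ``2fa/`` URL prefix are placeholders)::

    # settings.py
    INSTALLED_APPS = [
        # ...
        'django_otp',
        'django_otp.plugins.otp_totp',
        'django_otp.plugins.otp_static',
        'zweifach',
    ]

    MIDDLEWARE = [
        # ...
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django_otp.middleware.OTPMiddleware',
        'zweifach.middleware.ZweifachMiddleware',
    ]

    OTP_TOTP_ISSUER = 'MyProject'

    # urls.py
    from django.urls import include, path

    urlpatterns = [
        # ...
        path('2fa/', include('zweifach.urls')),
    ]
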
Settings
--------
settings.ZWEIFACH_AUTH_REQUIRED
default: []
A list of checks which determine whether a user needs 2FA to use their account.
example::
ZWEIFACH_AUTH_REQUIRED = [
        lambda user: user.is_staff, # all staff users must use two-factor auth
lambda user: '2fa' in user.groups.values_list("name", flat=True), # all users in group '2fa' must use two factor auth
]
settings.ZWEIFACH_URL_EXCLUDES
default: []
A list of URLs which are always accessible without 2FA.
The Verify and Setup views are always excluded, as well as settings.LOGIN_URL and the admin login view, if the admin is enabled.
example::
ZWEIFACH_URL_EXCLUDES = [
'/imprint/',
'/faq/how-to-setup-2fa/',
]
Note: If a URL is accessible without login, it can of course still be viewed without any 2FA interaction.
Notes about django-otp configuration
------------------------------------
A compatible installation of django-otp should be setup as follows:
Add to INSTALLED_APPS::
'django_otp',
'django_otp.plugins.otp_totp',
'django_otp.plugins.otp_static',
Add to MIDDLEWARE (between AuthenticationMiddleware and ZweifachMiddleware)::
'django_otp.middleware.OTPMiddleware'
Configure issuer::
OTP_TOTP_ISSUER = 'MyProject'
Usage
-----
To generate static recovery tokens (also useful for first login on freshly installed systems) use::
./manage.py addstatictoken <username>
Development
-----------
Ensure basic code style with::
tox
Build package with::
python3 -m build
Upload package to PyPI::
python3 -m twine upload dist/zweifach-x.x.x*
| zweifach | /zweifach-1.0.3.tar.gz/zweifach-1.0.3/README.rst | README.rst |
from __future__ import unicode_literals
import os
import sys
import ast
from io import StringIO
from contextlib import contextmanager
from itertools import chain
from functools import reduce
__version__ = '0.1.0'
__version_info__ = (0, 1, 0)
PY2 = sys.version_info[0] == 2
def walk_preorder(tree):
"""
Yields the nodes in the `tree` in preorder.
"""
yield tree
for child in ast.iter_child_nodes(tree):
for descendent in walk_preorder(child):
yield descendent
def to_source(tree):
"""
Returns the Python source code representation of the `tree`.
"""
writer = _SourceWriter()
writer.visit(tree)
return writer.output.getvalue()
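# Illustrative round trip (the exact formatting is this module's own):
#
#     >>> import ast
#     >>> print(to_source(ast.parse('x = y + z')))
#     x = y + z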
class _SourceWriter(ast.NodeVisitor):
def __init__(self):
self.output = StringIO()
self.indentation_level = 0
self.newline = True
@contextmanager
def indented(self):
self.indentation_level += 1
try:
yield
finally:
self.indentation_level -= 1
def write(self, source):
if self.newline:
self.newline = False
self.write_indentation()
self.output.write(source)
def write_indentation(self):
self.write(' ' * self.indentation_level)
def write_newline(self):
if self.newline:
self.newline = False
self.write('\n')
self.newline = True
def write_line(self, source):
self.write(source)
self.write_newline()
def write_identifier(self, identifier):
if PY2:
self.write(identifier.decode('ascii'))
else:
self.write(identifier)
def write_repr(self, obj):
if PY2:
self.write(repr(obj).decode('ascii'))
else:
self.write(repr(obj))
def writing_comma_separated(self, items):
if items:
for item in items[:-1]:
yield item
self.write(', ')
yield items[-1]
def write_comma_separated_nodes(self, nodes):
for node in self.writing_comma_separated(nodes):
self.visit(node)
@contextmanager
def writing_statement(self):
yield
self.write_newline()
def visit_statements(self, statements):
for statement in statements[:-1]:
self.visit(statement)
if isinstance(statement, (ast.FunctionDef, ast.ClassDef)):
self.write_newline()
self.visit(statements[-1])
def visit_Module(self, node):
self.visit_statements(node.body)
def visit_FunctionDef(self, node):
for decorator in node.decorator_list:
self.write('@')
self.visit(decorator)
self.write_newline()
self.write('def ')
self.write_identifier(node.name)
self.write('(')
self.visit(node.args)
self.write(')')
if not PY2 and node.returns is not None:
self.write(' -> ')
self.visit(node.returns)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
def visit_ClassDef(self, node):
for decorator in node.decorator_list:
self.write('@')
self.visit(decorator)
self.write_newline()
self.write('class ')
self.write_identifier(node.name)
if (
node.bases or
(not PY2 and (node.keywords or node.starargs or node.kwargs))
):
self.write('(')
self.write_comma_separated_nodes(node.bases)
if not PY2:
if node.keywords:
if node.bases:
self.write(', ')
self.write_comma_separated_nodes(node.keywords)
if node.starargs is not None:
if node.bases or node.keywords:
self.write(', ')
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
if node.bases or node.keywords or node.starargs:
self.write(', ')
self.write('**')
self.visit(node.kwargs)
self.write(')')
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
def visit_Return(self, node):
with self.writing_statement():
self.write('return')
if node.value:
self.write(' ')
self.visit(node.value)
def visit_Delete(self, node):
with self.writing_statement():
self.write('del ')
self.write_comma_separated_nodes(node.targets)
def visit_Assign(self, node):
with self.writing_statement():
for target in node.targets:
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
with self.writing_statement():
self.visit(node.target)
self.write(' ')
self.visit(node.op)
self.write('= ')
self.visit(node.value)
if PY2:
def visit_Print(self, node):
with self.writing_statement():
self.write('print')
if node.values:
self.write(' ')
self.write_comma_separated_nodes(node.values)
if not node.nl:
self.write(',')
def visit_For(self, node):
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
def visit_While(self, node):
self.write('while ')
self.visit(node.test)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
def visit_If(self, node):
self.write('if ')
self.visit(node.test)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
def visit_With(self, node):
self.write('with ')
if PY2:
self.visit(node.context_expr)
if node.optional_vars:
self.write(' as ')
self.visit(node.optional_vars)
else:
self.write_comma_separated_nodes(node.items)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
def visit_Raise(self, node):
with self.writing_statement():
self.write('raise')
if PY2:
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
else:
if node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
def visit_Try(self, node):
self.write_line('try:')
with self.indented():
self.visit_statements(node.body)
for excepthandler in node.handlers:
self.visit(excepthandler)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
if node.finalbody:
self.write_line('finally:')
with self.indented():
self.visit_statements(node.finalbody)
if PY2:
def visit_TryExcept(self, node):
self.write_line('try:')
with self.indented():
self.visit_statements(node.body)
for excepthandler in node.handlers:
self.visit(excepthandler)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
def visit_TryFinally(self, node):
self.write_line('try:')
with self.indented():
self.visit_statements(node.body)
self.write_line('finally:')
with self.indented():
self.visit_statements(node.finalbody)
def visit_Assert(self, node):
with self.writing_statement():
self.write('assert ')
self.visit(node.test)
if node.msg is not None:
self.write(', ')
self.visit(node.msg)
def visit_Import(self, node):
with self.writing_statement():
self.write('import ')
self.write_comma_separated_nodes(node.names)
def visit_ImportFrom(self, node):
with self.writing_statement():
self.write('from ')
if node.module is None:
self.write('.')
else:
self.write_identifier(node.module)
self.write(' import ')
self.write_comma_separated_nodes(node.names)
def visit_Global(self, node):
with self.writing_statement():
self.write('global ')
for name in self.writing_comma_separated(node.names):
self.write_identifier(name)
def visit_Nonlocal(self, node):
with self.writing_statement():
self.write('nonlocal ')
for name in self.writing_comma_separated(node.names):
self.write_identifier(name)
def visit_Expr(self, node):
with self.writing_statement():
self.visit(node.value)
def visit_Pass(self, node):
self.write_line('pass')
def visit_Break(self, node):
self.write_line('break')
def visit_Continue(self, node):
self.write_line('continue')
def visit_BoolOp(self, node):
def write_value(value):
if _requires_parentheses(node, value):
self.write('(')
self.visit(value)
self.write(')')
else:
self.visit(value)
for value in node.values[:-1]:
write_value(value)
self.visit(node.op)
write_value(node.values[-1])
def visit_BinOp(self, node):
if (
_requires_parentheses(node, node.left) or
PY2 and isinstance(node.left, ast.Num) and node.left.n < 0
):
self.write('(')
self.visit(node.left)
self.write(')')
else:
self.visit(node.left)
self.write(u' ')
self.visit(node.op)
self.write(u' ')
if _requires_parentheses(
ast.Mult() if isinstance(node.op, ast.Pow) else node,
node.right
):
self.write('(')
self.visit(node.right)
self.write(')')
else:
self.visit(node.right)
def visit_UnaryOp(self, node):
self.visit(node.op)
if _requires_parentheses(node, node.operand):
self.write('(')
self.visit(node.operand)
self.write(')')
else:
self.visit(node.operand)
def visit_Lambda(self, node):
self.write('lambda ')
self.visit(node.args)
self.write(': ')
self.visit(node.body)
def visit_IfExp(self, node):
if _requires_parentheses(node, node.body):
self.write('(')
self.visit(node.body)
self.write(')')
else:
self.visit(node.body)
self.write(' if ')
if _requires_parentheses(node, node.test):
self.write('(')
self.visit(node.test)
self.write(')')
else:
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Dict(self, node):
self.write('{')
items = list(zip(node.keys, node.values))
for key, value in self.writing_comma_separated(items):
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_Set(self, node):
self.write('{')
self.write_comma_separated_nodes(node.elts)
self.write('}')
def visit_ListComp(self, node):
self.write('[')
self.visit(node.elt)
for generator in node.generators:
self.visit(generator)
self.write(']')
def visit_SetComp(self, node):
self.write('{')
self.visit(node.elt)
for generator in node.generators:
self.visit(generator)
self.write('}')
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for generator in node.generators:
self.visit(generator)
self.write('}')
def visit_GeneratorExp(self, node):
self.write('(')
self.visit(node.elt)
for generator in node.generators:
self.visit(generator)
self.write(')')
def visit_Yield(self, node):
self.write('yield')
if node.value is not None:
self.write(' ')
self.visit(node.value)
def visit_YieldFrom(self, node):
self.write('yield from ')
self.visit(node.value)
def visit_Compare(self, node):
self.visit(node.left)
for op, comparator in zip(node.ops, node.comparators):
self.write(' ')
self.visit(op)
self.write(' ')
self.visit(comparator)
def visit_Call(self, node):
if _requires_parentheses(node, node.func):
self.write('(')
self.visit(node.func)
self.write(')')
else:
self.visit(node.func)
self.write('(')
self.write_comma_separated_nodes(node.args)
if node.keywords:
if node.args:
self.write(', ')
self.write_comma_separated_nodes(node.keywords)
if node.starargs is not None:
if node.args or node.keywords:
self.write(', ')
self.write('*')
self.visit(node.starargs)
if node.kwargs:
if node.args or node.keywords or node.starargs:
self.write(', ')
self.write('**')
self.visit(node.kwargs)
self.write(')')
if PY2:
def visit_Repr(self, node):
self.write('`')
self.visit(node.value)
self.write('`')
def visit_Num(self, node):
self.write_repr(node.n)
def visit_Str(self, node):
self.write_repr(node.s)
def visit_Bytes(self, node):
self.write_repr(node.s)
def visit_Ellipsis(self, node):
self.write('...')
def visit_Attribute(self, node):
if (
_requires_parentheses(node, node.value) and
not isinstance(node.value, ast.Attribute)
):
self.write('(')
self.visit(node.value)
self.write(')')
else:
self.visit(node.value)
self.write('.')
self.write_identifier(node.attr)
def visit_Subscript(self, node):
if (
_requires_parentheses(node, node.value) and
not isinstance(node.value, ast.Subscript)
):
self.write('(')
self.visit(node.value)
self.write(')')
else:
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Name(self, node):
self.write_identifier(node.id)
def visit_List(self, node):
self.write('[')
self.write_comma_separated_nodes(node.elts)
self.write(']')
def visit_Tuple(self, node):
self.write_comma_separated_nodes(node.elts)
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
if node.lower is None:
self.write(':')
self.visit(node.upper)
if node.step is not None:
if node.lower is None and node.upper is None:
self.write('::')
if node.lower is not None or node.upper is not None:
self.write(':')
self.visit(node.step)
if node.lower is None and node.upper is None and node.step is None:
self.write(':')
def visit_And(self, node):
self.write(' and ')
def visit_Or(self, node):
self.write(' or ')
def visit_Add(self, node):
self.write('+')
def visit_Sub(self, node):
self.write('-')
def visit_Mult(self, node):
self.write('*')
def visit_Div(self, node):
self.write('/')
def visit_Mod(self, node):
self.write('%')
def visit_Pow(self, node):
self.write('**')
def visit_LShift(self, node):
self.write('<<')
def visit_RShift(self, node):
self.write('>>')
def visit_BitOr(self, node):
self.write('|')
def visit_BitXor(self, node):
self.write('^')
def visit_BitAnd(self, node):
self.write('&')
def visit_FloorDiv(self, node):
self.write('//')
def visit_Invert(self, node):
self.write('~')
def visit_Not(self, node):
self.write('not ')
def visit_UAdd(self, node):
self.write('+')
def visit_USub(self, node):
self.write('-')
def visit_Eq(self, node):
self.write('==')
def visit_NotEq(self, node):
self.write('!=')
def visit_Lt(self, node):
self.write('<')
def visit_LtE(self, node):
self.write('<=')
def visit_Gt(self, node):
self.write('>')
def visit_GtE(self, node):
self.write('>=')
def visit_Is(self, node):
self.write('is')
def visit_IsNot(self, node):
self.write('is not')
def visit_In(self, node):
self.write('in')
def visit_NotIn(self, node):
self.write('not in')
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
self.write(' if ')
for filter in node.ifs[:-1]:
self.visit(filter)
self.write(' if ')
self.visit(node.ifs[-1])
def visit_ExceptHandler(self, node):
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
if PY2:
self.visit(node.name)
else:
self.write(node.name)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
def visit_arguments(self, node):
if node.args:
if node.defaults:
non_defaults = node.args[:-len(node.defaults)]
defaults = node.args[-len(node.defaults):]
else:
non_defaults = node.args
defaults = []
if non_defaults:
self.write_comma_separated_nodes(non_defaults)
if defaults:
if non_defaults:
self.write(', ')
for argument, default in zip(defaults, node.defaults):
self.visit(argument)
self.write('=')
self.visit(default)
if node.vararg:
if node.args:
self.write(', ')
self.write('*')
self.write_identifier(node.vararg)
if not PY2 and node.kwonlyargs:
if not node.vararg:
self.write('*, ')
arguments = list(zip(node.kwonlyargs, node.kw_defaults))
if arguments:
for argument, default in self.writing_comma_separated(arguments):
self.visit(argument)
if default is not None:
self.write('=')
self.visit(default)
if node.kwarg:
if node.args or node.vararg or (not PY2 and node.kwonlyargs):
self.write(', ')
self.write('**')
self.write_identifier(node.kwarg)
def visit_arg(self, node):
self.write(node.arg)
if node.annotation is not None:
self.write(': ')
self.visit(node.annotation)
def visit_keyword(self, node):
self.write_identifier(node.arg)
self.write('=')
self.visit(node.value)
def visit_alias(self, node):
self.write_identifier(node.name)
if node.asname is not None:
self.write(' as ')
self.write_identifier(node.asname)
def visit_withitem(self, node):
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
_precedence_tower = [
{ast.Lambda},
{ast.IfExp},
{ast.Or},
{ast.And},
{ast.Not},
{
ast.In, ast.NotIn, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.Gt, ast.GtE,
ast.NotEq, ast.Eq
},
{ast.BitOr},
{ast.BitXor},
{ast.BitAnd},
{ast.LShift, ast.RShift},
{ast.Add, ast.Sub},
{ast.Mult, ast.Div, ast.FloorDiv, ast.Mod},
{ast.UAdd, ast.USub, ast.Invert},
{ast.Pow},
{ast.Subscript, ast.Call, ast.Attribute},
{
ast.Tuple, ast.List, ast.Dict, ast.Set, ast.ListComp, ast.DictComp,
ast.SetComp
}
]
_all_nodes = set(chain.from_iterable(_precedence_tower))
_node2lower_equal_upper_nodes = {}
lower = set()
for i, nodes in enumerate(_precedence_tower):
lower = reduce(set.union, _precedence_tower[:i], set())
upper = reduce(set.union, _precedence_tower[i + 1:], set())
for node in nodes:
_node2lower_equal_upper_nodes[node] = (lower, nodes, upper)
def _requires_parentheses(parent, child):
def _normalize(obj):
if isinstance(obj, (ast.BoolOp, ast.BinOp, ast.UnaryOp)):
return obj.op.__class__
return obj.__class__
parent, child = _normalize(parent), _normalize(child)
lower, equal = _node2lower_equal_upper_nodes[parent][:2]
return child in lower | equal
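# Illustrative use of the precedence tower: for a multiplication whose
# operand is an addition, e.g. (a + b) * c,
# _requires_parentheses(ast.BinOp(op=ast.Mult()), ast.BinOp(op=ast.Add()))
# returns True, because ast.Add appears lower in _precedence_tower than
# ast.Mult and therefore needs parentheses when written as its child.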
def dump(node, annotate_fields=True, include_attributes=False):
"""
Like :func:`ast.dump` but with a more readable return value, making the
output actually useful for debugging purposes.
"""
def _format(node, level=0):
if isinstance(node, ast.AST):
fields = [
(name, _format(value, level))
for name, value in ast.iter_fields(node)
]
if include_attributes and node._attributes:
fields.extend((name, _format(getattr(node, name), level))
for name in node._attributes)
return '{}({})'.format(
node.__class__.__name__,
', '.join(
map('='.join, fields)
if annotate_fields else
(value for _, value in fields)
)
)
elif isinstance(node, list):
if node:
indentation = ' ' * (level + 1)
lines = ['[']
lines.extend(
indentation + _format(n, level + 1) + ',' for n in node
)
lines.append(indentation + ']')
return '\n'.join(lines)
return '[]'
return repr(node).decode('ascii') if PY2 else repr(node)
if not isinstance(node, ast.AST):
raise TypeError(
'expected AST, got {!r}'.format(node.__class__.__name__)
)
return _format(node)
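# Illustrative example (exact node names depend on the Python version):
# dump(ast.parse('x = 1').body[0]) renders the Assign node with its
# 'targets' list spread over indented lines, one element per line, rather
# than ast.dump()'s single-line output.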
def is_possible_target(node):
"""
Returns `True`, if the `node` could be a target for example in an
assignment statement ignoring the expression contexts.
"""
return (
isinstance(node, (ast.Name, ast.Subscript, ast.Attribute)) or
isinstance(node, (ast.Tuple, ast.List)) and
all(
is_possible_target(element) or
not PY2 and
isinstance(element, ast.Starred) and
is_possible_target(element.value)
for element in node.elts
)
)
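# Illustrative examples:
# is_possible_target(ast.parse('a, b = 1, 2').body[0].targets[0]) -> True
# is_possible_target(ast.parse('a + b').body[0].value) -> False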
def set_target_contexts(node):
"""
Given a node that could be a target, sets the `.ctx` attribute to
:class:`ast.Store` instances as appropriate.
"""
node.ctx = ast.Store()
if isinstance(node, (ast.Tuple, ast.List)):
for element in node.elts:
set_target_contexts(element)
elif not PY2 and isinstance(node, ast.Starred):
set_target_contexts(node.value) | zweig | /zweig-0.1.0.tar.gz/zweig-0.1.0/zweig.py | zweig.py |
from typing import Callable, Dict, Union
import sys
import traceback
import warnings
import os
import io
import struct
import fnmatch
from pathlib import Path
import docopt
from hurry.filesize import size as human_readable_size
__version__ = '0.0.0'
WAD_MAGIC = b'ZWF!'
docopt_usage = __doc__ + """
usage:
zwf list [options] <file>
zwf extract [options] <file> <dir> [<glob>]
options:
-l List more information
-H Show file sizes in human-readable form
--verbose Print more info
--traceback Print stack trace on errors
"""
class CommandError(RuntimeError):
pass
def read_wad(f: io.RawIOBase):
f.seek(0)
header = f.read(256)
if header[:4] != WAD_MAGIC:
raise CommandError(
f'File does not appear to be a Zwift WAD file. Expected '
f'magic: {WAD_MAGIC}, actual: {header[:4]}')
body_size = struct.unpack('<I', header[248:252])[0]
wad_size = 256 + body_size
actual_size = os.fstat(f.fileno()).st_size
if actual_size < wad_size:
raise CommandError(f'Truncated wad file: header implies '
f'{wad_size} bytes but file is {actual_size} bytes')
if actual_size > wad_size:
warnings.warn(
f'wad file is larger than header implies. expected size: '
f'{wad_size} bytes, actual size: {actual_size} bytes')
entry_pointers = read_entry_pointers(f)
return {'file': f, 'entry_pointers': entry_pointers}
def read_entry_pointers(f):
# There's a 8k block containing 8-byte chunks. First 4 bytes are a pointer
# to a wad file entry, second 4 bytes seem to be either 0 or 1. When 0 the
# pointer is null and the entry seems not to be used. Null and active
# entries are spread throughout.
data = f.read(1024*8)
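# Each 8-byte chunk decodes as (pointer, in_use), e.g.
# struct.unpack('<I?xxx', b'\x00\x01\x00\x00\x01\x00\x00\x00') == (256, True)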
entries = list(struct.iter_unpack('<I?xxx', data))
offset = min(ptr for ptr, in_use in entries if in_use) - 1024*8 - 256
assert offset != 0
return [ptr - offset for ptr, in_use in entries if in_use]
def cstring(data):
end = data.find(b'\x00')
if end < 0:
return data
return data[:end]
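# e.g. cstring(b'hello\x00world') == b'hello'; data without a NUL byte is
# returned unchanged.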
def read_wad_entry(wad, ptr,
include_body: Union[bool, Callable[[Dict], bool]] = True):
f = wad['file']
assert ptr in wad['entry_pointers']
f.seek(ptr)
header = f.read(192)
# Not sure what encoding (if any) is used
path = cstring(header[4:100]).decode('ascii')
size = struct.unpack('<I', header[104:108])[0]
entry = {
'path': path,
'size': size
}
if (include_body(entry) if callable(include_body) else include_body):
entry['body'] = f.read(size)
return entry
def list_wad(wad, long_listing=False, human_readable_sizes=False):
for ptr in wad['entry_pointers']:
entry = read_wad_entry(wad, ptr, include_body=False)
if long_listing:
if human_readable_sizes:
size = human_readable_size(entry['size'])
else:
size = entry['size']
print(f'{size} {entry["path"]}')
else:
print(entry["path"])
def extract_wad(wad, dest_dir: Path, entry_predicate: Callable[[Dict], bool],
verbose=False):
if not dest_dir.is_dir():
raise CommandError(
f'Destination directory is not an existing directory')
if next(dest_dir.iterdir(), None) is not None:
raise CommandError(f'Destination dir is not empty')
for ptr in wad['entry_pointers']:
entry = read_wad_entry(wad, ptr, include_body=entry_predicate)
if 'body' not in entry:
continue
entry_path = dest_dir / entry['path']
if dest_dir not in entry_path.parents:
raise CommandError(f'Entry would extract out of destination '
f'directory: {entry["path"]!r}')
if verbose:
print(f' extracting: {entry["path"]} ... ', end='', flush=True)
entry_path.parent.mkdir(parents=True, exist_ok=True)
with open(entry_path, 'wb') as f:
f.write(entry['body'])
if verbose:
print(f'done')
def main():
args = docopt.docopt(docopt_usage)
try:
f = open(args['<file>'], 'rb')
wad = read_wad(f)
if args['list']:
list_wad(wad, long_listing=args['-l'],
human_readable_sizes=args['-H'])
elif args['extract']:
dest = Path(args['<dir>'])
if args['--verbose']:
print(f' Zwift WAD: {args["<file>"]}')
print(f'Destination: {dest}')
predicate = lambda x: True
if args['<glob>']:
glob = args['<glob>']
predicate = lambda entry: fnmatch.fnmatchcase(entry['path'],
glob)
extract_wad(wad, dest, entry_predicate=predicate,
verbose=args['--verbose'])
else:
raise NotImplementedError()
except CommandError as e:
print(f'Fatal: {e}', file=sys.stderr)
if args['--traceback']:
print('\nTraceback follows:\n', file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
if __name__ == '__main__':
main() | zwf | /zwf-0.1.0.tar.gz/zwf-0.1.0/zwf.py | zwf.py |
MIT License
Copyright (c) 2021 damon anton permezel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| zwi | /zwi-0.3a1.tar.gz/zwi-0.3a1/LICENSE.md | LICENSE.md |
[](https://zwift.com/)
# zwi
Here are some small python programmes to facilitate viewing Zwift data.
# requirements
a Zwift user account
python3.9 or later
pip3
# usage
The easiest way to use this is to let `pip3 install` pull in
everything you need. Prior to that, you should probably have done
something like:
ZWI_ENV=/tmp/zwi_env
python3 --version
python3 -m venv ${ZWI_ENV}
. ${ZWI_ENV}/bin/activate
At this point, you will have a dedicated play area to install `zwi`
and dependencies.
pip3 install zwi
This will install everything required, and place two command scripts
in `${ZWI_ENV}/bin`, which will be on your `${PATH}` while the
environment is activated.
zwi --help
## version
zwi version
The `version` function returns the version number of the currently
installed `zwi` package.
## authentication
Before you can do much of interest, you must store your authentication
information in the local key store. For OsX, this is the system
keychain. For linux, it is something similar.
zwi auth --help
zwi auth [email protected] --password=SuperSecret123
zwi auth
You can have the `auth` programme prompt for both `name` and
`password`. In that case, you need to enter the password twice, and
it will not be stored unless it successfully authenticates with the
Zwift server.
## verification
zwi check --help
zwi check
The `check` function will verify that the stored credentials function.
At times on MacOS, the keychain decides it doesn't like you any more
and requires you to enter your login password, twice, whenever `zwi`
accesses the stored user name and password. Make sure you click on
`always allow` unless you need to practice typing your password.
## initialise/reset database
Once you are authenticated, the next step is to populate the local
database cache with information from Zwift. `zwi` maintains state in
`${HOME}/.zwi/`. An `sqlite3` database is used to cache the state of
the user's `followers` and `followees` lists. In addition, the
profiles of all Zwift users encountered (via the
`followers`/`followees` lists) are saved in a separate database.
zwi reset --help
zwi reset
The `reset` function deletes the `${HOME}/.zwi/zwi.db` database file
if it exists, creates the `sqlite3` database, and populates the
database with the `followers` and `followees` tables.
It will not delete the profiles database, but it will ensure that
there are profile entries for each user in the `followers` and
`followees` lists.
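For example, once `reset` has populated the cache you can poke at
`${HOME}/.zwi/zwi.db` directly with the stock `sqlite3` shell
(illustrative; the table layout may change between releases):

    sqlite3 ~/.zwi/zwi.db '.tables'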
## update followers/followees database
zwi update --help
zwi -v update
The `update` function refreshes the `followers` and `followees` information.
(Currently, this function is being fleshed out. It does not yet
report any differences. Also, it fails to process deletions.)
## update profile database
zwi pro-update --help
zwi [-v] pro-update [--force]
The `pro-update` function will update the local DB profile cache using
information in the local Zwift user `followers` and `followees` DB
cache.
## list profile database entries
zwi pro-list --help
zwi pro-list
The profiles list can be displayed.
## bokeh
zwibok serve [--port=#]
The `profile` database can be viewed using the `zwibok` app.
This will pop up a page on your browser allowing you to explore
various attributes of the users in the `profile` data base.
It should be more or less obvious. Eventually I might try to write
some usage info, but as it is more or less a proof-of-concept, it
might change again soon.
Basically, it presents an X/Y plot of subsets of the data. You can
select different data columns for X and Y. You can adjust range
sliders to reduce the set of data points in the plot.
Male-only or female-only or both can be selected.
The cross-hairs of the cursor select users and display some more info
pertaining to the user.
## gui
zwi gui --help
zwi gui
The `gui` function pops up a window displaying data from the local
database copy of the Zwift `followers` and `followees` tables.
This was my second attempt at writing a gui to view some of the
data. Currently, it only displays information from the `followers` and
`followees` lists.
Key Bindings (for OSX):
CMD-1 Switch to `followers` table.
CMD-2 Switch to `followees` table.
CMD-a Toggle `auto` mode.
CMD-n Move to next entry.
CMD-p Move to previous entry.
CMD-f Search (not yet implemented).
CMD-q Quit
If `auto` mode is enabled:
CMD-n increase interval
CMD-p decrease interval
The slider at the bottom can be used to move rapidly thru the list.
For Linux, the key bindings appear to map to the CTRL key; the menu items show the actual binding.
## followees
zwi wees --help
zwi wees
The `wees` function will check the cached followees list (them who's followed).
Any subject who is being followed but who is not reciprocating is displayed.
You will have to manually search for the user in the Zwift companion and decide what punishment to hand out.
## followers
zwi wers --help
zwi wers
The `wers` function will check the cached followers list and display any lacking reciprocity.
Certain users will follow you, but not respond to reciprocal follow requests, remaining forever in limbo.
One can always try unfollowing/refollowing to see if the recalcitrant is interested in reciprocity.
As above, as far as I know, one has to use the Zwift companion app to search by name.
## inspect other user's public information.
Per the Zwift privacy policy, various data are publicly accessible. The `inspect` command
facilitates examination of the publicly available data.
zwi inspect --help
zwi inspect --zid=ZwiftUser
zwi -v inspect --zid=ZwiftUser --update
## removing authentication information
The `clear` function will remove any cached user/password information from the keystore.
# development
I have been using `anaconda` on `OsX` for development. Supposedly, this will install things
to facilitate development:
conda env create -f environment.yml
conda activate zwi
flit install --symlink
pip3 install zwift-client
pip3 install PyQt5
# hints
When manually deleting followees, using the Zwift companion app, and
searching by name, I find it helps to type in the bits of the name
which are more likely to be unique, so as to limit the lists
presented.
# user feedback
## issues
If you have any problems with or questions about this image, please contact me
through a [GitHub issue](https://github.com/permezel/zwi/issues).
| zwi | /zwi-0.3a1.tar.gz/zwi-0.3a1/README.md | README.md |
=======================
Zwift Mobile API client
=======================
.. image:: https://img.shields.io/pypi/v/zwift-client.svg
:target: https://pypi.python.org/pypi/zwift-client
.. image:: https://img.shields.io/travis/jsmits/zwift-client.svg
:target: https://travis-ci.org/jsmits/zwift-client
.. image:: https://pyup.io/repos/github/jsmits/zwift-client/shield.svg
:target: https://pyup.io/repos/github/jsmits/zwift-client/
:alt: Updates
Zwift Mobile API client written in Python. Heavily inspired by zwift-mobile-api_.
Installation
------------
::
$ pip install zwift-client
Usage
-----
Client
++++++
::
>>> from zwift import Client
>>> username = 'your-username'
>>> password = 'your-password'
>>> player_id = your-player-id
>>> client = Client(username, password)
Profile
+++++++
::
>>> profile = client.get_profile()
>>> profile.profile # fetch your profile data
>>> profile.followers
>>> profile.followees
>>> profile.get_activities() # metadata of your activities
>>> profile.latest_activity # metadata of your latest activity
Activity
++++++++
::
>>> activity = client.get_activity(player_id)
>>> activities = activity.list() # your activities (default start is 0, default limit is 20)
>>> activities = activity.list(start=20, limit=50)
>>> latest_activity_id = activities[0]['id']
>>> activity.get_activity(latest_activity_id) # metadata of your latest activity
>>> activity.get_data(latest_activity_id) # processed FIT file data
World
+++++
::
>>> world = client.get_world(1) # get world with id 1
>>> world.players # players currently present in this world
>>> world.player_status(player_id) # current player status information like speed, cadence, power, etc.
Credits
---------
This package was created with cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
.. _zwift-mobile-api: https://github.com/Ogadai/zwift-mobile-api
| zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/jsmits/zwift-client/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
and "help wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Zwift Mobile API client could always use more documentation, whether as part of the
official Zwift Mobile API client docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/jsmits/zwift-client/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zwift-client` for local development.
1. Fork the `zwift-client` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zwift-client.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zwift-client
$ cd zwift-client/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 zwift tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.7, 3.4, 3.5 and 3.6, and for PyPy. Check
https://travis-ci.org/jsmits/zwift-client/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ py.test tests.test_zwift
| zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/CONTRIBUTING.rst | CONTRIBUTING.rst |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install Zwift Mobile API client, run this command in your terminal:
.. code-block:: console
$ pip install zwift-client
This is the preferred method to install Zwift Mobile API client, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for Zwift Mobile API client can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/jsmits/zwift-client
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/jsmits/zwift-client/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/jsmits/zwift-client
.. _tarball: https://github.com/jsmits/zwift-client/tarball/master
| zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/docs/installation.rst | installation.rst |
from __future__ import print_function
import time
import requests
class AuthToken:
def __init__(self, username, password):
self.username = username
self.password = password
# variables from the API response
self.access_token = None
self.expires_in = None
self.id_token = None
self.not_before_policy = None
self.refresh_token = None
self.refresh_expires_in = None
self.session_state = None
self.token_type = None
# absolute expire times of the access and refresh tokens
self.access_token_expiration = None
self.refresh_token_expiration = None
def fetch_token_data(self):
if self.have_valid_refresh_token():
data = {
"refresh_token": self.refresh_token,
"grant_type": "refresh_token",
}
else:
data = {
"username": self.username,
"password": self.password,
"grant_type": "password",
}
data['client_id'] = "Zwift_Mobile_Link"
r = requests.post(
'https://secure.zwift.com/auth/realms/zwift/tokens/access/codes',
data=data)
if not r.ok:
# TODO: handle exceptions
pass
return r.json()
def update_token_data(self):
"""Parse the access token response."""
token_data = self.fetch_token_data()
now = time.time()
for key, value in token_data.items():
key = key.replace('-', '_')
setattr(self, key, value)
self.access_token_expiration = now + self.expires_in - 5
self.refresh_token_expiration = now + self.refresh_expires_in - 5
def have_valid_access_token(self):
if not self.access_token or time.time() > self.access_token_expiration:
return False
else:
return True
def have_valid_refresh_token(self):
if (not self.refresh_token or
time.time() > self.refresh_token_expiration):
return False
else:
return True
def get_access_token(self):
if self.have_valid_access_token():
return self.access_token
else:
self.update_token_data()
return self.access_token | zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/zwift/auth.py | auth.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='zwift_messages.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x14zwift_messages.proto\"\xc6\x03\n\x0bPlayerState\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x11\n\tworldTime\x18\x02 \x01(\x03\x12\x10\n\x08\x64istance\x18\x03 \x01(\x05\x12\x10\n\x08roadTime\x18\x04 \x01(\x05\x12\x0c\n\x04laps\x18\x05 \x01(\x05\x12\r\n\x05speed\x18\x06 \x01(\x05\x12\x14\n\x0croadPosition\x18\x08 \x01(\x05\x12\x12\n\ncadenceUHz\x18\t \x01(\x05\x12\x11\n\theartrate\x18\x0b \x01(\x05\x12\r\n\x05power\x18\x0c \x01(\x05\x12\x0f\n\x07heading\x18\r \x01(\x03\x12\x0c\n\x04lean\x18\x0e \x01(\x05\x12\x10\n\x08\x63limbing\x18\x0f \x01(\x05\x12\x0c\n\x04time\x18\x10 \x01(\x05\x12\x0b\n\x03\x66\x31\x39\x18\x13 \x01(\x05\x12\x0b\n\x03\x66\x32\x30\x18\x14 \x01(\x05\x12\x10\n\x08progress\x18\x15 \x01(\x05\x12\x17\n\x0f\x63ustomisationId\x18\x16 \x01(\x03\x12\x14\n\x0cjustWatching\x18\x17 \x01(\x05\x12\x10\n\x08\x63\x61lories\x18\x18 \x01(\x05\x12\t\n\x01x\x18\x19 \x01(\x02\x12\x10\n\x08\x61ltitude\x18\x1a \x01(\x02\x12\t\n\x01y\x18\x1b \x01(\x02\x12\x17\n\x0fwatchingRiderId\x18\x1c \x01(\x05\x12\x0f\n\x07groupId\x18\x1d \x01(\x05\x12\r\n\x05sport\x18\x1f \x01(\x03\"\xd1\x01\n\x0e\x43lientToServer\x12\x11\n\tconnected\x18\x01 \x01(\x05\x12\x10\n\x08rider_id\x18\x02 \x01(\x05\x12\x12\n\nworld_time\x18\x03 \x01(\x03\x12\x1b\n\x05state\x18\x07 \x01(\x0b\x32\x0c.PlayerState\x12\r\n\x05seqno\x18\x04 \x01(\x05\x12\x0c\n\x04tag8\x18\x08 \x01(\x03\x12\x0c\n\x04tag9\x18\t \x01(\x03\x12\x13\n\x0blast_update\x18\n \x01(\x03\x12\r\n\x05tag11\x18\x0b \x01(\x03\x12\x1a\n\x12last_player_update\x18\x0c \x01(\x03\"\xe2\x01\n\rSegmentResult\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x10\n\x08rider_id\x18\x02 \x01(\x03\x12\x19\n\x11\x65vent_subgroup_id\x18\x06 \x01(\x03\x12\x12\n\nfirst_name\x18\x07 \x01(\t\x12\x11\n\tlast_name\x18\x08 \x01(\t\x12\x17\n\x0f\x66inish_time_str\x18\n \x01(\t\x12\x12\n\nelapsed_ms\x18\x0b \x01(\x03\x12\x12\n\npowermeter\x18\x0c \x01(\x05\x12\x0e\n\x06weight\x18\r \x01(\x05\x12\r\n\x05power\x18\x0f \x01(\x05\x12\x11\n\theartrate\x18\x13 \x01(\x05\"z\n\x0eSegmentResults\x12\x10\n\x08world_id\x18\x01 \x01(\x03\x12\x12\n\nsegment_id\x18\x02 \x01(\x03\x12\x19\n\x11\x65vent_subgroup_id\x18\x03 \x01(\x03\x12\'\n\x0fsegment_results\x18\x04 \x03(\x0b\x32\x0e.SegmentResult\"\x11\n\x0fUnknownMessage1\"\x10\n\x0eUnknownMessage\"\xe1\x01\n\x0eServerToClient\x12\x0c\n\x04tag1\x18\x01 \x01(\x05\x12\x10\n\x08rider_id\x18\x02 \x01(\x05\x12\x12\n\nworld_time\x18\x03 \x01(\x03\x12\r\n\x05seqno\x18\x04 \x01(\x05\x12#\n\rplayer_states\x18\x08 \x03(\x0b\x32\x0c.PlayerState\x12\'\n\x0eplayer_updates\x18\t \x03(\x0b\x32\x0f.UnknownMessage\x12\r\n\x05tag11\x18\x0b \x01(\x03\x12\r\n\x05tag17\x18\x11 \x01(\x03\x12\x10\n\x08num_msgs\x18\x12 \x01(\x05\x12\x0e\n\x06msgnum\x18\x13 \x01(\x05\"u\n\x0fWorldAttributes\x12\x10\n\x08world_id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04tag3\x18\x03 \x01(\x03\x12\x0c\n\x04tag5\x18\x04 \x01(\x03\x12\x12\n\nworld_time\x18\x06 \x01(\x03\x12\x12\n\nclock_time\x18\x07 \x01(\x03\"$\n\x0eWorldAttribute\x12\x12\n\nworld_time\x18\x02 \x01(\x03\"\xa9\x01\n\x15\x45ventSubgroupProtobuf\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05rules\x18\x08 \x01(\x05\x12\r\n\x05route\x18\x16 \x01(\x05\x12\x0c\n\x04laps\x18\x19 \x01(\x05\x12\x15\n\rstartLocation\x18\x1d \x01(\x05\x12\r\n\x05label\x18\x1e \x01(\x05\x12\x10\n\x08paceType\x18\x1f \x01(\x05\x12\x12\n\njerseyHash\x18$ \x01(\x05\"\xf1\x01\n\x0fRiderAttributes\x12\n\n\x02\x66\x32\x18\x02 \x01(\x05\x12\n\n\x02\x66\x33\x18\x03 \x01(\x05\x12;\n\x10\x61ttributeMessage\x18\x04 
\x01(\x0b\x32!.RiderAttributes.AttributeMessage\x12\x0f\n\x07theirId\x18\n \x01(\x05\x12\x0b\n\x03\x66\x31\x33\x18\r \x01(\x05\x1ak\n\x10\x41ttributeMessage\x12\x0c\n\x04myId\x18\x01 \x01(\x05\x12\x0f\n\x07theirId\x18\x02 \x01(\x05\x12\x11\n\tfirstName\x18\x03 \x01(\t\x12\x10\n\x08lastName\x18\x04 \x01(\t\x12\x13\n\x0b\x63ountryCode\x18\x05 \x01(\x05\"&\n\x08Profiles\x12\x1a\n\x08profiles\x18\x01 \x03(\x0b\x32\x08.Profile\"\x8a\x03\n\x07Profile\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x11\n\tfirstName\x18\x04 \x01(\t\x12\x10\n\x08lastName\x18\x05 \x01(\t\x12\x0c\n\x04male\x18\x06 \x01(\x05\x12\x0e\n\x06weight\x18\t \x01(\x05\x12\x10\n\x08\x62odyType\x18\x0c \x01(\x05\x12\x13\n\x0b\x63ountryCode\x18\" \x01(\x05\x12\x15\n\rtotalDistance\x18# \x01(\x05\x12\x1c\n\x14totalDistanceClimbed\x18$ \x01(\x05\x12\x1a\n\x12totalTimeInMinutes\x18% \x01(\x05\x12\x16\n\x0etotalWattHours\x18) \x01(\x05\x12\x0e\n\x06height\x18* \x01(\x05\x12\x1d\n\x15totalExperiencePoints\x18. \x01(\x05\x12\x18\n\x10\x61\x63hievementLevel\x18\x31 \x01(\x05\x12\x13\n\x0bpowerSource\x18\x34 \x01(\x05\x12\x0b\n\x03\x61ge\x18\x37 \x01(\x05\x12\x1a\n\x12launchedGameClient\x18l \x01(\t\x12\x19\n\x11\x63urrentActivityId\x18m \x01(\x05\x62\x06proto3')
)
_PLAYERSTATE = _descriptor.Descriptor(
name='PlayerState',
full_name='PlayerState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='PlayerState.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='worldTime', full_name='PlayerState.worldTime', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='distance', full_name='PlayerState.distance', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='roadTime', full_name='PlayerState.roadTime', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='laps', full_name='PlayerState.laps', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='speed', full_name='PlayerState.speed', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='roadPosition', full_name='PlayerState.roadPosition', index=6,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cadenceUHz', full_name='PlayerState.cadenceUHz', index=7,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='heartrate', full_name='PlayerState.heartrate', index=8,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='power', full_name='PlayerState.power', index=9,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='heading', full_name='PlayerState.heading', index=10,
number=13, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lean', full_name='PlayerState.lean', index=11,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='climbing', full_name='PlayerState.climbing', index=12,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time', full_name='PlayerState.time', index=13,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f19', full_name='PlayerState.f19', index=14,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f20', full_name='PlayerState.f20', index=15,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='progress', full_name='PlayerState.progress', index=16,
number=21, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='customisationId', full_name='PlayerState.customisationId', index=17,
number=22, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='justWatching', full_name='PlayerState.justWatching', index=18,
number=23, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='calories', full_name='PlayerState.calories', index=19,
number=24, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='x', full_name='PlayerState.x', index=20,
number=25, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='altitude', full_name='PlayerState.altitude', index=21,
number=26, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='PlayerState.y', index=22,
number=27, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='watchingRiderId', full_name='PlayerState.watchingRiderId', index=23,
number=28, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='groupId', full_name='PlayerState.groupId', index=24,
number=29, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sport', full_name='PlayerState.sport', index=25,
number=31, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=25,
serialized_end=479,
)
_CLIENTTOSERVER = _descriptor.Descriptor(
name='ClientToServer',
full_name='ClientToServer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='connected', full_name='ClientToServer.connected', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rider_id', full_name='ClientToServer.rider_id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='world_time', full_name='ClientToServer.world_time', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='ClientToServer.state', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seqno', full_name='ClientToServer.seqno', index=4,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag8', full_name='ClientToServer.tag8', index=5,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag9', full_name='ClientToServer.tag9', index=6,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_update', full_name='ClientToServer.last_update', index=7,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag11', full_name='ClientToServer.tag11', index=8,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_player_update', full_name='ClientToServer.last_player_update', index=9,
number=12, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=482,
serialized_end=691,
)
_SEGMENTRESULT = _descriptor.Descriptor(
name='SegmentResult',
full_name='SegmentResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SegmentResult.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rider_id', full_name='SegmentResult.rider_id', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='event_subgroup_id', full_name='SegmentResult.event_subgroup_id', index=2,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_name', full_name='SegmentResult.first_name', index=3,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_name', full_name='SegmentResult.last_name', index=4,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='finish_time_str', full_name='SegmentResult.finish_time_str', index=5,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='elapsed_ms', full_name='SegmentResult.elapsed_ms', index=6,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='powermeter', full_name='SegmentResult.powermeter', index=7,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight', full_name='SegmentResult.weight', index=8,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='power', full_name='SegmentResult.power', index=9,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='heartrate', full_name='SegmentResult.heartrate', index=10,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=694,
serialized_end=920,
)
_SEGMENTRESULTS = _descriptor.Descriptor(
name='SegmentResults',
full_name='SegmentResults',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='world_id', full_name='SegmentResults.world_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='segment_id', full_name='SegmentResults.segment_id', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='event_subgroup_id', full_name='SegmentResults.event_subgroup_id', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='segment_results', full_name='SegmentResults.segment_results', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=922,
serialized_end=1044,
)
_UNKNOWNMESSAGE1 = _descriptor.Descriptor(
name='UnknownMessage1',
full_name='UnknownMessage1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1046,
serialized_end=1063,
)
_UNKNOWNMESSAGE = _descriptor.Descriptor(
name='UnknownMessage',
full_name='UnknownMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1065,
serialized_end=1081,
)
_SERVERTOCLIENT = _descriptor.Descriptor(
name='ServerToClient',
full_name='ServerToClient',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tag1', full_name='ServerToClient.tag1', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rider_id', full_name='ServerToClient.rider_id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='world_time', full_name='ServerToClient.world_time', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seqno', full_name='ServerToClient.seqno', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_states', full_name='ServerToClient.player_states', index=4,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_updates', full_name='ServerToClient.player_updates', index=5,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag11', full_name='ServerToClient.tag11', index=6,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag17', full_name='ServerToClient.tag17', index=7,
number=17, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_msgs', full_name='ServerToClient.num_msgs', index=8,
number=18, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='msgnum', full_name='ServerToClient.msgnum', index=9,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1084,
serialized_end=1309,
)
_WORLDATTRIBUTES = _descriptor.Descriptor(
name='WorldAttributes',
full_name='WorldAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='world_id', full_name='WorldAttributes.world_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='WorldAttributes.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag3', full_name='WorldAttributes.tag3', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag5', full_name='WorldAttributes.tag5', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='world_time', full_name='WorldAttributes.world_time', index=4,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='clock_time', full_name='WorldAttributes.clock_time', index=5,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1311,
serialized_end=1428,
)
_WORLDATTRIBUTE = _descriptor.Descriptor(
name='WorldAttribute',
full_name='WorldAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='world_time', full_name='WorldAttribute.world_time', index=0,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1430,
serialized_end=1466,
)
_EVENTSUBGROUPPROTOBUF = _descriptor.Descriptor(
name='EventSubgroupProtobuf',
full_name='EventSubgroupProtobuf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='EventSubgroupProtobuf.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='EventSubgroupProtobuf.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rules', full_name='EventSubgroupProtobuf.rules', index=2,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='route', full_name='EventSubgroupProtobuf.route', index=3,
number=22, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='laps', full_name='EventSubgroupProtobuf.laps', index=4,
number=25, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='startLocation', full_name='EventSubgroupProtobuf.startLocation', index=5,
number=29, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='EventSubgroupProtobuf.label', index=6,
number=30, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paceType', full_name='EventSubgroupProtobuf.paceType', index=7,
number=31, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='jerseyHash', full_name='EventSubgroupProtobuf.jerseyHash', index=8,
number=36, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1469,
serialized_end=1638,
)
_RIDERATTRIBUTES_ATTRIBUTEMESSAGE = _descriptor.Descriptor(
name='AttributeMessage',
full_name='RiderAttributes.AttributeMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='myId', full_name='RiderAttributes.AttributeMessage.myId', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='theirId', full_name='RiderAttributes.AttributeMessage.theirId', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='firstName', full_name='RiderAttributes.AttributeMessage.firstName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lastName', full_name='RiderAttributes.AttributeMessage.lastName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='countryCode', full_name='RiderAttributes.AttributeMessage.countryCode', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1775,
serialized_end=1882,
)
_RIDERATTRIBUTES = _descriptor.Descriptor(
name='RiderAttributes',
full_name='RiderAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='f2', full_name='RiderAttributes.f2', index=0,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f3', full_name='RiderAttributes.f3', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attributeMessage', full_name='RiderAttributes.attributeMessage', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='theirId', full_name='RiderAttributes.theirId', index=3,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f13', full_name='RiderAttributes.f13', index=4,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_RIDERATTRIBUTES_ATTRIBUTEMESSAGE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1641,
serialized_end=1882,
)
_PROFILES = _descriptor.Descriptor(
name='Profiles',
full_name='Profiles',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='profiles', full_name='Profiles.profiles', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1884,
serialized_end=1922,
)
_PROFILE = _descriptor.Descriptor(
name='Profile',
full_name='Profile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Profile.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='firstName', full_name='Profile.firstName', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lastName', full_name='Profile.lastName', index=2,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='male', full_name='Profile.male', index=3,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight', full_name='Profile.weight', index=4,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bodyType', full_name='Profile.bodyType', index=5,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='countryCode', full_name='Profile.countryCode', index=6,
number=34, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalDistance', full_name='Profile.totalDistance', index=7,
number=35, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalDistanceClimbed', full_name='Profile.totalDistanceClimbed', index=8,
number=36, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalTimeInMinutes', full_name='Profile.totalTimeInMinutes', index=9,
number=37, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalWattHours', full_name='Profile.totalWattHours', index=10,
number=41, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='Profile.height', index=11,
number=42, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalExperiencePoints', full_name='Profile.totalExperiencePoints', index=12,
number=46, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='achievementLevel', full_name='Profile.achievementLevel', index=13,
number=49, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='powerSource', full_name='Profile.powerSource', index=14,
number=52, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='age', full_name='Profile.age', index=15,
number=55, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='launchedGameClient', full_name='Profile.launchedGameClient', index=16,
number=108, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='currentActivityId', full_name='Profile.currentActivityId', index=17,
number=109, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1925,
serialized_end=2319,
)
_CLIENTTOSERVER.fields_by_name['state'].message_type = _PLAYERSTATE
_SEGMENTRESULTS.fields_by_name['segment_results'].message_type = _SEGMENTRESULT
_SERVERTOCLIENT.fields_by_name['player_states'].message_type = _PLAYERSTATE
_SERVERTOCLIENT.fields_by_name['player_updates'].message_type = _UNKNOWNMESSAGE
_RIDERATTRIBUTES_ATTRIBUTEMESSAGE.containing_type = _RIDERATTRIBUTES
_RIDERATTRIBUTES.fields_by_name['attributeMessage'].message_type = _RIDERATTRIBUTES_ATTRIBUTEMESSAGE
_PROFILES.fields_by_name['profiles'].message_type = _PROFILE
DESCRIPTOR.message_types_by_name['PlayerState'] = _PLAYERSTATE
DESCRIPTOR.message_types_by_name['ClientToServer'] = _CLIENTTOSERVER
DESCRIPTOR.message_types_by_name['SegmentResult'] = _SEGMENTRESULT
DESCRIPTOR.message_types_by_name['SegmentResults'] = _SEGMENTRESULTS
DESCRIPTOR.message_types_by_name['UnknownMessage1'] = _UNKNOWNMESSAGE1
DESCRIPTOR.message_types_by_name['UnknownMessage'] = _UNKNOWNMESSAGE
DESCRIPTOR.message_types_by_name['ServerToClient'] = _SERVERTOCLIENT
DESCRIPTOR.message_types_by_name['WorldAttributes'] = _WORLDATTRIBUTES
DESCRIPTOR.message_types_by_name['WorldAttribute'] = _WORLDATTRIBUTE
DESCRIPTOR.message_types_by_name['EventSubgroupProtobuf'] = _EVENTSUBGROUPPROTOBUF
DESCRIPTOR.message_types_by_name['RiderAttributes'] = _RIDERATTRIBUTES
DESCRIPTOR.message_types_by_name['Profiles'] = _PROFILES
DESCRIPTOR.message_types_by_name['Profile'] = _PROFILE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PlayerState = _reflection.GeneratedProtocolMessageType('PlayerState', (_message.Message,), dict(
DESCRIPTOR = _PLAYERSTATE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:PlayerState)
))
_sym_db.RegisterMessage(PlayerState)
ClientToServer = _reflection.GeneratedProtocolMessageType('ClientToServer', (_message.Message,), dict(
DESCRIPTOR = _CLIENTTOSERVER,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:ClientToServer)
))
_sym_db.RegisterMessage(ClientToServer)
SegmentResult = _reflection.GeneratedProtocolMessageType('SegmentResult', (_message.Message,), dict(
DESCRIPTOR = _SEGMENTRESULT,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:SegmentResult)
))
_sym_db.RegisterMessage(SegmentResult)
SegmentResults = _reflection.GeneratedProtocolMessageType('SegmentResults', (_message.Message,), dict(
DESCRIPTOR = _SEGMENTRESULTS,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:SegmentResults)
))
_sym_db.RegisterMessage(SegmentResults)
UnknownMessage1 = _reflection.GeneratedProtocolMessageType('UnknownMessage1', (_message.Message,), dict(
DESCRIPTOR = _UNKNOWNMESSAGE1,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:UnknownMessage1)
))
_sym_db.RegisterMessage(UnknownMessage1)
UnknownMessage = _reflection.GeneratedProtocolMessageType('UnknownMessage', (_message.Message,), dict(
DESCRIPTOR = _UNKNOWNMESSAGE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:UnknownMessage)
))
_sym_db.RegisterMessage(UnknownMessage)
ServerToClient = _reflection.GeneratedProtocolMessageType('ServerToClient', (_message.Message,), dict(
DESCRIPTOR = _SERVERTOCLIENT,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:ServerToClient)
))
_sym_db.RegisterMessage(ServerToClient)
WorldAttributes = _reflection.GeneratedProtocolMessageType('WorldAttributes', (_message.Message,), dict(
DESCRIPTOR = _WORLDATTRIBUTES,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:WorldAttributes)
))
_sym_db.RegisterMessage(WorldAttributes)
WorldAttribute = _reflection.GeneratedProtocolMessageType('WorldAttribute', (_message.Message,), dict(
DESCRIPTOR = _WORLDATTRIBUTE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:WorldAttribute)
))
_sym_db.RegisterMessage(WorldAttribute)
EventSubgroupProtobuf = _reflection.GeneratedProtocolMessageType('EventSubgroupProtobuf', (_message.Message,), dict(
DESCRIPTOR = _EVENTSUBGROUPPROTOBUF,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:EventSubgroupProtobuf)
))
_sym_db.RegisterMessage(EventSubgroupProtobuf)
RiderAttributes = _reflection.GeneratedProtocolMessageType('RiderAttributes', (_message.Message,), dict(
AttributeMessage = _reflection.GeneratedProtocolMessageType('AttributeMessage', (_message.Message,), dict(
DESCRIPTOR = _RIDERATTRIBUTES_ATTRIBUTEMESSAGE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:RiderAttributes.AttributeMessage)
))
,
DESCRIPTOR = _RIDERATTRIBUTES,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:RiderAttributes)
))
_sym_db.RegisterMessage(RiderAttributes)
_sym_db.RegisterMessage(RiderAttributes.AttributeMessage)
Profiles = _reflection.GeneratedProtocolMessageType('Profiles', (_message.Message,), dict(
DESCRIPTOR = _PROFILES,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:Profiles)
))
_sym_db.RegisterMessage(Profiles)
Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), dict(
DESCRIPTOR = _PROFILE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:Profile)
))
_sym_db.RegisterMessage(Profile)
# @@protoc_insertion_point(module_scope) | zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/zwift/zwift_messages_pb2.py | zwift_messages_pb2.py |
from . import zwift_messages_pb2
from .request import Request
COURSE_TO_WORLD = {3: 1, 4: 2, 5: 3, 6: 1}
class COURSES:
WATOPIA = 3
RICHMOND = 4
LONDON = 5
class World:
def __init__(self, world_id, get_access_token):
self.world_id = world_id
self.request = Request(get_access_token)
@property
def players(self):
return self.request.json('/relay/worlds/{}'.format(self.world_id))
def player_status(self, player_id):
buffer = self.request.protobuf(
'/relay/worlds/{}/players/{}'.format(self.world_id, player_id))
player_state = zwift_messages_pb2.PlayerState()
player_state.ParseFromString(buffer)
return PlayerStateWrapper(player_state)
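# Illustrative usage sketch for the World class above (added example, not part of the original
# module); the access-token callable and the player id are placeholders.
#
#   world = World(1, get_access_token=lambda: my_access_token)
#   state = world.player_status(12345)       # -> PlayerStateWrapper
#   print(state.speed, state.power, state.cadence)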
class PlayerStateWrapper(object):
"""
Wrap a PlayerState instance to make it more usable.
Fields provided by wrapped player_state:
id, worldTime, distance, roadTime, laps, speed, roadPosition, cadenceUHz,
heartrate, power, heading, lean, climbing, time, f19, f20, progress,
customisationId, justWatching, calories, x, altitude, y, watchingRiderId,
groupId, sport
"""
class TURN_SIGNALS:
RIGHT = 'right'
LEFT = 'left'
STRAIGHT = 'straight'
def __init__(self, player_state):
self.player_state = player_state
@property
def ride_ons(self):
return (self.player_state.f19 >> 24) & 0xfff
@property
def is_turning(self):
return (self.player_state.f19 & 8) != 0
@property
def is_forward(self):
return (self.player_state.f19 & 4) != 0
@property
def course(self):
return (self.player_state.f19 & 0xff0000) >> 16
@property
def world(self):
return COURSE_TO_WORLD[self.course]
@property
def road_id(self):
return (self.player_state.f20 & 0xff00) >> 8
@property
def road_direction(self):
return (self.player_state.f20 & 0xffff000000) >> 24
@property
def turn_signal(self):
signal_code = self.player_state.f20 & 0x70
if signal_code == 0x10:
return self.TURN_SIGNALS.RIGHT
elif signal_code == 0x20:
return self.TURN_SIGNALS.LEFT
elif signal_code == 0x40:
return self.TURN_SIGNALS.STRAIGHT
else:
return None
@property
def power_up(self):
return self.player_state.f20 & 0xf
@property
def has_feather_boost(self):
return self.power_up == 0
@property
def has_draft_boost(self):
return self.power_up == 1
@property
def has_aero_boost(self):
return self.power_up == 5
@property
def cadence(self):
return int((self.player_state.cadenceUHz * 60) / 1000000)
def __getattr__(self, item):
"""
First try to get the requested item from the player_state. When it's
not found, try to get it directly from the wrapper.
"""
try:
return getattr(self.player_state, item)
except AttributeError:
return self.__getattribute__(item) | zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/zwift/world.py | world.py |
import xml.etree.ElementTree as ET
def parse_cadence(row: str):
    """Parses a cadence string like 'Xrpm' and returns a tuple (cadence as int, rest of the row);
    returns (-1, row) when no cadence value is present."""
keyword = 'rpm'
if keyword not in row: return -1, row
if ',' in row: keyword += ','
cadence, rest = row.split(keyword)
if '/' in cadence: cadence = sum([int(c) for c in cadence.split('/')])/2
return int(cadence), rest
def parse_power(row: str) -> float:
    """Parses a power string like 'X% FTP' or 'XW' and returns X/100 as a float."""
power = row
if '%' in power: power, _ = power.split('%')
if 'W' in power: power, _ = power.split('W')
return float(power)/100
def parse_duration(row: str) -> int:
    """Parses a duration string made of 'Xhr', 'Ymin' and/or 'Zsec' parts and returns the total duration in seconds"""
    import re
    def filter_digits(s): return "".join(re.findall(r'\d+', s))
seconds = 0
if 'hr' in row:
hr, row = row.split('hr')
seconds += int(filter_digits(hr)) * 3600
if 'min' in row:
min, row = row.split('min')
seconds += int(filter_digits(min)) * 60
if 'sec' in row:
sec, _ = row.split('sec')
seconds += int(filter_digits(sec))
return seconds
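# Illustrative sketch of the parsing helpers above (added example, not part of the original module):
#
#   parse_duration('1hr 30min')        # -> 5400 (seconds)
#   parse_duration('45sec')            # -> 45
#   parse_cadence('85rpm, 95% FTP')    # -> (85, ' 95% FTP')
#   parse_power('95% FTP')             # -> 0.95 (fraction of FTP)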
class ZSteadyState:
def __init__(self, row: str) -> None:
duration, row = [r.strip() for r in row.split('@')]
duration = parse_duration(duration)
cadence, row = parse_cadence(row)
self.duration = duration
self.power = parse_power(row)
self.cadence = cadence
def __repr__(self) -> str:
        return f'SteadyState (duration: {self.duration} power: {self.power} cadence: {self.cadence})'
def to_xml(self, root: ET.Element) -> ET.Element:
"""Creates an XML element from the steady state interval data
Params
root : ET.Element
Root of the created steady state interval XML element
"""
interval = ET.SubElement(root, 'SteadyState')
interval.set('Duration', str(self.duration))
interval.set('Power', str(self.power))
if self.cadence > 0: interval.set('Cadence', str(self.cadence))
return interval
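# Illustrative sketch (added example, not part of the original module): serialising one steady-state
# interval under a parent <workout> element.
#
#   import xml.etree.ElementTree as ET
#   workout = ET.Element('workout')
#   ZSteadyState('3min @ 100rpm, 95% FTP').to_xml(workout)
#   # -> <SteadyState Duration="180" Power="0.95" Cadence="100"/>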
class ZRangedInterval():
def __init__(self, row: str) -> None:
duration, row = row.split('from')
cadence = -1
if '@' in duration:
duration, cadence = duration.split('@')
cadence, _ = parse_cadence(cadence)
duration = parse_duration(duration)
from_power, to_power = [parse_power(p) for p in row.split('to')]
self.duration = duration
self.from_power = from_power
self.to_power = to_power
self.cadence = cadence
self.name = "Warmup" if from_power < to_power else "Cooldown"
def __repr__(self) -> str:
return f'{self.name} (duration: {self.duration} from_power: {self.from_power} to_power: {self.to_power} cadence: {self.cadence})'
def to_xml(self, root: ET.Element) -> ET.Element:
"""Creates an XML element from the ranged interval interval data
Params
root : ET.Element
Root of the created free ranged interval XML element
"""
interval = ET.SubElement(root, self.name)
interval.set('Duration', str(self.duration))
interval.set('PowerLow', str(self.from_power))
interval.set("PowerHigh", str(self.to_power))
if self.cadence > 0: interval.set('Cadence', str(self.cadence))
class ZIntervalsT():
def __init__(self, row: str):
number, rest = row.split('x')
rest = rest.replace("rpm,", 'rpm')
first_interval, second_interval = [ZSteadyState(r) for r in rest.split(',')]
self.number = number
self.first_interval = first_interval
self.second_interval = second_interval
def __repr__(self) -> str:
return f'IntervalT ({self.number} x {self.first_interval}, {self.second_interval})'
def to_xml(self, root: ET.Element) -> ET.Element:
"""Creates an XML element from the intervals data
Params
root : ET.Element
Root of the created free ride intervals XML element
"""
interval = ET.SubElement(root, 'IntervalsT')
interval.set('Repeat', str(self.number))
interval.set('OnDuration', str(self.first_interval.duration))
interval.set('OffDuration', str(self.second_interval.duration))
interval.set("OnPower", str(self.first_interval.power))
interval.set('OffPower', str(self.second_interval.power))
interval.set('Cadence', str(self.first_interval.cadence))
interval.set("CadenceResting", str(self.second_interval.cadence))
class ZFreeRide():
def __init__(self, row: str):
duration, _ = row.split('free ride')
cadence = -1
if '@' in duration:
duration, cadence = duration.split("@")
            cadence, _ = parse_cadence(cadence)  # parse_cadence returns (cadence, rest); keep only the cadence
duration = parse_duration(duration)
self.duration = duration
self.cadence = cadence
self.flat_road = 1
def __repr__(self) -> str:
return f'ZFreeRide (duration: {self.duration} cadence: {self.cadence})'
def to_xml(self, root: ET.Element) -> ET.Element:
"""Creates an XML element from the free ride interval data
Params
root : ET.Element
Root of the created free ride interval XML element
"""
interval = ET.SubElement(root, 'FreeRide')
interval.set('Duration', str(self.duration))
interval.set('FlatRoad', str(self.flat_road))
if self.cadence > 0: interval.set("Cadence", str(self.cadence))
pass | zwift-workouts-parser | /zwift_workouts_parser-1.0.0-py3-none-any.whl/zwift_parser/zwift_intervals.py | zwift_intervals.py |
from zwift_workout import ZWorkout, ZWorkoutParseMode
class Parser:
"""
A class is used to parse any **bike** zwift workout, presented on the
www.whatsonzwift.com
"""
def __init__(self, export_dir, urls, skip: bool = False, replace: bool = False) -> None:
"""
Parameters
----------
export_dir : str
The folder that is used to save all the parsed workouts.
urls : List[str]
A list of urls that need to be parsed, can be either a
direct link to a single workout, plan or a page which
contains multiple plans/workouts.
skip : Bool
Should the workouts that can be downloaded be skipped by
the parser?
replace : Bool
Should the workouts that can be downloaded be replaced with
the files uploaded to the site?
"""
self.export_dir = export_dir
self.skip = skip
self.replace = replace
for i, url in enumerate(urls):
print(f'Parsing url {url} ({i+1}/{len(urls)})')
parsed = self.__try_parse(url)
if not parsed:
print(f"Couldn't find a parser for {url} hence skipping it.")
continue
def __try_parse(self, url):
        parsed = self.__try_parse_plans(url)
        if not parsed:
            parsed = self.__try_parse_workout(url)
return parsed
def __try_parse_plans(self, url):
plans_data = Parser.__get_web_content(url, 'div', 'card')
        if not plans_data: return False
any_parsed = False
for i, plan_data in enumerate(plans_data):
card_sports = plan_data.find('div', class_='card-sports')
if not card_sports: continue
card_classes = card_sports.i['class']
valid = ZWorkout.is_valid_sport_type(card_classes)
url = plan_data.find('a', class_='button')['href']
if not valid:
print(f"Couldn't parse {url} because some of the {card_classes} sports are not suppored yet")
continue
print(f"Parsing plan ({i+1}/{len(plans_data)})")
self.__try_parse_workout(url)
any_parsed = True
return any_parsed
def __try_parse_workout(self, url):
workouts_data = Parser.__get_web_content(url, 'article', 'workout')
if not workouts_data:
print(f"Couldn't get workout data by {url} for unknown reason.")
return False
for i, workout_data in enumerate(workouts_data):
print(f"- Parsing workout ({i+1}/{len(workouts_data)})")
mode = ZWorkoutParseMode.DEFAULT
if self.skip: mode = ZWorkoutParseMode.SKIP
elif self.replace: mode = ZWorkoutParseMode.REPLACE
ZWorkout(workout_data, mode).save(self.export_dir)
return True
def __get_web_content(url, tag, tag_class):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36 Vivaldi/4.3',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'
}
import ssl, certifi, urllib.request
req = urllib.request.Request(url, headers=headers)
context = ssl.create_default_context(cafile=certifi.where())
response = urllib.request.urlopen(req, context=context)
content = response.read().decode('utf-8')
from bs4 import BeautifulSoup
soup = BeautifulSoup(content, features='html.parser')
return soup.find_all(tag, class_ = tag_class)
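# Illustrative command-line sketch (added example, not part of the original module); the URL is a
# placeholder for any whatsonzwift.com workout or plan page.
#
#   python zwift_parser.py https://whatsonzwift.com/workouts/<plan-or-workout>
#   python zwift_parser.py --skip -ed export https://whatsonzwift.com/workouts/<plan-or-workout>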
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Parses Zwift workouts from www.whatsonzwift.com")
parser.add_argument('--skip', action='store_true', help='skips workouts which can be downloaded from the site')
    parser.add_argument('--replace', action='store_true', help='replaces workouts which can be downloaded from the site with their uploaded files')
parser.add_argument('urls', metavar='URLs', type=str, nargs="+", help="an URL of the workout to parse")
parser.add_argument('-ed', '--export_dir', nargs="?", default='export', help="output directory of the parsed workouts")
args = parser.parse_args()
if args.urls: Parser(**vars(args)) | zwift-workouts-parser | /zwift_workouts_parser-1.0.0-py3-none-any.whl/zwift_parser/zwift_parser.py | zwift_parser.py |
from typing import List
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup, element
from zwift_intervals import *
from enum import Enum
class ZWorkoutParseMode(Enum):
DEFAULT = 1
SKIP = 2
REPLACE = 3
class ZWorkout():
def is_valid_sport_type(class_values: List[str]):
"""Checks if workout's sport type is supported by the parser
At the moment the parser supports only the bike workouts,
so this function checks if there is a bike type in the sport
types.
Parameters
----------
class_values : List[str]
A list of the class values on the workout's html page
"""
return len([s for s in class_values if 'bike' in s]) > 0
def parse_interval(raw_str: str):
"""Returns an interval based on some specific format of input raw interval string
Return
------
ZFreeRide - If the raw interval string contains a 'free ride' sub-string in it
for example '10 min free ride'
ZRangedInterval - If the raw interval string contains a 'from','to' pair of the sub-strings
for example '1 min from 50 to 90% FTP'
ZIntervalT - If the raw interval string contains a 'x' symbol (meaning times)
for example '10x 3min @ 100% FTP, 1 min @ 55% FTP'
ZSteadyState - Otherwise
for example '3min @ 100rpm, 95% FTP'
"""
if 'free ride' in raw_str: return ZFreeRide(raw_str) #10min free ride
if 'from' in raw_str and 'to' in raw_str: return ZRangedInterval(raw_str) #1min from 50 to 90% FTP
if 'x' in raw_str: return ZIntervalsT(raw_str) #10x 3min @ 100% FTP, 1min @ 55% FTP
return ZSteadyState(raw_str) #3min @ 100rpmm, 95% FTP
def __init__(self, article: element.Tag, mode: ZWorkoutParseMode = ZWorkoutParseMode.DEFAULT) -> None:
self.path, self.filename = (None, None)
self.mode = mode
breadcrumbs = article.select_one('div.breadcrumbs')
sport_type = breadcrumbs.find('h4')['class']
self.valid = ZWorkout.is_valid_sport_type(sport_type)
if not self.valid: return
try:
breadcrumbs = [item.string.strip() for item in breadcrumbs]
except Exception as e:
#Sometimes if @ is contained in the breadcrumbs, it might be obfuscated with Cloudflare, so
# it's not really possible to deobfuscate it back. This is why we just ignore it.
self.valid = False
return
breadcrumbs = [slugify(b) for b in breadcrumbs if len(b) > 0 and b != '»' and b != 'Workouts']
self.filename = breadcrumbs.pop(-1)
self.path = '/'.join(breadcrumbs)
self.intervals = []
download_button = [a for a in article.find_all('a') if a.string and 'Download workout' in a.string]
self.download_link = download_button[0]['href'] if download_button and self.mode is not ZWorkoutParseMode.DEFAULT else None
if not self.download_link:
def convert_to_string(data):
output = []
if isinstance(data, element.NavigableString): return data.string
for content in data.contents:
if isinstance(content, str): output.append(content)
else: output.extend([convert_to_string(c) for c in content.contents])
return "".join(output)
data = article.select_one('div.one-third.column.workoutlist')
for div in data.find_all('div'):
interval = "".join([convert_to_string(c) for c in div.contents])
self.intervals.append(ZWorkout.parse_interval(interval))
overview = article.select_one('div.overview')
self.author = 'Zwift Workouts Parser'
self.description = overview.next_sibling
if 'Author:' in overview.next_sibling.get_text():
self.author = overview.next_sibling
self.description = self.author.next_sibling
if not isinstance(self.author, str) and 'Author:' in self.author.get_text():
_, self.author = self.author.get_text().split('Author:')
self.description = self.description.get_text("\n")
self.name = 'Zwift Workout'
self.sport_type = 'bike'
self.lookup = {
'author': self.author,
'name': self.name,
'description': self.description,
'sport_type': self.sport_type,
}
def save(self, export_dir: str):
"""Saves workout to a specific folder
Params
------
export_dir : str
Folder to save the workout
"""
if not self.valid: return
workout_fullname = f"{self.path}/{self.filename}"
text = ""
if not self.download_link:
data = self.to_xml()
import xml.etree.ElementTree as ET
text = ET.tostring(data)
xml_header = b'<?xml version="1.0" encoding="utf-8"?>'
text = BeautifulSoup(text, 'xml').prettify().encode('utf-8')
text = text.replace(xml_header, b'').strip()
elif self.mode is ZWorkoutParseMode.REPLACE:
import requests
url = f"https://whatsonzwift.com{self.download_link}"
text = requests.get(url, allow_redirects=True).content
else:
print(f"-- Skipped workout {workout_fullname}")
return
directory = f"{export_dir}/{self.path}"
from os import path, makedirs
if not path.isdir(directory): makedirs(directory)
with open(f"{directory}/{slugify(self.filename, True)}.zwo", 'wb') as f:
f.write(text)
file_version = "Original" if self.download_link else "Parsed"
print(f"-- Parsed workout {workout_fullname} ({file_version})")
def to_xml(self, root : ET.Element = None) -> ET.Element:
"""Creates an XML element from the workout data
Params
root : ET.Element
Root of the created workout XML element
"""
root = ET.Element('workout_file')
for k,v in self.lookup.items():
ET.SubElement(root, k).text = v
        # No tag names are collected anywhere in the class, so the <tags> element is emitted empty;
        # iterating the freshly created element never enters the loop body.
        tags = ET.SubElement(root, 'tags')
        for t in tags:
            tag = ET.SubElement(tags, 'tag')
            tag.set('name', t)
workout = ET.SubElement(root, 'workout')
for i in self.intervals: i.to_xml(workout)
return root
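# Illustrative sketch (added example, not part of the original class): shape of the .zwo document
# produced by ZWorkout.to_xml() for a workout containing a single steady-state interval.
#
#   <workout_file>
#     <author>Zwift Workouts Parser</author>
#     <name>Zwift Workout</name>
#     <description>...</description>
#     <sport_type>bike</sport_type>
#     <tags/>
#     <workout>
#       <SteadyState Duration="180" Power="0.95" Cadence="100"/>
#     </workout>
#   </workout_file>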
import unicodedata
import re
def slugify(value, allow_unicode=False):
"""
Taken from https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value)
return re.sub(r'[-\s]+', '-', value).strip('-_') | zwift-workouts-parser | /zwift_workouts_parser-1.0.0-py3-none-any.whl/zwift_parser/zwift_workout.py | zwift_workout.py |
__version__ = '0.1.0'
def zwixel():
    def rgb(fg=(255, 255, 255), bg=None):
        # Build an ANSI truecolor escape sequence; add a background colour only when one is given.
        if bg is not None:
            return f"\033[38;2;{fg[0]};{fg[1]};{fg[2]};48;2;{bg[0]};{bg[1]};{bg[2]}m"
        return f"\033[38;2;{fg[0]};{fg[1]};{fg[2]}m"
boards = [[" " for i in range(20)] for i in range(20)]
reset = "\033[0m"
black = rgb(bg = [0, 0, 0]) + " " + reset
lpp = [[0, 0], [1, 0], [3, 0], [4, 0], [9, 0], [11, 0], [16, 0], [2, 1], [3, 1], [8, 1], [10, 1], [13, 1], [15, 1], [16, 1], [17, 1], [2, 2], [14, 2], [16, 2], [17, 2], [18, 2], [1, 3], [16, 3], [17, 3], [2, 5], [0, 10], [1, 10], [0, 11], [19, 10], [1, 13], [0, 14], [0, 15], [1, 15], [0, 16], [1, 16], [2, 16], [4, 17], [6, 17], [7, 17], [8, 17], [9, 17], [16, 17], [6, 18], [7, 18], [6, 19]]
lp = rgb(bg = [156, 36, 176]) + " " + reset
for i in lpp: boards[i[1]][i[0]] = lp
dpp = [[5, 0], [6, 0], [13, 0], [18, 0], [19, 1], [19, 2], [0, 3], [18, 6], [0, 7], [2, 8], [2, 9], [0, 18], [1, 18], [8, 18], [9, 18], [13, 18], [2, 19], [3, 19], [4, 19], [18, 19], [19, 19]]
dp = rgb(bg = [101, 25, 114]) + " " + reset
for i in dpp: boards[i[1]][i[0]] = dp
bp = [[2, 0], [10, 0], [1, 1], [4, 1], [6, 1], [9, 1], [12, 1], [14, 1], [3, 2], [4, 2], [5, 2], [12, 2], [13, 2], [15, 2], [2, 3], [3, 3], [15, 3], [3, 4], [17, 4], [0, 5], [2, 6], [19, 6], [18, 7], [1, 8], [19, 8], [0, 9], [2, 10], [1, 11], [2, 11], [18, 11], [19, 12], [18, 13], [1, 14], [19, 14], [19, 15], [19, 16], [1, 17], [5, 17], [10, 17], [11, 17], [12, 17], [14, 17], [18, 17], [3, 18], [4, 18], [5, 18], [14, 18], [15, 18], [16, 18], [17, 18], [18, 18], [7, 19], [8, 19]]
b = rgb(bg = [63, 81, 181]) + " " + reset
for i in bp: boards[i[1]][i[0]] = b
bp = [[0, 2], [1, 2], [18, 3], [19, 3], [0, 4], [1, 4], [2, 4], [18, 4], [19, 4], [1, 5], [18, 5], [19, 5], [0, 6], [1, 6], [1, 7], [2, 7], [19, 7], [0, 8], [1, 9], [19, 9], [19, 11], [0, 12], [1, 12], [18, 12], [0, 13], [19, 13], [7, 7], [8, 7], [7, 8], [8, 8], [12, 8], [13, 8], [12, 9], [13, 9], [7, 11], [8, 11], [9, 11], [9, 12], [10, 12], [11, 12], [12, 12], [13, 12]]
for i in bp: boards[i[1]][i[0]] = black
wp = [[6, 2], [7, 2], [8, 2], [9, 2], [10, 2], [11, 2], [4, 3], [5, 3], [11, 3], [12, 3], [13, 3], [14, 3], [4, 4], [14, 4], [15, 4], [16, 4], [3, 5], [4, 5], [16, 5], [17, 5], [3, 6], [17, 6], [3, 7], [17, 7], [3, 8], [17, 8], [18, 8], [3, 9], [18, 9], [3, 10], [18, 10], [3, 11], [17, 11], [2, 12], [17, 12], [2, 13], [17, 13], [2, 14], [17, 14], [18, 14], [2, 15], [18, 15], [3, 16], [18, 16], [4, 16], [5, 16], [6, 16], [7, 16], [8, 16], [9, 16], [10, 16], [11, 16], [12, 16], [13, 16], [14, 16], [15, 16], [16, 16], [17, 16]]
w = rgb(bg = [255, 255, 255]) + " " + reset
for i in wp: boards[i[1]][i[0]] = w
for i, v in enumerate(boards[0]):
if v == " ": boards[0][i] = black
for i, v in enumerate(boards[1]):
if v == " ": boards[1][i] = black
for i, v in enumerate(boards[17]):
if v == " ": boards[17][i] = black
for i, v in enumerate(boards[18]):
if v == " ": boards[18][i] = black
for i, v in enumerate(boards[19]):
if v == " ": boards[19][i] = black
for i, v in enumerate(boards):
for j, k in enumerate(v):
if boards[i][j] == " ": boards[i][j] = rgb(bg = [255, 235, 59]) + " " + reset
return boards | zwixel-pwack | /zwixel_pwack-0.1.0-py3-none-any.whl/zwixel_pwack/__init__.py | __init__.py |
import talib
def indicator(df):
    """Append a standard set of TA-Lib indicator columns to df in place.
    Expects a DataFrame with 'open', 'high', 'low', 'close' and 'vol' columns."""
df['ma5'] = talib.MA(df['close'], timeperiod=5)
df['ma10'] = talib.MA(df['close'], timeperiod=10)
df['ma20'] = talib.MA(df['close'], timeperiod=20)
df['macd'], df['macdsignal'], df['macdhist'] = talib.MACD(df['close'], fastperiod=12, slowperiod=26, signalperiod=9)
df['rsi'] = talib.RSI(df['close'], timeperiod=14)
df['cci'] = talib.CCI(df['high'], df['low'], df['close'], timeperiod=14)
df['dx'] = talib.DX(df['high'], df['low'], df['close'], timeperiod=14)
df['slowk'], df['slowd'] = talib.STOCH(df['high'], df['low'], df['close'], fastk_period=9, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)
df['fastk'], df['fastd'] = talib.STOCHF(df['high'], df['low'], df['close'], fastk_period=9, fastd_period=3, fastd_matype=0)
df['willr'] = talib.WILLR(df['high'], df['low'], df['close'], timeperiod=14)
df['mom'] = talib.MOM(df['close'], timeperiod=10)
df['roc'] = talib.ROC(df['close'], timeperiod=10)
df['trix'] = talib.TRIX(df['close'], timeperiod=30)
    df['aroondown'], df['aroonup'] = talib.AROON(df['high'], df['low'], timeperiod=14)  # TA-Lib returns (aroondown, aroonup) in that order
df['aroonosc'] = talib.AROONOSC(df['high'], df['low'], timeperiod=14)
df['bop'] = talib.BOP(df['open'], df['high'], df['low'], df['close'])
df['midpoint'] = talib.MIDPOINT(df['close'], timeperiod=14)
df['midprice'] = talib.MIDPRICE(df['high'], df['low'], timeperiod=14)
    df['sar'] = talib.SAR(df['high'], df['low'], acceleration=0, maximum=0)  # note: TA-Lib's usual defaults are acceleration=0.02, maximum=0.2
df['trange'] = talib.TRANGE(df['high'], df['low'], df['close'])
df['atr'] = talib.ATR(df['high'], df['low'], df['close'], timeperiod=14)
df['natr'] = talib.NATR(df['high'], df['low'], df['close'], timeperiod=14)
df['adx'] = talib.ADX(df['high'], df['low'], df['close'], timeperiod=14)
df['adxr'] = talib.ADXR(df['high'], df['low'], df['close'], timeperiod=14)
df['apo'] = talib.APO(df['close'], fastperiod=12, slowperiod=26, matype=0)
df['ppo'] = talib.PPO(df['close'], fastperiod=12, slowperiod=26, matype=0)
df['mfi'] = talib.MFI(df['high'], df['low'], df['close'], df['vol'], timeperiod=14)
df['minus_di'] = talib.MINUS_DI(df['high'], df['low'], df['close'], timeperiod=14)
df['minus_dm'] = talib.MINUS_DM(df['high'], df['low'], timeperiod=14)
df['plus_di'] = talib.PLUS_DI(df['high'], df['low'], df['close'], timeperiod=14)
df['plus_dm'] = talib.PLUS_DM(df['high'], df['low'], timeperiod=14)
df['rocp'] = talib.ROCP(df['close'], timeperiod=10)
df['rocr'] = talib.ROCR(df['close'], timeperiod=10)
df['rocr100'] = talib.ROCR100(df['close'], timeperiod=10)
df['ultosc'] = talib.ULTOSC(df['high'], df['low'], df['close'], timeperiod1=7, timeperiod2=14, timeperiod3=28)
df['ema5'] = talib.EMA(df['close'], timeperiod=5)
df['ema10'] = talib.EMA(df['close'], timeperiod=10)
df['ema20'] = talib.EMA(df['close'], timeperiod=20)
df['dema5'] = talib.DEMA(df['close'], timeperiod=5)
df['dema10'] = talib.DEMA(df['close'], timeperiod=10)
df['dema20'] = talib.DEMA(df['close'], timeperiod=20)
df['tema5'] = talib.TEMA(df['close'], timeperiod=5)
df['tema10'] = talib.TEMA(df['close'], timeperiod=10)
df['tema20'] = talib.TEMA(df['close'], timeperiod=20)
df['trima5'] = talib.TRIMA(df['close'], timeperiod=5)
df['trima10'] = talib.TRIMA(df['close'], timeperiod=10)
df['trima20'] = talib.TRIMA(df['close'], timeperiod=20)
df['wma5'] = talib.WMA(df['close'], timeperiod=5)
df['wma10'] = talib.WMA(df['close'], timeperiod=10)
df['wma20'] = talib.WMA(df['close'], timeperiod=20)
df['kama5'] = talib.KAMA(df['close'], timeperiod=5)
df['kama10'] = talib.KAMA(df['close'], timeperiod=10)
df['kama20'] = talib.KAMA(df['close'], timeperiod=20)
df['obv'] = talib.OBV(df['close'], df['vol']) | zwkit | /zwkit-0.4.34-py3-none-any.whl/indicator/base.py | base.py |
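# Illustrative usage sketch for the indicator() helper above (added example, not part of the
# original module); the CSV path is a placeholder.
#
#   import pandas as pd
#   bars = pd.read_csv('bars_000001.csv')     # must contain open/high/low/close/vol columns
#   indicator(bars)                           # adds ma5, macd, rsi, ... columns in place
#   print(bars[['close', 'ma5', 'rsi']].tail())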
import pandas as pd
import pywencai as wc
import tushare as ts
import os
from kit import num_kit, path_kit
import pathlib
'''
Dataset loading utilities.
'''
# Base data import stage
class base_data_loader:
"""
    Base data loader.
    Gets the stock list via a wencai (问财) query and
    downloads daily bar data through tushare.
"""
def __init__(self, path="../data/", module="data/", file_name="base_data.csv"):
self.path = path
self.module = module
self.file_name = file_name
self.module_path = path + module
self.question = None
self.start = None
self.end = None
self.token = None
        self.symbol_index = dict()  # index of trade dates per symbol
        self.filter = set()  # set of symbols to exclude
self.data = pd.DataFrame()
path_kit.create_dir(self.module_path)
def __daily_data(self, symbol, start, end, token):
"""
        Fetch daily bar data for a single symbol.
:param symbol:
:param start:
:param end:
:return:
"""
api = ts.pro_api(token)
df = ts.pro_bar(
ts_code=symbol,
api=api,
start_date=str(start) + "0101",
end_date=str(end) + "1231",
adjfactor=True,
asset="E", # 证券类型 E:股票和交易所基金,I:沪深指数,C:数字货币,FT:期货 FD:基金/O期权/H港股/CB可转债
freq="D", # D:日线 W:周 M:月
adj="hfq", # 不复权:None 前复权:qfq 后复权:hfq
retry_count=99 # 重试次数
)
return df[::-1]
def __daily(self, start_date, end_date, token, symbols: list):
"""
        Fetch daily bar data for a list of symbols and concatenate the results.
:param start_date:
:param end_date:
:param symbols:
:return:
"""
result = pd.DataFrame()
if len(symbols) == 0:
return pd.DataFrame()
for symbol in symbols:
df = self.__daily_data(symbol, start_date, end_date, token)
result = pd.concat([result, df])
return result
def filter_symbols(self, symbols: list):
"""
        Add symbols to the exclusion set.
        :param symbols: list of stock codes to exclude
:return:
"""
symbols_set = set(symbols)
self.filter.update(symbols_set)
def __get_symbols_by_wc(self, question, columns: list):
"""
        Build the stock list for each year
        by querying wencai (问财).
"""
result = pd.DataFrame()
for i in range(self.start, self.end + 1):
quest = question % (i, i - 1)
data = wc.get(question=quest, loop=True)
data = data[columns]
data = data[~data['股票代码'].isin(self.filter)]
data['trade_date'] = i
result = pd.concat([result, data])
return result
def get_data(self, question, start, end, token):
"""
        Build the full dataset.
        Reads the local cache first and falls back to downloading from the internet.
:param token:
:param end:
:param start:
:param question:
        :param data_path: default path of the dataset cache
:return:
"""
self.question = question
self.start = start
self.end = end
if os.path.exists(self.module_path + self.file_name):
print("读取本地数据集")
self.data = pd.read_csv(self.module_path + self.file_name)
else:
print("从互联网获取数据集")
symbols_list = self.__get_symbols_by_wc(self.question, columns=['股票代码'])
print("开始遍历")
for index, symbol in enumerate(symbols_list['股票代码'].unique()):
print("数据进度百分比:%s" % (index / len(symbols_list['股票代码'].unique()) * 100), end='\r', flush=True)
self.data = pd.concat([self.data, self.__daily_data(symbol, self.start, self.end, token)])
        # reset the index
        self.data = self.data.reset_index(drop=True)
        # convert the trade dates to strings
        self.data['trade_date'] = self.data['trade_date'].apply(lambda x: str(x))
        # cast numeric values in the frame to float
        self.data = self.data.applymap(lambda x: float(x) if isinstance(x, (int, float)) else x)
return self.data
def observe(self, mlflow):
"""
        Inspect the dataset and log summary reports.
:return:
"""
        # data report
mlflow.log_text("\n".join(self.__data_report()), self.module + "data_report.txt")
        # number of trade dates per symbol
mlflow.log_dict(self.data.groupby('ts_code')['trade_date'].count().to_dict(), self.module + "data_count.txt")
        # missing trade dates for each symbol
mlflow.log_text(self.__row_nan_trade_date(), self.module + "data_trade_nan.txt")
def save(self, path=None, name=None):
"""
        Save the dataset to CSV.
:param name:
:param path:
:return:
"""
file_path = self.module_path if path is None else path
file_name = self.file_name if name is None else name
self.data.to_csv(file_path + file_name, index=False, encoding='utf-8', )
def __data_report(self):
"""
数据报告
常规基础数据
:return:
"""
data = []
        # assemble the report lines
        data.append("start date: %s" % self.start)
        data.append("end date: %s" % self.end)
        data.append("total rows: %s" % len(self.data))
        data.append("number of columns: %s" % len(self.data.columns))
        data.append("column names: %s" % self.data.columns.tolist())
        data.append("missing values: %s" % self.data.isnull().sum())
return data
def __row_nan_trade_date(self):
text = ''
symbols = self.data["ts_code"].unique()
trades = self.data["trade_date"].unique()
for symbol in symbols:
trade_list = pd.DataFrame(trades)
trade_list.columns = ['trade_date']
trade_data = pd.merge(trade_list, self.data[self.data['ts_code'] == symbol], on='trade_date', how='left')
trade_data = trade_data[trade_data['ts_code'].isnull()]
if len(trade_data) != 0:
text = text + symbol + ','
text = text + ",".join(trade_data['trade_date'].astype('str').tolist()) + '\n'
return text
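# Illustrative usage sketch for base_data_loader above (added example, not part of the original
# module); the wencai question and the tushare token are placeholders.
#
#   loader = base_data_loader(path="../data/", module="data/")
#   df = loader.get_data(question="<wencai question with two %s year placeholders>",
#                        start=2018, end=2020, token="<TUSHARE_TOKEN>")
#   loader.save()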
# Feature engineering stage
class feature_data_loader:
"""
    Feature engineering for the dataset:
    1. add features
    2. inspect the dataset
    3. save the dataset
"""
def __init__(self, base_data=None, path="../data/", module="data/"):
self.data = base_data
self.features_list = []
self.result = pd.DataFrame()
self.path = path
self.module = module
self.module_path = path + module
def add_feature(self, feature):
"""
        Register feature functions.
:param feature:
:return:
"""
for func in feature:
self.features_list.append(func)
        # after adding features, re-initialise the dataset
def obverse(self):
"""
        Inspect the dataset.
:return:
"""
pass
def create_execute(self, path=None):
"""
        Run feature engineering and write one CSV per symbol.
:return:
"""
file_path = self.module_path if path is None else path
path_kit.create_dir(file_path + "/feature")
symbol_list = self.data['ts_code'].unique()
columns = self.data.columns
for index, symbol in enumerate(symbol_list):
print("数据进度百分比:%s" % (index / len(symbol_list) * 100))
symbol_data = pd.DataFrame(self.data[self.data['ts_code'] == symbol])
symbol_data.reset_index(drop=True, inplace=True)
for func in self.features_list:
func(symbol_data)
            # split symbol_data into the original columns and the newly added feature columns
symbol_data_left = symbol_data[columns]
symbol_data_right = symbol_data[symbol_data.columns[~symbol_data.columns.isin(symbol_data_left.columns)]]
symbol_data_right.applymap(lambda x: round(float(x), 2) if isinstance(x, (int, float)) else x)
            # merge the new feature columns back onto the original columns
data = pd.merge(symbol_data_left, symbol_data_right, left_index=True, right_index=True)
            # drop any row that contains missing values
data.dropna(axis=0, how='any', inplace=True)
data.to_csv(file_path + "/feature/" + symbol + ".csv", index=False, encoding='utf-8')
return self.result
def to_execute(self, data, indicator):
"""
        Apply the indicator functions to the data and return the combined frame.
:return:
"""
symbol_list = data['ts_code'].unique()
columns = data.columns
for index, symbol in enumerate(symbol_list):
print("数据进度百分比:%s" % (index / len(symbol_list) * 100))
symbol_data = pd.DataFrame(data[data['ts_code'] == symbol])
symbol_data.reset_index(drop=True, inplace=True)
for func in indicator:
func(symbol_data)
            # split symbol_data into the original columns and the newly added feature columns
symbol_data_left = symbol_data[columns]
symbol_data_right = symbol_data[symbol_data.columns[~symbol_data.columns.isin(symbol_data_left.columns)]]
symbol_data_right.applymap(lambda x: round(float(x), 2) if isinstance(x, (int, float)) else x)
            # merge the new feature columns back onto the original columns
data1 = pd.merge(symbol_data_left, symbol_data_right, left_index=True, right_index=True)
return data1
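# Illustrative usage sketch for feature_data_loader above (added example, not part of the original
# module); `indicator` stands for any function that appends feature columns to a DataFrame in place.
#
#   features = feature_data_loader(base_data=df, path="../data/", module="data/")
#   features.add_feature([indicator])
#   features.create_execute()    # writes one CSV per symbol under ../data/data/feature/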
# Train/test split stage
class trains_data_loader:
def __init__(self, path="../data/", module="data/"):
self.feature_dir = None
self.path = path
self.module = module
self.module_path = path + module
self.train_X = pd.DataFrame()
self.train_y = pd.DataFrame()
self.test_X = pd.DataFrame()
self.test_y = pd.DataFrame()
self.drop_column = []
def load_feature_dir(self, feature_dir):
self.feature_dir = feature_dir
def drop_columns(self, columns):
"""
        Register columns to drop from the features.
:param columns:
:return:
"""
for column in columns:
self.drop_column.append(column)
def split_by_time(self, trains_start, trains_end, test_start, test_end):
"""
:param test_end:
:param test_start:
:param trains_end:
:param trains_start:
:param trans:
:param start:
:param end:
:return:
"""
self.train_X = pd.DataFrame()
self.train_y = pd.DataFrame()
self.test_X = pd.DataFrame()
self.test_y = pd.DataFrame()
file_list = os.listdir(self.module_path + self.feature_dir)
for index, file in enumerate(file_list):
print(f"读取进度:{(index / len(file_list)) * 100}")
data = pd.read_csv(self.module_path + self.feature_dir + file, encoding='utf-8')
if len(data) == 0:
continue
trains_x = data[(data['trade_date'] > trains_start) & (data['trade_date'] < trains_end)]
if len(trains_x) == 0:
continue
trains_y = trains_x['flag']
trains_x = trains_x.drop(self.drop_column, axis=1)
self.train_X = pd.concat([self.train_X, trains_x])
self.train_y = pd.concat([self.train_y, trains_y])
test_X = data[(data['trade_date'] > test_start) & (data['trade_date'] < test_end)]
if len(test_X) == 0:
continue
test_y = test_X['flag']
test_X = test_X.drop(self.drop_column, axis=1)
self.test_X = pd.concat([self.test_X, test_X])
self.test_y = pd.concat([self.test_y, test_y])
def obverse(self, mlflow):
pass
# mlflow.log_metric("train_label_1", len(self.train_X[self.train_X['flag'] == 1]) / len(self.train_X) * 100)
# mlflow.log_metric("train_label_0", len(self.train_X[self.train_X['flag'] == 1]) / len(self.train_X) * 100)
# mlflow.log_metric("train_label_-1", len(self.train_X[self.train_X['flag'] == 1]) / len(self.train_X) * 100)
def save(self, path=None):
"""
        Save the train/test splits as pickle files.
:param path:
:return:
"""
file_path = self.module_path if path is None else path
self.train_X.to_pickle(file_path + 'train_X.pkl')
self.train_y.to_pickle(file_path + 'train_y.pkl')
self.test_X.to_pickle(file_path + 'test_X.pkl')
self.test_y.to_pickle(file_path + 'test_y.pkl')
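# Illustrative usage sketch for trains_data_loader above (added example, not part of the original
# module); the dates and dropped columns are placeholders.
#
#   trains = trains_data_loader(path="../data/", module="data/")
#   trains.load_feature_dir("feature/")
#   trains.drop_columns(["ts_code", "trade_date", "flag"])
#   trains.split_by_time(20180101, 20191231, 20200101, 20201231)
#   trains.save()    # writes train_X.pkl / train_y.pkl / test_X.pkl / test_y.pkl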
class backtrader_data_loader:
def __init__(self, path='../data/', module='data/', csv_data='dataset.csv'):
self.path = path
self.module = module
self.data_path = self.path + self.module
self.csv_data = csv_data
self.data = pd.DataFrame()
def get_data(self, start, end):
"""
        Un-adjusted OHLC prices combined with indicators computed on adjusted prices.
:param start:
:param end:
:return:
"""
data = pd.read_csv(self.path + self.csv_data)
self.data = data[(data['trade_date'] > start) & (data['trade_date'] < end)]
self.data['open'] = round(self.data['open'] / self.data['adj_factor'], 2)
self.data['high'] = round(self.data['high'] / self.data['adj_factor'], 2)
self.data['low'] = round(self.data['low'] / self.data['adj_factor'], 2)
self.data['close'] = round(self.data['close'] / self.data['adj_factor'], 2)
self.data['amount'] = round(self.data['amount'] / self.data['adj_factor'], 2)
self.data.drop(['pre_close', 'change', 'pct_chg', 'flag'], axis=1, inplace=True)
self.data['trade_date'] = self.data['trade_date'].apply(lambda x: pd.to_datetime(x, format='%Y%m%d'))
self.data = self.data.rename(columns={'trade_date': 'datetime',
'vol': 'volume'
})
return self.data
def save(self, module, name):
path = self.path + module
if not os.path.exists(path):
os.mkdir(path)
self.data.to_csv(path + name, index=False, encoding='utf-8') | zwkit | /zwkit-0.4.34-py3-none-any.whl/data/dataset.py | dataset.py |
import backtrader as bt
import joblib
import pandas as pd
import os
from kit import date_kit
import datetime as dt
import zw_backtrader.trade.trade_detail as td
class base_strategy(bt.Strategy):
"""
    Base strategy.
    Only one stock is held at a time;
    after a sell there is a 10-day cool-down period.
"""
params = (
        ('model_path', ""),  # path of the trained model
        ('log_path', '../data/backtrader/'),  # log directory
        ('log_name', f'logs_{date_kit.now()}.log'),  # log file name
        ('cold_date', 10)  # cool-down period in days
)
def __init__(self):
        # open positions keyed by symbol
        self.position_list = dict()
        # cool-down filter: symbol -> date until which buys are blocked
        self.filter_dict = dict()
        # collected log lines
        self.log_text = []
def log(self, txt, dt=None):
"""
        Logging function for this strategy
:param txt:
:param dt:
:return:
"""
dt = dt or self.datas[0].datetime.date(0)
self.log_text.append(f'{dt.isoformat()}: {txt}')
print(f'{dt.isoformat()}: {txt}')
def start(self):
"""
        Called once when the strategy starts; loads the trained model.
:return:
"""
self.model = joblib.load(self.params.model_path)
# def prenext(self):
# '''策略准备阶段,对应第1根bar ~ 第 min_period-1 根bar'''
# # 该函数主要用于等待指标计算,指标计算完成前都会默认调用prenext()空函数
# # min_period 就是 __init__ 中计算完成所有指标的第1个值所需的最小时间段
# print('prenext函数')
#
# def nextstart(self):
# '''策略正常运行的第一个时点,对应第 min_period 根bar'''
# # 只有在 __init__ 中所有指标都有值可用的情况下,才会开始运行策略
# # nextstart()只运行一次,主要用于告知后面可以开始启动 next() 了
# # nextstart()的默认实现是简单地调用next(),所以next中的策略逻辑从第 min_period根bar就已经开始执行
# print('nextstart函数')
def next(self):
        # fetch each stock's current bar
        # and run it through the model
for index in range(len(self.datas)):
self._next_execute(index)
def _next_execute(self, index):
symbol = self.datas[index]._name
date = self.datas[index].datetime.date(0)
self._sell_excute(date, index)
if self._predict_excute(index) == 1:
if self._filter_execute(symbol, date):
self._buy_excute(symbol, index)
else:
pass
# self.log(f'近期购买,过滤{symbol}')
else:
pass
# self.log(f'预测不买入{symbol}')
def _filter_execute(self, symbol, date):
        # skip symbols we already hold
        if self.position_list.get(symbol) is not None:
            # already holding this symbol -> do not buy
            return False
        else:
            # check the cool-down period
            filter_date = self.filter_dict.get(symbol)
            if filter_date is not None:
                if date < filter_date:
                    # still inside the cool-down period -> do not buy
                    return False
                else:
                    # the cool-down period has passed -> buying is allowed again,
                    # so drop the symbol from the cool-down filter
                    self.filter_dict.pop(symbol)
                    return True
return True
def _sell_excute(self, date, index):
for k, v in self.position_list.items():
if date > v.sell_date:
self.sell(data=self.datas[index], size=100, exectype=bt.Order.Market, price=self.datas[index].lines.close[0])
def _predict_excute(self, index):
data = self._get_data(datas=self.datas[index], index=index)
return self.model.predict(data)
def _buy_excute(self, symbol, index):
adj_factor = self.datas[index].lines.adj_factor[1]
# open = round(self.datas[index].lines.open[1] / adj_factor, 2)
# close = round(self.datas[index].lines.close[0] / adj_factor, 2)
# high = round(self.datas[index].lines.high[0] / adj_factor, 2)
# low = round(self.datas[index].lines.low[0] / adj_factor, 2)
self.buy(data=self.datas[index], size=100)
def _get_data(self, datas, index):
        # the explicit index into self.datas matters here
data = pd.DataFrame()
for v, k in enumerate(datas._colmapping):
if self.data.data_schema is not None:
if k not in self.data.data_schema:
continue
exec(f"data.loc[0,'{k}'] = self.datas[index].lines.{k}[0]")
return data
def stop(self):
"""
        Called when the strategy stops;
        saves the collected log lines to disk.
:return:
"""
if os.path.exists(self.params.log_path) is False:
os.mkdir(self.params.log_path)
with open(self.params.log_path + self.params.log_name, mode='w') as f:
f.write('\n'.join(self.log_text))
def notify_order(self, order):
"""
        Called whenever an order's status changes.
:param order:
:return:
"""
        # check the order status
if order.status in [order.Submitted, order.Accepted]:
return
        # check whether the order completed
if order.status in [order.Completed]:
if order.isbuy():
                self.log(f'BUY {order.data._name}, price: {order.executed.price}, cost: {order.executed.value}, commission: {order.executed.comm}')
self.position_list.update({order.data._name: td.trade_detail(order.data.datetime.date(0), order.data._name, trade_id=order.ref,
sell_date=order.data.datetime.date(3))})
else:
                self.log(f'SELL {order.data._name}, price: {order.executed.price}, cost: {order.executed.value}, commission: {order.executed.comm}')
self.position_list.pop(order.data._name)
self.filter_dict[order.data._name] = order.data.datetime.date(0) + dt.timedelta(days=self.params.cold_date)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
            self.log(f'Order failed: {order.data._name} status: {order.getstatusname()}')
def notify_trade(self, trade):
"""
        Called whenever a trade's status changes.
:param trade:
:return:
"""
if not trade.isclosed:
return
        self.log(f'Profit, gross: {trade.pnl}, net: {trade.pnlcomm}')
# def notify_cashvalue(self, cash, value):
# """
# 资金变化时执行
# :param cash:
# :param value:
# :return:
# """
# self.log(f'资金:{cash},市值:{value}')
# def notify_fund(self, cash, value, fundvalue, shares):
# """
# 资金变化时执行
# :param cash:
# :param value:
# :param fundvalue:
# :param shares:
# :return:
# """
# self.log(f'资金:{cash},市值:{value},净值:{fundvalue},持仓:{shares}')
# class test_strategy(bt.Strategy):
# """
# 策略基类
# """
#
# def __init__(self):
# # 买卖列表
# self.buying_list = []
# self.selling_list = []
#
# def next(self):
# for index in range(len(self.datas)):
# data = self._get_data(datas=self.datas[index], index=index)
# print(data)
#
# # 进行预测
#
# def _get_data(self, datas, index):
# data = pd.DataFrame()
# for v, k in enumerate(datas._colmapping):
# if k is None:
# continue
# exec(f"data.loc[0,'{k}'] = self.datas[index].lines.{k}[0]")
# return data | zwkit | /zwkit-0.4.34-py3-none-any.whl/zw_backtrader/strategy/strategy.py | strategy.py |
import os
import platform
import sys
import struct
# Because ctypes is new from Python 2.5, so pytransform doesn't work
# before Python 2.5
#
from ctypes import cdll, c_char, c_char_p, c_int, c_void_p, \
pythonapi, py_object, PYFUNCTYPE, CFUNCTYPE
from fnmatch import fnmatch
#
# Support Platforms
#
plat_path = 'platforms'
plat_table = (
('windows', ('windows', 'cygwin-*')),
('darwin', ('darwin',)),
('ios', ('ios',)),
('linux', ('linux*',)),
('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),
('poky', ('poky',)),
)
arch_table = (
('x86', ('i?86', )),
('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),
('arm', ('armv5',)),
('armv6', ('armv6l',)),
('armv7', ('armv7l',)),
('ppc64', ('ppc64le',)),
('mips32', ('mips',)),
('aarch32', ('aarch32',)),
('aarch64', ('aarch64', 'arm64'))
)
#
# Hardware type
#
HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN = range(5)
#
# Global
#
_pytransform = None
class PytransformError(Exception):
pass
def dllmethod(func):
def wrap(*args, **kwargs):
return func(*args, **kwargs)
return wrap
@dllmethod
def version_info():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('version_info', _pytransform))
return dlfunc()
@dllmethod
def init_pytransform():
major, minor = sys.version_info[0:2]
# Python2.5 no sys.maxsize but sys.maxint
# bitness = 64 if sys.maxsize > 2**32 else 32
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p)
init_module = prototype(('init_module', _pytransform))
ret = init_module(major, minor, pythonapi._handle)
if (ret & 0xF000) == 0x1000:
raise PytransformError('Initialize python wrapper failed (%d)'
% (ret & 0xFFF))
return ret
@dllmethod
def init_runtime():
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
_init_runtime = prototype(('init_runtime', _pytransform))
return _init_runtime(0, 0, 0, 0)
@dllmethod
def encrypt_code_object(pubkey, co, flags, suffix=''):
_pytransform.set_option(6, suffix.encode())
prototype = PYFUNCTYPE(py_object, py_object, py_object, c_int)
dlfunc = prototype(('encrypt_code_object', _pytransform))
return dlfunc(pubkey, co, flags)
@dllmethod
def generate_license_file(filename, priname, rcode, start=-1, count=1):
prototype = PYFUNCTYPE(c_int, c_char_p, c_char_p, c_char_p, c_int, c_int)
dlfunc = prototype(('generate_project_license_files', _pytransform))
return dlfunc(filename.encode(), priname.encode(), rcode.encode(),
start, count) if sys.version_info[0] == 3 \
else dlfunc(filename, priname, rcode, start, count)
@dllmethod
def generate_license_key(prikey, keysize, rcode):
prototype = PYFUNCTYPE(py_object, c_char_p, c_int, c_char_p)
dlfunc = prototype(('generate_license_key', _pytransform))
return dlfunc(prikey, keysize, rcode) if sys.version_info[0] == 2 \
else dlfunc(prikey, keysize, rcode.encode())
@dllmethod
def get_registration_code():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('get_registration_code', _pytransform))
return dlfunc()
@dllmethod
def get_expired_days():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('get_expired_days', _pytransform))
return dlfunc()
@dllmethod
def clean_obj(obj, kind):
prototype = PYFUNCTYPE(c_int, py_object, c_int)
dlfunc = prototype(('clean_obj', _pytransform))
return dlfunc(obj, kind)
def clean_str(*args):
tdict = {
'str': 0,
'bytearray': 1,
'unicode': 2
}
for obj in args:
k = tdict.get(type(obj).__name__)
if k is None:
raise RuntimeError('Can not clean object: %s' % obj)
clean_obj(obj, k)
def get_hd_info(hdtype, name=None):
if hdtype not in range(HT_DOMAIN + 1):
raise RuntimeError('Invalid parameter hdtype: %s' % hdtype)
size = 256
t_buf = c_char * size
buf = t_buf()
cname = c_char_p(0 if name is None
                     else name.encode('utf-8') if hasattr(name, 'encode')
else name)
if (_pytransform.get_hd_info(hdtype, buf, size, cname) == -1):
raise PytransformError('Get hardware information failed')
return buf.value.decode()
def show_hd_info():
return _pytransform.show_hd_info()
def assert_armored(*names):
prototype = PYFUNCTYPE(py_object, py_object)
dlfunc = prototype(('assert_armored', _pytransform))
def wrapper(func):
def wrap_execute(*args, **kwargs):
dlfunc(names)
return func(*args, **kwargs)
return wrap_execute
return wrapper
def check_armored(*names):
try:
prototype = PYFUNCTYPE(py_object, py_object)
prototype(('assert_armored', _pytransform))(names)
return True
except RuntimeError:
return False
def get_license_info():
info = {
'ISSUER': None,
'EXPIRED': None,
'HARDDISK': None,
'IFMAC': None,
'IFIPV4': None,
'DOMAIN': None,
'DATA': None,
'CODE': None,
}
rcode = get_registration_code().decode()
if rcode.startswith('*VERSION:'):
index = rcode.find('\n')
info['ISSUER'] = rcode[9:index].split('.')[0].replace('-sn-1.txt', '')
rcode = rcode[index+1:]
index = 0
if rcode.startswith('*TIME:'):
from time import ctime
index = rcode.find('\n')
info['EXPIRED'] = ctime(float(rcode[6:index]))
index += 1
if rcode[index:].startswith('*FLAGS:'):
index += len('*FLAGS:') + 1
info['FLAGS'] = ord(rcode[index - 1])
prev = None
start = index
for k in ['HARDDISK', 'IFMAC', 'IFIPV4', 'DOMAIN', 'FIXKEY', 'CODE']:
index = rcode.find('*%s:' % k)
if index > -1:
if prev is not None:
info[prev] = rcode[start:index]
prev = k
start = index + len(k) + 2
info['CODE'] = rcode[start:]
i = info['CODE'].find(';')
if i > 0:
info['DATA'] = info['CODE'][i+1:]
info['CODE'] = info['CODE'][:i]
return info
def get_license_code():
return get_license_info()['CODE']
def get_user_data():
return get_license_info()['DATA']
def _match_features(patterns, s):
for pat in patterns:
if fnmatch(s, pat):
return True
def _gnu_get_libc_version():
try:
prototype = CFUNCTYPE(c_char_p)
ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()
return ver.decode().split('.')
except Exception:
pass
def format_platform(platid=None):
if platid:
return os.path.normpath(platid)
plat = platform.system().lower()
mach = platform.machine().lower()
for alias, platlist in plat_table:
if _match_features(platlist, plat):
plat = alias
break
if plat == 'linux':
cname, cver = platform.libc_ver()
if cname == 'musl':
plat = 'musl'
elif cname == 'libc':
plat = 'android'
elif cname == 'glibc':
v = _gnu_get_libc_version()
if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:
plat = 'centos6'
for alias, archlist in arch_table:
if _match_features(archlist, mach):
mach = alias
break
if plat == 'windows' and mach == 'x86_64':
bitness = struct.calcsize('P'.encode()) * 8
if bitness == 32:
mach = 'x86'
return os.path.join(plat, mach)
# Load _pytransform library
def _load_library(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
path = os.path.dirname(__file__) if path is None \
else os.path.normpath(path)
plat = platform.system().lower()
for alias, platlist in plat_table:
if _match_features(platlist, plat):
plat = alias
break
name = '_pytransform' + suffix
if plat == 'linux':
filename = os.path.abspath(os.path.join(path, name + '.so'))
elif plat in ('darwin', 'ios'):
filename = os.path.join(path, name + '.dylib')
elif plat == 'windows':
filename = os.path.join(path, name + '.dll')
elif plat in ('freebsd', 'poky'):
filename = os.path.join(path, name + '.so')
else:
filename = None
    if platid is not None and os.path.isfile(platid):
        filename = platid
    elif filename is None:
        raise PytransformError('Platform %s not supported' % plat)
    elif platid is not None or not os.path.exists(filename) or not is_runtime:
        libpath = platid if platid is not None and os.path.isabs(platid) else \
            os.path.join(path, plat_path, format_platform(platid))
        filename = os.path.join(libpath, os.path.basename(filename))
    if not os.path.exists(filename):
        raise PytransformError('Could not find "%s"' % filename)
try:
m = cdll.LoadLibrary(filename)
except Exception as e:
if sys.flags.debug:
print('Load %s failed:\n%s' % (filename, e))
raise
# Removed from v4.6.1
# if plat == 'linux':
# m.set_option(-1, find_library('c').encode())
if not os.path.abspath('.') == os.path.abspath(path):
m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
    # Required since Python 3.6
m.set_option(2, sys.byteorder.encode())
if sys.flags.debug:
m.set_option(3, c_char_p(1))
m.set_option(4, c_char_p(not is_runtime))
# Disable advanced mode by default
m.set_option(5, c_char_p(not advanced))
# Set suffix for private package
if suffix:
m.set_option(6, suffix.encode())
return m
def pyarmor_init(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
global _pytransform
_pytransform = _load_library(path, is_runtime, platid, suffix, advanced)
return init_pytransform()
def pyarmor_runtime(path=None, suffix='', advanced=0):
if _pytransform is not None:
return
try:
pyarmor_init(path, is_runtime=1, suffix=suffix, advanced=advanced)
init_runtime()
except Exception as e:
if sys.flags.debug or hasattr(sys, '_catch_pyarmor'):
raise
sys.stderr.write("%s\n" % str(e))
sys.exit(1)
# ----------------------------------------------------------
# End of pytransform
# ----------------------------------------------------------
#
# Not available from v5.6
#
def generate_capsule(licfile):
prikey, pubkey, prolic = _generate_project_capsule()
capkey, newkey = _generate_pytransform_key(licfile, pubkey)
return prikey, pubkey, capkey, newkey, prolic
@dllmethod
def _generate_project_capsule():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('generate_project_capsule', _pytransform))
return dlfunc()
@dllmethod
def _generate_pytransform_key(licfile, pubkey):
prototype = PYFUNCTYPE(py_object, c_char_p, py_object)
dlfunc = prototype(('generate_pytransform_key', _pytransform))
return dlfunc(licfile.encode() if sys.version_info[0] == 3 else licfile,
pubkey)
#
# Deprecated functions from v5.1
#
@dllmethod
def encrypt_project_files(proname, filelist, mode=0):
prototype = PYFUNCTYPE(c_int, c_char_p, py_object, c_int)
dlfunc = prototype(('encrypt_project_files', _pytransform))
return dlfunc(proname.encode(), filelist, mode)
def generate_project_capsule(licfile):
prikey, pubkey, prolic = _generate_project_capsule()
capkey = _encode_capsule_key_file(licfile)
return prikey, pubkey, capkey, prolic
@dllmethod
def _encode_capsule_key_file(licfile):
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
dlfunc = prototype(('encode_capsule_key_file', _pytransform))
return dlfunc(licfile.encode(), None)
@dllmethod
def encrypt_files(key, filelist, mode=0):
t_key = c_char * 32
prototype = PYFUNCTYPE(c_int, t_key, py_object, c_int)
dlfunc = prototype(('encrypt_files', _pytransform))
return dlfunc(t_key(*key), filelist, mode)
@dllmethod
def generate_module_key(pubname, key):
t_key = c_char * 32
prototype = PYFUNCTYPE(py_object, c_char_p, t_key, c_char_p)
dlfunc = prototype(('generate_module_key', _pytransform))
return dlfunc(pubname.encode(), t_key(*key), None)
#
# Compatible for PyArmor v3.0
#
@dllmethod
def old_init_runtime(systrace=0, sysprofile=1, threadtrace=0, threadprofile=1):
'''Only for old version, before PyArmor 3'''
pyarmor_init(is_runtime=1)
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
_init_runtime = prototype(('init_runtime', _pytransform))
return _init_runtime(systrace, sysprofile, threadtrace, threadprofile)
@dllmethod
def import_module(modname, filename):
'''Only for old version, before PyArmor 3'''
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
_import_module = prototype(('import_module', _pytransform))
return _import_module(modname.encode(), filename.encode())
@dllmethod
def exec_file(filename):
'''Only for old version, before PyArmor 3'''
prototype = PYFUNCTYPE(c_int, c_char_p)
_exec_file = prototype(('exec_file', _pytransform))
    return _exec_file(filename.encode())
| zwlib | /zwlib-1.2.0.tar.gz/zwlib-1.2.0/pytransform/__init__.py | __init__.py
from __future__ import annotations
import datetime as dt
import typing as t
from collections import deque
from dataclasses import dataclass
from enum import Enum, StrEnum, auto
from textwrap import dedent
from parsimonious.grammar import Grammar
from parsimonious.nodes import Node, NodeVisitor
RAW_GRAMMAR = r"""
workout = ((comment / block) elws*)+ / elws
block = tag ws "{" ((comment / params) / elws)+ "}"
params = (message / value) ","?
value = tag ws (string / range / rangeval)
message = "@" ws duration ws string
range = rangeval ws "->" ws rangeval
rangeval = duration / numeric / zone
duration = number ":" number
percent = number "%"
zone = ("Z" number) / "SS"
numeric = percent / number
elws = ws / emptyline
comment = ~r"\;[^\r\n]*"
tag = ~"[A-Z_]+"
string = ~'"[^\"]+"'
number = ~"\d+"
ws = ~"\s*"
emptyline = ws+
"""
GRAMMAR = Grammar(RAW_GRAMMAR)
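# A minimal sketch of the kind of source this grammar is meant to accept, written
# from the rules above (an illustrative assumption, not a sample taken from the
# package's own documentation):
#
#   ; comments start with a semicolon
#   META {
#       NAME "Sample ride",
#       AUTHOR "ZWOM",
#       DESCRIPTION "A short example workout",
#       FTP 250,
#   }
#   WARMUP { DURATION 10:00, POWER 25% -> 75% }
#   SEGMENT { DURATION 5:00, POWER Z2, @ 0:30 "Settle in" }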
class Tag(StrEnum):
AUTHOR = auto()
CADENCE = auto()
COOLDOWN = auto()
DESCRIPTION = auto()
DURATION = auto()
FREE = auto()
FTP = auto()
INTERVALS = auto()
META = auto()
NAME = auto()
POWER = auto()
RAMP = auto()
REPEAT = auto()
SEGMENT = auto()
TAGS = auto()
WARMUP = auto()
MESSAGES = auto() # Included for tidier housekeeping, not a valid keyword in the ZWO file
# Repeat segment metasyntax
START_REPEAT = auto()
END_REPEAT = auto()
@dataclass(frozen=True, slots=True)
class Percentage:
value: int
def __str__(self) -> str:
return f"{self.value / 100:0.3f}"
@classmethod
def from_node(cls, node: Node) -> Percentage:
return cls(value=int(node.text.rstrip("%")))
class PowerZone(Enum):
Z1 = Percentage(value=50)
Z2 = Percentage(value=65)
Z3 = Percentage(value=81)
SS = Percentage(value=90)
Z4 = Percentage(value=95)
Z5 = Percentage(value=109)
Z6 = Percentage(value=125)
Z7 = Percentage(value=150)
def __str__(self) -> str:
return str(self.value)
@dataclass(frozen=True, slots=True)
class Duration:
value: dt.timedelta
def __str__(self) -> str:
return str(int(self.value.total_seconds()))
@classmethod
def from_node(cls, node: Node) -> Duration:
minutes, seconds = (int(chunk) for chunk in node.text.split(":"))
return cls(value=dt.timedelta(minutes=minutes, seconds=seconds))
RANGE_T = Percentage | Duration | PowerZone | int
@dataclass(frozen=True, slots=True)
class Range:
left: RANGE_T
right: RANGE_T
@classmethod
def from_node(cls, visited_children: list[Node]) -> Range:
left, *_, right = visited_children
# I'm not sure how to best keep the numeric values from nesting, I might be misunderstanding
# how the parser is working or have something written poorly in the grammar but for now this
# hack functions
if isinstance(left, list):
left = left[0]
if isinstance(right, list):
right = right[0]
return cls(left=left, right=right)
@dataclass(frozen=True, slots=True)
class Message:
timestamp: Duration
message: str
@classmethod
def from_node(cls, visited_children: list[Node]) -> Message:
_, _, timestamp, _, message = visited_children
return cls(timestamp=timestamp, message=message)
T = t.TypeVar("T")
def deep_flatten(in_iter: list, key_type: type[T]) -> t.Generator[T, None, None]:
"""Accept an arbitrary list of lists and yield objects of the matching data type."""
# Use a deque as an iterator stack to keep track of any nested iterables
iterators = deque((iter(in_iter),))
# Iterate over the elements of each iterable & add them to the stack if they're also a list,
# otherwise yield only dicts & then pop the iterable once exhausted
while iterators:
for item in iterators[-1]:
if isinstance(item, list):
iterators.append(iter(item))
break
elif isinstance(item, key_type):
yield item
else:
iterators.pop()
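# For example, list(deep_flatten([1, [2, [3, "x"]], 4], int)) yields 1, 2, 3 and 4:
# nested lists are walked depth-first and non-matching items such as "x" are skipped.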
VAL_T = int | str | Percentage | Duration | Range | list[Message] | None
PARAM_T = dict[Tag, VAL_T]
BLOCK_T = dict[Tag, PARAM_T]
class ZWOVisitor(NodeVisitor):
grammar = GRAMMAR
# Indices of visited_children are determined by the grammar specification
def visit_workout(self, node: Node, visited_children: list[Node]) -> list[BLOCK_T]:
# Catch an empty document
if not node.text.strip():
return []
blocks = []
for chunk in visited_children[0]:
# The grammar here matches comments or blocks, if there are no dictionaries then we
# have a comment, which we just discard
if block := list(deep_flatten(chunk, key_type=dict)):
blocks.append(block[0])
return blocks
def visit_block(self, node: Node, visited_children: list[Node]) -> BLOCK_T:
tag = visited_children[0]
params = list(deep_flatten(visited_children[-2], key_type=dict))
block_messages = list(deep_flatten(visited_children[-2], key_type=Message))
block_params: BLOCK_T = {tag: {key: val for param in params for key, val in param.items()}}
block_params[Tag.MESSAGES] = block_messages if block_messages else None # type: ignore[assignment] # noqa: E501
return block_params
def visit_value(self, node: Node, visited_children: list[Node]) -> PARAM_T:
tag, _, value = visited_children
# I'm not sure how to best keep the numeric values from nesting, I might be misunderstanding
# how the parser is working or have something written poorly in the grammar but for now this
# hack functions
val = value[0]
if isinstance(val, list):
val = val[0]
return {tag: val}
def visit_string(self, node: Node, visited_children: list[Node]) -> str:
return dedent(node.text.strip('"'))
def visit_range(self, node: Node, visited_children: list[Node]) -> Range:
return Range.from_node(visited_children)
def visit_duration(self, node: Node, visited_children: list[Node]) -> Duration:
return Duration.from_node(node)
def visit_tag(self, node: Node, visited_children: list[Node]) -> Tag:
return Tag[node.text]
def visit_message(self, node: Node, visited_children: list[Node]) -> Message:
return Message.from_node(visited_children)
def visit_numeric(self, node: Node, visited_children: list[Node]) -> int | Percentage:
return visited_children[0] # type: ignore[no-any-return]
def visit_number(self, node: Node, visited_children: list[Node]) -> int:
return int(node.text)
def visit_percent(self, node: Node, visited_children: list[Node]) -> Percentage:
return Percentage.from_node(node)
def visit_zone(self, node: Node, visited_children: list[Node]) -> PowerZone:
return PowerZone[node.text]
def generic_visit(self, node: Node, visited_children: list[Node]) -> list[Node] | Node:
return visited_children or node
def parse_src(src: str) -> list[BLOCK_T]:
"""Parse the provided source into a list of raw workout blocks."""
tree = ZWOVisitor.grammar.parse(src)
visitor = ZWOVisitor()
parsed: list[BLOCK_T] = visitor.visit(tree)
    return parsed
| zwolang | /zwolang-0.3.0-py3-none-any.whl/zwo/parser.py | parser.py
from collections import abc
from zwo.parser import BLOCK_T, PARAM_T, Range, Tag, VAL_T
class ZWOMValidationError(BaseException): # noqa: D101
...
def _check_keys(required: set[Tag], check_tags: abc.KeysView[Tag], block_tag: Tag) -> bool:
missing = required - check_tags
if missing:
pretty_tags = ", ".join(tag.upper() for tag in missing)
raise ZWOMValidationError(f"{block_tag.upper()} block missing required keys: {pretty_tags}")
return True
class ZWOMValidator:
raw_blocks: list[BLOCK_T]
validated_blocks: list[BLOCK_T]
_ftp: int | None
_in_repeat: bool
_n_repeats: int
__slots__ = ("raw_blocks", "validated_blocks", "_ftp", "_in_repeat", "_n_repeats")
def __init__(self, raw_blocks: list[BLOCK_T]) -> None:
self.raw_blocks = raw_blocks
self._ftp = None
self.validated_blocks = self.validate_scanned()
def validate_scanned(self) -> list[BLOCK_T]:
if Tag.META not in self.raw_blocks[0]:
raise ZWOMValidationError("ZWOM file must begin with a META block")
self.visit_meta_block(self.raw_blocks[0][Tag.META], Tag.META)
# To account for expansion of any chunk repeats we need to build a new list
# Initialize it with the META block since we skip it in the rest of the validation
validated_blocks: list[BLOCK_T] = [self.raw_blocks[0]]
repeat_blocks = []
self._in_repeat = False
for block in self.raw_blocks[1:]:
# Blocks only have one key, so we can dispatch validators using the first key
block_tag = next(iter(block))
params = block[block_tag]
match block_tag:
case Tag.FREE | Tag.SEGMENT:
self.visit_segment_block(params, block_tag)
case Tag.RAMP | Tag.WARMUP | Tag.COOLDOWN:
self.visit_ramp_block(params, block_tag)
case Tag.INTERVALS:
self.visit_interval_block(params, block_tag)
case Tag.START_REPEAT:
self.visit_start_repeat_block(params, block_tag)
case Tag.END_REPEAT:
self.visit_end_repeat_block(params, block_tag)
case _:
raise ZWOMValidationError(f"Unknown workout tag: '{block_tag}'")
# Dispatch any additional generic parameter validation within the block
for param, val in params.items():
match param:
case Tag.POWER:
self.visit_power(val)
case Tag.CADENCE:
self.visit_cadence(val, block_tag)
case _:
continue
if self._in_repeat:
# Don't include the repeat metablocks in the final output
if block_tag != Tag.START_REPEAT:
repeat_blocks.append(block)
else:
# Check to see if we've just hit the END_REPEAT tag & dump the blocks accordingly
if repeat_blocks:
validated_blocks.extend(repeat_blocks * self._n_repeats)
repeat_blocks.clear()
self._n_repeats = -1
else:
# Don't include the repeat metablocks in the final output
if block_tag != Tag.END_REPEAT:
validated_blocks.append(block)
# Make sure the chunk repeat block was closed
if self._in_repeat:
raise ZWOMValidationError("START_REPEAT is missing a matching END_REPEAT.")
return validated_blocks
def visit_meta_block(self, params: PARAM_T, block_tag: Tag) -> None:
required_tags = {Tag.NAME, Tag.AUTHOR, Tag.DESCRIPTION}
_check_keys(required_tags, params.keys(), block_tag)
ftp = params.get(Tag.FTP)
if ftp is not None:
if isinstance(ftp, int):
if ftp == 0: # The parser already won't accept negative numbers
raise ZWOMValidationError(f"FTP must be > 0, received: {ftp}")
self._ftp = ftp
else:
raise ZWOMValidationError(
f"FTP must be a positive integer, received: '{type(ftp).__name__}'"
)
def visit_segment_block(self, params: PARAM_T, block_tag: Tag) -> None:
required_tags = {Tag.DURATION}
if block_tag == Tag.SEGMENT:
required_tags = required_tags | {Tag.POWER}
_check_keys(required_tags, params.keys(), block_tag)
def visit_ramp_block(self, params: PARAM_T, block_tag: Tag) -> None:
required_tags = {Tag.DURATION, Tag.POWER}
_check_keys(required_tags, params.keys(), block_tag)
def visit_interval_block(self, params: PARAM_T, block_tag: Tag) -> None:
required_tags = {Tag.REPEAT, Tag.DURATION, Tag.POWER}
_check_keys(required_tags, params.keys(), block_tag)
def visit_start_repeat_block(self, params: PARAM_T, block_tag: Tag) -> None:
if self._in_repeat:
raise ZWOMValidationError("Nested block chunk repetition is not supported.")
required_tags = {Tag.REPEAT}
_check_keys(required_tags, params.keys(), block_tag)
n_repeats = params[Tag.REPEAT] # If we're here we know the key is there
if not isinstance(n_repeats, int):
raise ZWOMValidationError("START_REPEAT must have an integer REPEAT value.")
if n_repeats == 0:
raise ZWOMValidationError("REPEAT must be > 0.")
self._in_repeat = True
self._n_repeats = n_repeats
def visit_end_repeat_block(self, params: PARAM_T, block_tag: Tag) -> None:
if not self._in_repeat:
raise ZWOMValidationError("Missing opening START_REPEAT block.")
self._in_repeat = False
def visit_power(self, power_spec: VAL_T) -> None:
# Validate that an FTP is set in order to use absolute watts
if isinstance(power_spec, int):
if power_spec == 0: # The parser already won't accept negative numbers
raise ZWOMValidationError(f"Power must be > 0, received: {power_spec}")
if not self._ftp:
raise ZWOMValidationError(
"An FTP must be specified in the META block to use absolute watts."
)
elif isinstance(power_spec, Range):
if not self._ftp:
if isinstance(power_spec.left, int) or isinstance(power_spec.right, int):
raise ZWOMValidationError(
"An FTP must be specified in the META block to use absolute watts."
)
def visit_cadence(self, cadence_spec: VAL_T, block_tag: Tag) -> None:
# Cadence range is only valid for use in an interval block
if isinstance(cadence_spec, Range) and block_tag != Tag.INTERVALS:
raise ZWOMValidationError("Cadence ranges are only valid for Interval blocks.")
if block_tag == Tag.INTERVALS and not isinstance(cadence_spec, Range):
            raise ZWOMValidationError("Cadence spec for Interval blocks must be a range.")
| zwolang | /zwolang-0.3.0-py3-none-any.whl/zwo/interpreter.py | interpreter.py
from dataclasses import dataclass
from io import StringIO
from pathlib import Path
from xml.dom import minidom
from zwo.interpreter import ZWOMValidator
from zwo.parser import (
BLOCK_T,
Duration,
Message,
PARAM_T,
Percentage,
PowerZone,
Range,
Tag,
parse_src,
)
STATIC_META_PARAMS = {"sportType": "bike"}
BLOCK_MAPPING = {
Tag.COOLDOWN: "Cooldown",
Tag.FREE: "FreeRide",
Tag.INTERVALS: "IntervalsT",
Tag.RAMP: "Ramp",
Tag.SEGMENT: "SteadyState",
Tag.WARMUP: "WarmUp",
}
@dataclass(slots=True)
class Workout:
blocks: list[BLOCK_T]
ftp: int | None = None
def to_zwo(self, out_filepath: Path) -> None:
doc = minidom.Document()
root = doc.createElement("workout_file")
doc.appendChild(root)
        # If we're here then we've validated that the META block comes first
doc = self.serialize_meta(doc, root, self.blocks[0][Tag.META])
doc = self.serialize_workout_blocks(doc, root, self.blocks[1:])
# Drop encoding line before writing the XML, Zwift doesn't use it
buff = StringIO()
buff.write(doc.toprettyxml(indent=" " * 4))
buff.seek(0)
_ = buff.readline()
out_filepath.write_text(buff.read())
def serialize_meta(
self, doc: minidom.Document, root: minidom.Element, meta_block: PARAM_T
) -> minidom.Document:
for tag, val in meta_block.items():
if tag == Tag.FTP:
continue
tmp = doc.createElement(tag)
if tag == Tag.TAGS:
if not isinstance(val, str):
raise ValueError("Type narrowing, shouldn't be able to get here")
for hashtag in val.split():
sub_tag = doc.createElement("tag")
sub_tag.setAttribute("name", hashtag.lstrip("#"))
tmp.appendChild(sub_tag)
else:
if tag == Tag.DESCRIPTION:
if not isinstance(val, str):
raise ValueError("Type narrowing, shouldn't be able to get here")
                tmp.appendChild(doc.createTextNode(str(val)))
root.appendChild(tmp)
# Add any remaining static parameters that Zwift is expecting
for element, val in STATIC_META_PARAMS.items():
tmp = doc.createElement(element)
tmp.appendChild(doc.createTextNode(val))
root.appendChild(tmp)
return doc
def serialize_workout_blocks(
self, doc: minidom.Document, root: minidom.Element, blocks: list[BLOCK_T]
) -> minidom.Document:
workout = doc.createElement("workout")
root.appendChild(workout)
n_blocks = len(blocks)
for idx, block in enumerate(blocks, start=1):
# Blocks only have one key, so we can dispatch serializers using the first key
block_tag = next(iter(block))
params = block[block_tag]
match block_tag:
case Tag.FREE:
block_element = self._build_simple_block(
doc, BLOCK_MAPPING[block_tag], params, add_flat_road=True
)
case Tag.SEGMENT:
block_element = self._build_simple_block(
doc, BLOCK_MAPPING[block_tag], params, add_power=True, add_pace=True
)
case Tag.RAMP | Tag.WARMUP | Tag.COOLDOWN:
zwift_key = _classify_ramp_type(idx, n_blocks)
block_element = self._build_simple_block(doc, zwift_key, params, add_pace=True)
block_element = self.serialize_ramp(block_element, params)
case Tag.INTERVALS:
block_element = self._build_simple_block(
doc,
BLOCK_MAPPING[block_tag],
params,
add_duration=False,
add_cadence=False, # Unlike the other blocks, intervals have a range
add_pace=True,
)
block_element = self.serialize_interval(block_element, params)
case _:
...
if messages := block.get(Tag.MESSAGES):
if not isinstance(messages, list):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element = self.serialize_messages(doc, block_element, messages)
workout.appendChild(block_element)
return doc
def _build_simple_block(
self,
doc: minidom.Document,
zwift_key: str,
params: PARAM_T,
add_duration: bool = True,
add_cadence: bool = True,
add_power: bool = False,
add_flat_road: bool = False,
add_pace: bool = False,
) -> minidom.Element:
block_element: minidom.Element = doc.createElement(zwift_key)
if add_duration:
block_element.setAttribute("Duration", str(params[Tag.DURATION]))
if add_cadence and (cadence := params.get(Tag.CADENCE)):
block_element.setAttribute("Cadence", str(cadence))
if add_power:
power = params[Tag.POWER]
if not isinstance(power, (int, Percentage, PowerZone)):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element.setAttribute("Power", self.serialize_power(power))
if add_flat_road:
block_element.setAttribute("FlatRoad", "0")
if add_pace:
block_element.setAttribute("pace", "0")
return block_element
def serialize_ramp(self, block_element: minidom.Element, params: PARAM_T) -> minidom.Element:
power_range = params[Tag.POWER]
if not isinstance(power_range, Range):
raise ValueError("Type narrowing, shouldn't be able to get here")
if isinstance(power_range.left, Duration) or isinstance(power_range.right, Duration):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element.setAttribute("PowerLow", self.serialize_power(power_range.left))
block_element.setAttribute("PowerHigh", self.serialize_power(power_range.right))
return block_element
def serialize_interval(
self, block_element: minidom.Element, params: PARAM_T
) -> minidom.Element:
block_element.setAttribute("Repeat", str(params[Tag.REPEAT]))
duration_range = params[Tag.DURATION]
if not isinstance(duration_range, Range):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element.setAttribute("OnDuration", str(duration_range.left))
block_element.setAttribute("OffDuration", str(duration_range.right))
power_range = params[Tag.POWER]
if not isinstance(power_range, Range):
raise ValueError("Type narrowing, shouldn't be able to get here")
if isinstance(power_range.left, Duration) or isinstance(power_range.right, Duration):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element.setAttribute("OnPower", self.serialize_power(power_range.left))
block_element.setAttribute("OffPower", self.serialize_power(power_range.right))
        cadence_range = params.get(Tag.CADENCE)
        if cadence_range is not None:
            if not isinstance(cadence_range, Range):
                raise ValueError("Type narrowing, shouldn't be able to get here")
            block_element.setAttribute("Cadence", str(cadence_range.left))
            block_element.setAttribute("CadenceResting", str(cadence_range.right))
return block_element
def serialize_messages(
self, doc: minidom.Document, root: minidom.Element, messages: list[Message]
) -> minidom.Element:
for message in messages:
msg = doc.createElement("textevent")
msg.setAttribute("timeoffset", str(message.timestamp))
msg.setAttribute("message", message.message)
root.appendChild(msg)
return root
def serialize_power(self, power: int | Percentage | PowerZone) -> str:
if isinstance(power, int):
if self.ftp is None:
raise ValueError("Type narrowing, shouldn't be able to get here")
return str(power / self.ftp)
else:
return str(power)
def _classify_ramp_type(block_idx: int, n_blocks: int) -> str:
"""
Locate the appropriate Zwift block tag for the provided ramp block location.
While there is no specific Ramp block in the workout building UI, some experimental observations
have been made:
* If a ramp is at the very beginning of the workout, Zwift serializes it as a Warmup block
* If there are multiple blocks in a workout and a ramp is at the end, Zwift serializes it
as a Cooldown block
* If there are multiple blocks in a workout and a ramp is not at the beginning or the end,
Zwift serializes it as a Ramp block
"""
if block_idx == 1:
return BLOCK_MAPPING[Tag.WARMUP]
if block_idx == n_blocks:
return BLOCK_MAPPING[Tag.COOLDOWN]
else:
return BLOCK_MAPPING[Tag.RAMP]
def convert_zwom(zwom_filepath: Path, out_filepath: Path | None = None) -> None:
"""
Validate and convert the provided ZWOM file to ZWO.
If no `out_filepath` is provided, the resulting ZWO file is written to the same directory as the
input ZWOM file.
NOTE: Any existing ZWO files sharing the specified name will be overwritten.
"""
if out_filepath is None:
out_filepath = zwom_filepath.with_suffix(".zwo")
blocks = parse_src(zwom_filepath.read_text())
val = ZWOMValidator(blocks)
wo = Workout(val.validated_blocks, val._ftp)
    wo.to_zwo(out_filepath)
| zwolang | /zwolang-0.3.0-py3-none-any.whl/zwo/serialize.py | serialize.py
# zwsp-steg-py
Zero-Width Space Steganography. Encodes and decodes hidden messages as invisible, zero-width characters.
This repository is a Python porting of [zwsp-steg-js](https://github.com/offdev/zwsp-steg-js).
All credits to [offdev](https://github.com/offdev)!
### Installation
```bash
$ pip install zwsp-steg-py
```
### Usage Example
```.py
import zwsp_steg
encoded = zwsp_steg.encode('hidden message')
decoded = zwsp_steg.decode(encoded)
print(decoded) # hidden message
```
Note that decoding ignores every character outside the 'special' zero-width set. That means if you hide your message inside a readable string and decode the whole string, only the hidden message is returned.
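For instance (an illustrative sketch, not an official example from the package), the hidden payload survives being spliced into visible text:
```.py
import zwsp_steg

secret = zwsp_steg.encode('hidden message')
# The zero-width characters are invisible inside ordinary text
cover = 'Nothing to see here.' + secret + ' Move along.'

print(zwsp_steg.decode(cover))  # hidden message
```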
### Parameters
You can use different sets of characters in different encoding / decoding modes.
```.py
import zwsp_steg
zwsp_steg.encode('hidden message', zwsp_steg.MODE_ZWSP)
zwsp_steg.encode('hidden message', zwsp_steg.MODE_FULL)
```
#### Character sets used
- **MODE_ZWSP**: Zero-Width Space (\u200b), Zero-Width Non-Joiner (\u200c), Zero-Width Joiner (\u200d)
- **MODE_FULL**: All MODE_ZWSP characters, Left-To-Right Mark (\u200e), Right-To-Left Mark (\u200f)
### License
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
| zwsp-steg-py | /zwsp-steg-py-0.2.0.tar.gz/zwsp-steg-py-0.2.0/README.md | README.md |
MODE_ZWSP = 0
MODE_FULL = 1
ZERO_WIDTH_SPACE = '\u200b'
ZERO_WIDTH_NON_JOINER = '\u200c'
ZERO_WIDTH_JOINER = '\u200d'
LEFT_TO_RIGHT_MARK = '\u200e'
RIGHT_TO_LEFT_MARK = '\u200f'
list_ZWSP = [
ZERO_WIDTH_SPACE,
ZERO_WIDTH_NON_JOINER,
ZERO_WIDTH_JOINER,
]
list_FULL = [
ZERO_WIDTH_SPACE,
ZERO_WIDTH_NON_JOINER,
ZERO_WIDTH_JOINER,
LEFT_TO_RIGHT_MARK,
RIGHT_TO_LEFT_MARK,
]
def get_padding_length(mode):
return 11 if mode == MODE_ZWSP else 7 # Keep padding as small as possible
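# Note: 3 ** 11 = 177147 and 5 ** 7 = 78125, so both padding widths are large enough
# to encode any 16-bit (Basic Multilingual Plane) code point in the chosen alphabet.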
def to_base(num, b, numerals='0123456789abcdefghijklmnopqrstuvwxyz'):
"""
Python implementation of number.toString(radix)
Thanks to jellyfishtree from https://stackoverflow.com/a/2267428
"""
return ((num == 0) and numerals[0]) or (to_base(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
def encode(message, mode=MODE_FULL):
if not isinstance(message, str):
raise TypeError('Cannot encode {0}'.format(type(message).__name__))
alphabet = list_ZWSP if mode == MODE_ZWSP else list_FULL
padding = get_padding_length(mode)
encoded = ''
if (len(message) == 0):
return ''
for message_char in message:
code = '{0}{1}'.format('0' * padding, int(str(to_base(ord(message_char), len(alphabet)))))
code = code[len(code) - padding:]
for code_char in code:
index = int(code_char)
encoded = encoded + alphabet[index]
return encoded
def decode(message, mode=MODE_FULL):
if not isinstance(message, str):
raise TypeError('Cannot decode {0}'.format(type(message).__name__))
alphabet = list_ZWSP if mode == MODE_ZWSP else list_FULL
padding = get_padding_length(mode)
encoded = ''
decoded = ''
for message_char in message:
if message_char in alphabet:
encoded = encoded + str(alphabet.index(message_char))
if (len(encoded) % padding != 0):
raise TypeError('Unknown encoding detected!')
cur_encoded_char = ''
for index, encoded_char in enumerate(encoded):
cur_encoded_char = cur_encoded_char + encoded_char
if index > 0 and (index + 1) % padding == 0:
decoded = decoded + chr(int(cur_encoded_char, len(alphabet)))
cur_encoded_char = ''
    return decoded
| zwsp-steg-py | /zwsp-steg-py-0.2.0.tar.gz/zwsp-steg-py-0.2.0/zwsp_steg/steganography.py | steganography.py
__all__ = ['module', 'Identity', 'Lambda', 'PartialLambda', 'Flatten', 'View', 'ResizeBatch', 'Debugger',
'sigmoid_range', 'SigmoidRange', 'AdaptiveConcatPool1d', 'AdaptiveConcatPool2d', 'PoolType', 'adaptive_pool',
'PoolFlatten', 'NormType', 'BatchNorm', 'InstanceNorm', 'BatchNorm1dFlat', 'LinBnDrop', 'sigmoid',
'sigmoid_', 'vleaky_relu', 'init_default', 'init_linear', 'ConvLayer', 'AdaptiveAvgPool', 'MaxPool',
'AvgPool', 'trunc_normal_', 'Embedding', 'SelfAttention', 'PooledSelfAttention2d', 'SimpleSelfAttention',
'icnr_init', 'PixelShuffle_ICNR', 'sequential', 'SequentialEx', 'MergeLayer', 'Cat', 'SimpleCNN',
'ProdLayer', 'inplace_relu', 'SEModule', 'ResBlock', 'SEBlock', 'SEResNeXtBlock', 'SeparableBlock', 'swish',
'Swish', 'MishJitAutoFn', 'mish', 'Mish', 'ParameterModule', 'children_and_parameters', 'has_children',
'flatten_model', 'NoneReduce', 'in_channels']
# Cell
from .imports import *
from .torch_imports import *
from .torch_core import *
from torch.nn.utils import weight_norm, spectral_norm
# Cell
def module(*flds, **defaults):
"Decorator to create an `nn.Module` using `f` as `forward` method"
pa = [inspect.Parameter(o, inspect.Parameter.POSITIONAL_OR_KEYWORD) for o in flds]
pb = [inspect.Parameter(k, inspect.Parameter.POSITIONAL_OR_KEYWORD, default=v)
for k,v in defaults.items()]
params = pa+pb
all_flds = [*flds,*defaults.keys()]
def _f(f):
class c(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
for i,o in enumerate(args): kwargs[all_flds[i]] = o
kwargs = merge(defaults,kwargs)
for k,v in kwargs.items(): setattr(self,k,v)
__repr__ = basic_repr(all_flds)
forward = f
c.__signature__ = inspect.Signature(params)
c.__name__ = c.__qualname__ = f.__name__
c.__doc__ = f.__doc__
return c
return _f
# Cell
@module()
def Identity(self, x):
"Do nothing at all"
return x
# Cell
@module('func')
def Lambda(self, x):
"An easy way to create a pytorch layer for a simple `func`"
return self.func(x)
# Cell
class PartialLambda(Lambda):
"Layer that applies `partial(func, **kwargs)`"
def __init__(self, func, **kwargs):
super().__init__(partial(func, **kwargs))
self.repr = f'{func.__name__}, {kwargs}'
def forward(self, x): return self.func(x)
def __repr__(self): return f'{self.__class__.__name__}({self.repr})'
# Cell
@module(full=False)
def Flatten(self, x):
"Flatten `x` to a single dimension, e.g. at end of a model. `full` for rank-1 tensor"
return TensorBase(x.view(-1) if self.full else x.view(x.size(0), -1))
# Cell
class View(Module):
"Reshape `x` to `size`"
def __init__(self, *size): self.size = size
def forward(self, x): return x.view(self.size)
# Cell
class ResizeBatch(Module):
"Reshape `x` to `size`, keeping batch dim the same size"
def __init__(self, *size): self.size = size
def forward(self, x): return x.view((x.size(0),) + self.size)
# Cell
@module()
def Debugger(self,x):
"A module to debug inside a model."
set_trace()
return x
# Cell
def sigmoid_range(x, low, high):
"Sigmoid function with range `(low, high)`"
return torch.sigmoid(x) * (high - low) + low
# Cell
@module('low','high')
def SigmoidRange(self, x):
"Sigmoid module with range `(low, high)`"
return sigmoid_range(x, self.low, self.high)
# Cell
class AdaptiveConcatPool1d(Module):
"Layer that concats `AdaptiveAvgPool1d` and `AdaptiveMaxPool1d`"
def __init__(self, size=None):
self.size = size or 1
self.ap = nn.AdaptiveAvgPool1d(self.size)
self.mp = nn.AdaptiveMaxPool1d(self.size)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
# Cell
class AdaptiveConcatPool2d(Module):
"Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`"
def __init__(self, size=None):
self.size = size or 1
self.ap = nn.AdaptiveAvgPool2d(self.size)
self.mp = nn.AdaptiveMaxPool2d(self.size)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
# Cell
class PoolType: Avg,Max,Cat = 'Avg','Max','Cat'
# Cell
def adaptive_pool(pool_type):
return nn.AdaptiveAvgPool2d if pool_type=='Avg' else nn.AdaptiveMaxPool2d if pool_type=='Max' else AdaptiveConcatPool2d
# Cell
class PoolFlatten(nn.Sequential):
"Combine `nn.AdaptiveAvgPool2d` and `Flatten`."
def __init__(self, pool_type=PoolType.Avg): super().__init__(adaptive_pool(pool_type)(1), Flatten())
# Cell
NormType = Enum('NormType', 'Batch BatchZero Weight Spectral Instance InstanceZero')
# Cell
def _get_norm(prefix, nf, ndim=2, zero=False, **kwargs):
"Norm layer with `nf` features and `ndim` initialized depending on `norm_type`."
assert 1 <= ndim <= 3
bn = getattr(nn, f"{prefix}{ndim}d")(nf, **kwargs)
if bn.affine:
bn.bias.data.fill_(1e-3)
bn.weight.data.fill_(0. if zero else 1.)
return bn
# Cell
@delegates(nn.BatchNorm2d)
def BatchNorm(nf, ndim=2, norm_type=NormType.Batch, **kwargs):
"BatchNorm layer with `nf` features and `ndim` initialized depending on `norm_type`."
return _get_norm('BatchNorm', nf, ndim, zero=norm_type==NormType.BatchZero, **kwargs)
# Cell
@delegates(nn.InstanceNorm2d)
def InstanceNorm(nf, ndim=2, norm_type=NormType.Instance, affine=True, **kwargs):
"InstanceNorm layer with `nf` features and `ndim` initialized depending on `norm_type`."
return _get_norm('InstanceNorm', nf, ndim, zero=norm_type==NormType.InstanceZero, affine=affine, **kwargs)
# Cell
class BatchNorm1dFlat(nn.BatchNorm1d):
"`nn.BatchNorm1d`, but first flattens leading dimensions"
def forward(self, x):
if x.dim()==2: return super().forward(x)
*f,l = x.shape
x = x.contiguous().view(-1,l)
return super().forward(x).view(*f,l)
# Cell
class LinBnDrop(nn.Sequential):
"Module grouping `BatchNorm1d`, `Dropout` and `Linear` layers"
def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=False):
layers = [BatchNorm(n_out if lin_first else n_in, ndim=1)] if bn else []
if p != 0: layers.append(nn.Dropout(p))
lin = [nn.Linear(n_in, n_out, bias=not bn)]
if act is not None: lin.append(act)
layers = lin+layers if lin_first else layers+lin
super().__init__(*layers)
# Cell
def sigmoid(input, eps=1e-7):
"Same as `torch.sigmoid`, plus clamping to `(eps,1-eps)"
return input.sigmoid().clamp(eps,1-eps)
# Cell
def sigmoid_(input, eps=1e-7):
"Same as `torch.sigmoid_`, plus clamping to `(eps,1-eps)"
return input.sigmoid_().clamp_(eps,1-eps)
# Cell
from torch.nn.init import kaiming_uniform_,uniform_,xavier_uniform_,normal_
# Cell
def vleaky_relu(input, inplace=True):
"`F.leaky_relu` with 0.3 slope"
return F.leaky_relu(input, negative_slope=0.3, inplace=inplace)
# Cell
for o in F.relu,nn.ReLU,F.relu6,nn.ReLU6,F.leaky_relu,nn.LeakyReLU:
o.__default_init__ = kaiming_uniform_
# Cell
for o in F.sigmoid,nn.Sigmoid,F.tanh,nn.Tanh,sigmoid,sigmoid_:
o.__default_init__ = xavier_uniform_
# Cell
def init_default(m, func=nn.init.kaiming_normal_):
"Initialize `m` weights with `func` and set `bias` to 0."
if func and hasattr(m, 'weight'): func(m.weight)
with torch.no_grad():
if getattr(m, 'bias', None) is not None: m.bias.fill_(0.)
return m
# Cell
def init_linear(m, act_func=None, init='auto', bias_std=0.01):
if getattr(m,'bias',None) is not None and bias_std is not None:
if bias_std != 0: normal_(m.bias, 0, bias_std)
else: m.bias.data.zero_()
if init=='auto':
if act_func in (F.relu_,F.leaky_relu_): init = kaiming_uniform_
else: init = getattr(act_func.__class__, '__default_init__', None)
if init is None: init = getattr(act_func, '__default_init__', None)
if init is not None: init(m.weight)
# Cell
def _conv_func(ndim=2, transpose=False):
"Return the proper conv `ndim` function, potentially `transposed`."
assert 1 <= ndim <=3
return getattr(nn, f'Conv{"Transpose" if transpose else ""}{ndim}d')
# Cell
defaults.activation=nn.ReLU
# Cell
class ConvLayer(nn.Sequential):
"Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and `norm_type` layers."
@delegates(nn.Conv2d)
def __init__(self, ni, nf, ks=3, stride=1, padding=None, bias=None, ndim=2, norm_type=NormType.Batch, bn_1st=True,
act_cls=defaults.activation, transpose=False, init='auto', xtra=None, bias_std=0.01, **kwargs):
if padding is None: padding = ((ks-1)//2 if not transpose else 0)
bn = norm_type in (NormType.Batch, NormType.BatchZero)
inn = norm_type in (NormType.Instance, NormType.InstanceZero)
if bias is None: bias = not (bn or inn)
conv_func = _conv_func(ndim, transpose=transpose)
conv = conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding, **kwargs)
act = None if act_cls is None else act_cls()
init_linear(conv, act, init=init, bias_std=bias_std)
if norm_type==NormType.Weight: conv = weight_norm(conv)
elif norm_type==NormType.Spectral: conv = spectral_norm(conv)
layers = [conv]
act_bn = []
if act is not None: act_bn.append(act)
if bn: act_bn.append(BatchNorm(nf, norm_type=norm_type, ndim=ndim))
if inn: act_bn.append(InstanceNorm(nf, norm_type=norm_type, ndim=ndim))
if bn_1st: act_bn.reverse()
layers += act_bn
if xtra: layers.append(xtra)
super().__init__(*layers)
# Cell
def AdaptiveAvgPool(sz=1, ndim=2):
"nn.AdaptiveAvgPool layer for `ndim`"
assert 1 <= ndim <= 3
return getattr(nn, f"AdaptiveAvgPool{ndim}d")(sz)
# Cell
def MaxPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):
"nn.MaxPool layer for `ndim`"
assert 1 <= ndim <= 3
return getattr(nn, f"MaxPool{ndim}d")(ks, stride=stride, padding=padding)
# Cell
def AvgPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):
"nn.AvgPool layer for `ndim`"
assert 1 <= ndim <= 3
return getattr(nn, f"AvgPool{ndim}d")(ks, stride=stride, padding=padding, ceil_mode=ceil_mode)
# Cell
def trunc_normal_(x, mean=0., std=1.):
"Truncated normal initialization (approximation)"
# From https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/12
return x.normal_().fmod_(2).mul_(std).add_(mean)
# Cell
class Embedding(nn.Embedding):
"Embedding layer with truncated normal initialization"
def __init__(self, ni, nf, std=0.01):
super().__init__(ni, nf)
trunc_normal_(self.weight.data, std=std)
# Cell
class SelfAttention(Module):
"Self attention layer for `n_channels`."
def __init__(self, n_channels):
self.query,self.key,self.value = [self._conv(n_channels, c) for c in (n_channels//8,n_channels//8,n_channels)]
self.gamma = nn.Parameter(tensor([0.]))
def _conv(self,n_in,n_out):
return ConvLayer(n_in, n_out, ks=1, ndim=1, norm_type=NormType.Spectral, act_cls=None, bias=False)
def forward(self, x):
#Notation from the paper.
size = x.size()
x = x.view(*size[:2],-1)
f,g,h = self.query(x),self.key(x),self.value(x)
beta = F.softmax(torch.bmm(f.transpose(1,2), g), dim=1)
o = self.gamma * torch.bmm(h, beta) + x
return o.view(*size).contiguous()
# Cell
class PooledSelfAttention2d(Module):
"Pooled self attention layer for 2d."
def __init__(self, n_channels):
self.n_channels = n_channels
self.query,self.key,self.value = [self._conv(n_channels, c) for c in (n_channels//8,n_channels//8,n_channels//2)]
self.out = self._conv(n_channels//2, n_channels)
self.gamma = nn.Parameter(tensor([0.]))
def _conv(self,n_in,n_out):
return ConvLayer(n_in, n_out, ks=1, norm_type=NormType.Spectral, act_cls=None, bias=False)
def forward(self, x):
n_ftrs = x.shape[2]*x.shape[3]
f = self.query(x).view(-1, self.n_channels//8, n_ftrs)
g = F.max_pool2d(self.key(x), [2,2]).view(-1, self.n_channels//8, n_ftrs//4)
h = F.max_pool2d(self.value(x), [2,2]).view(-1, self.n_channels//2, n_ftrs//4)
beta = F.softmax(torch.bmm(f.transpose(1, 2), g), -1)
o = self.out(torch.bmm(h, beta.transpose(1,2)).view(-1, self.n_channels//2, x.shape[2], x.shape[3]))
return self.gamma * o + x
# Cell
def _conv1d_spect(ni:int, no:int, ks:int=1, stride:int=1, padding:int=0, bias:bool=False):
"Create and initialize a `nn.Conv1d` layer with spectral normalization."
conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
nn.init.kaiming_normal_(conv.weight)
if bias: conv.bias.data.zero_()
return spectral_norm(conv)
# Cell
class SimpleSelfAttention(Module):
def __init__(self, n_in:int, ks=1, sym=False):
self.sym,self.n_in = sym,n_in
self.conv = _conv1d_spect(n_in, n_in, ks, padding=ks//2, bias=False)
self.gamma = nn.Parameter(tensor([0.]))
def forward(self,x):
if self.sym:
c = self.conv.weight.view(self.n_in,self.n_in)
c = (c + c.t())/2
self.conv.weight = c.view(self.n_in,self.n_in,1)
size = x.size()
x = x.view(*size[:2],-1)
convx = self.conv(x)
xxT = torch.bmm(x,x.permute(0,2,1).contiguous())
o = torch.bmm(xxT, convx)
o = self.gamma * o + x
return o.view(*size).contiguous()
# Cell
def icnr_init(x, scale=2, init=nn.init.kaiming_normal_):
"ICNR init of `x`, with `scale` and `init` function"
ni,nf,h,w = x.shape
ni2 = int(ni/(scale**2))
k = init(x.new_zeros([ni2,nf,h,w])).transpose(0, 1)
k = k.contiguous().view(ni2, nf, -1)
k = k.repeat(1, 1, scale**2)
return k.contiguous().view([nf,ni,h,w]).transpose(0, 1)
# Cell
class PixelShuffle_ICNR(nn.Sequential):
"Upsample by `scale` from `ni` filters to `nf` (default `ni`), using `nn.PixelShuffle`."
def __init__(self, ni, nf=None, scale=2, blur=False, norm_type=NormType.Weight, act_cls=defaults.activation):
super().__init__()
nf = ifnone(nf, ni)
layers = [ConvLayer(ni, nf*(scale**2), ks=1, norm_type=norm_type, act_cls=act_cls, bias_std=0),
nn.PixelShuffle(scale)]
layers[0][0].weight.data.copy_(icnr_init(layers[0][0].weight.data))
if blur: layers += [nn.ReplicationPad2d((1,0,1,0)), nn.AvgPool2d(2, stride=1)]
super().__init__(*layers)
# Cell
def sequential(*args):
"Create an `nn.Sequential`, wrapping items with `Lambda` if needed"
if len(args) != 1 or not isinstance(args[0], OrderedDict):
args = list(args)
for i,o in enumerate(args):
if not isinstance(o,nn.Module): args[i] = Lambda(o)
return nn.Sequential(*args)
# Cell
class SequentialEx(Module):
"Like `nn.Sequential`, but with ModuleList semantics, and can access module input"
def __init__(self, *layers): self.layers = nn.ModuleList(layers)
def forward(self, x):
res = x
for l in self.layers:
res.orig = x
nres = l(res)
# We have to remove res.orig to avoid hanging refs and therefore memory leaks
res.orig, nres.orig = None, None
res = nres
return res
def __getitem__(self,i): return self.layers[i]
def append(self,l): return self.layers.append(l)
def extend(self,l): return self.layers.extend(l)
def insert(self,i,l): return self.layers.insert(i,l)
# Cell
class MergeLayer(Module):
"Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`."
def __init__(self, dense:bool=False): self.dense=dense
def forward(self, x): return torch.cat([x,x.orig], dim=1) if self.dense else (x+x.orig)
# Cell
class Cat(nn.ModuleList):
"Concatenate layers outputs over a given dim"
def __init__(self, layers, dim=1):
self.dim=dim
super().__init__(layers)
def forward(self, x): return torch.cat([l(x) for l in self], dim=self.dim)
# Cell
class SimpleCNN(nn.Sequential):
"Create a simple CNN with `filters`."
def __init__(self, filters, kernel_szs=None, strides=None, bn=True):
nl = len(filters)-1
kernel_szs = ifnone(kernel_szs, [3]*nl)
strides = ifnone(strides , [2]*nl)
layers = [ConvLayer(filters[i], filters[i+1], kernel_szs[i], stride=strides[i],
norm_type=(NormType.Batch if bn and i<nl-1 else None)) for i in range(nl)]
layers.append(PoolFlatten())
super().__init__(*layers)
# Cell
class ProdLayer(Module):
"Merge a shortcut with the result of the module by multiplying them."
def forward(self, x): return x * x.orig
# Cell
inplace_relu = partial(nn.ReLU, inplace=True)
# Cell
def SEModule(ch, reduction, act_cls=defaults.activation):
nf = math.ceil(ch//reduction/8)*8
return SequentialEx(nn.AdaptiveAvgPool2d(1),
ConvLayer(ch, nf, ks=1, norm_type=None, act_cls=act_cls),
ConvLayer(nf, ch, ks=1, norm_type=None, act_cls=nn.Sigmoid),
ProdLayer())
# Cell
class ResBlock(Module):
"Resnet block from `ni` to `nh` with `stride`"
@delegates(ConvLayer.__init__)
def __init__(self, expansion, ni, nf, stride=1, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1,
sa=False, sym=False, norm_type=NormType.Batch, act_cls=defaults.activation, ndim=2, ks=3,
pool=AvgPool, pool_first=True, **kwargs):
norm2 = (NormType.BatchZero if norm_type==NormType.Batch else
NormType.InstanceZero if norm_type==NormType.Instance else norm_type)
if nh2 is None: nh2 = nf
if nh1 is None: nh1 = nh2
nf,ni = nf*expansion,ni*expansion
k0 = dict(norm_type=norm_type, act_cls=act_cls, ndim=ndim, **kwargs)
k1 = dict(norm_type=norm2, act_cls=None, ndim=ndim, **kwargs)
convpath = [ConvLayer(ni, nh2, ks, stride=stride, groups=ni if dw else groups, **k0),
ConvLayer(nh2, nf, ks, groups=g2, **k1)
] if expansion == 1 else [
ConvLayer(ni, nh1, 1, **k0),
ConvLayer(nh1, nh2, ks, stride=stride, groups=nh1 if dw else groups, **k0),
ConvLayer(nh2, nf, 1, groups=g2, **k1)]
if reduction: convpath.append(SEModule(nf, reduction=reduction, act_cls=act_cls))
if sa: convpath.append(SimpleSelfAttention(nf,ks=1,sym=sym))
self.convpath = nn.Sequential(*convpath)
idpath = []
if ni!=nf: idpath.append(ConvLayer(ni, nf, 1, act_cls=None, ndim=ndim, **kwargs))
if stride!=1: idpath.insert((1,0)[pool_first], pool(stride, ndim=ndim, ceil_mode=True))
self.idpath = nn.Sequential(*idpath)
self.act = defaults.activation(inplace=True) if act_cls is defaults.activation else act_cls()
def forward(self, x): return self.act(self.convpath(x) + self.idpath(x))
# Cell
def SEBlock(expansion, ni, nf, groups=1, reduction=16, stride=1, **kwargs):
return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh1=nf*2, nh2=nf*expansion, **kwargs)
# Cell
def SEResNeXtBlock(expansion, ni, nf, groups=32, reduction=16, stride=1, base_width=4, **kwargs):
w = math.floor(nf * (base_width / 64)) * groups
return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh2=w, **kwargs)
# Cell
def SeparableBlock(expansion, ni, nf, reduction=16, stride=1, base_width=4, **kwargs):
return ResBlock(expansion, ni, nf, stride=stride, reduction=reduction, nh2=nf*2, dw=True, **kwargs)
# Cell
from torch.jit import script
# Cell
@script
def _swish_jit_fwd(x): return x.mul(torch.sigmoid(x))
@script
def _swish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))
class _SwishJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return _swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
return _swish_jit_bwd(x, grad_output)
# Cell
def swish(x, inplace=False): return _SwishJitAutoFn.apply(x)
# Cell
class Swish(Module):
def forward(self, x): return _SwishJitAutoFn.apply(x)
# Cell
@script
def _mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x)))
@script
def _mish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = F.softplus(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
class MishJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return _mish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
return _mish_jit_bwd(x, grad_output)
# Cell
def mish(x): return MishJitAutoFn.apply(x)
# Cell
class Mish(Module):
def forward(self, x): return MishJitAutoFn.apply(x)
# Cell
for o in swish,Swish,mish,Mish: o.__default_init__ = kaiming_uniform_
# Cell
class ParameterModule(Module):
"Register a lone parameter `p` in a module."
def __init__(self, p): self.val = p
def forward(self, x): return x
# Cell
def children_and_parameters(m):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children
# Cell
def has_children(m):
try: next(m.children())
except StopIteration: return False
return True
# Cell
def flatten_model(m):
"Return the list of all submodules and parameters of `m`"
return sum(map(flatten_model,children_and_parameters(m)),[]) if has_children(m) else [m]
# Cell
class NoneReduce():
"A context manager to evaluate `loss_func` with none reduce."
def __init__(self, loss_func): self.loss_func,self.old_red = loss_func,None
def __enter__(self):
if hasattr(self.loss_func, 'reduction'):
self.old_red = self.loss_func.reduction
self.loss_func.reduction = 'none'
return self.loss_func
else: return partial(self.loss_func, reduction='none')
def __exit__(self, type, value, traceback):
if self.old_red is not None: self.loss_func.reduction = self.old_red
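# Illustrative usage (loss_func, preds and targs are placeholder names):
#   with NoneReduce(loss_func) as lf:
#       per_item_losses = lf(preds, targs)  # reduction disabled: one loss value per item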
# Cell
def in_channels(m):
"Return the shape of the first weight layer in `m`."
for l in flatten_model(m):
if getattr(l, 'weight', None) is not None and l.weight.ndim==4:
return l.weight.shape[1]
    raise Exception('No weight layer')
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/layers.py | layers.py
__all__ = ['BaseLoss', 'CrossEntropyLossFlat', 'BCEWithLogitsLossFlat', 'BCELossFlat', 'MSELossFlat', 'L1LossFlat',
'LabelSmoothingCrossEntropy', 'LabelSmoothingCrossEntropyFlat']
# Cell
from .imports import *
from .torch_imports import *
from .torch_core import *
from .layers import *
# Cell
class BaseLoss():
"Same as `loss_cls`, but flattens input and target."
activation=decodes=noops
def __init__(self, loss_cls, *args, axis=-1, flatten=True, floatify=False, is_2d=True, **kwargs):
store_attr("axis,flatten,floatify,is_2d")
self.func = loss_cls(*args,**kwargs)
functools.update_wrapper(self, self.func)
def __repr__(self): return f"FlattenedLoss of {self.func}"
@property
def reduction(self): return self.func.reduction
@reduction.setter
def reduction(self, v): self.func.reduction = v
def _contiguous(self,x):
return TensorBase(x.transpose(self.axis,-1).contiguous()) if isinstance(x,torch.Tensor) else x
def __call__(self, inp, targ, **kwargs):
inp,targ = map(self._contiguous, (inp,targ))
if self.floatify and targ.dtype!=torch.float16: targ = targ.float()
if targ.dtype in [torch.int8, torch.int16, torch.int32]: targ = targ.long()
if self.flatten: inp = inp.view(-1,inp.shape[-1]) if self.is_2d else inp.view(-1)
return self.func.__call__(inp, targ.view(-1) if self.flatten else targ, **kwargs)
# Cell
@delegates()
class CrossEntropyLossFlat(BaseLoss):
"Same as `nn.CrossEntropyLoss`, but flattens input and target."
y_int = True
@use_kwargs_dict(keep=True, weight=None, ignore_index=-100, reduction='mean')
def __init__(self, *args, axis=-1, **kwargs): super().__init__(nn.CrossEntropyLoss, *args, axis=axis, **kwargs)
def decodes(self, x): return x.argmax(dim=self.axis)
def activation(self, x): return F.softmax(x, dim=self.axis)
# Cell
@delegates()
class BCEWithLogitsLossFlat(BaseLoss):
"Same as `nn.BCEWithLogitsLoss`, but flattens input and target."
@use_kwargs_dict(keep=True, weight=None, reduction='mean', pos_weight=None)
def __init__(self, *args, axis=-1, floatify=True, thresh=0.5, **kwargs):
if kwargs.get('pos_weight', None) is not None and kwargs.get('flatten', None) is True:
raise ValueError("`flatten` must be False when using `pos_weight` to avoid a RuntimeError due to shape mismatch")
if kwargs.get('pos_weight', None) is not None: kwargs['flatten'] = False
super().__init__(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
self.thresh = thresh
def decodes(self, x): return x>self.thresh
def activation(self, x): return torch.sigmoid(x)
# Cell
@use_kwargs_dict(weight=None, reduction='mean')
def BCELossFlat(*args, axis=-1, floatify=True, **kwargs):
"Same as `nn.BCELoss`, but flattens input and target."
return BaseLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
# Cell
@use_kwargs_dict(reduction='mean')
def MSELossFlat(*args, axis=-1, floatify=True, **kwargs):
"Same as `nn.MSELoss`, but flattens input and target."
return BaseLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
# Cell
@use_kwargs_dict(reduction='mean')
def L1LossFlat(*args, axis=-1, floatify=True, **kwargs):
"Same as `nn.L1Loss`, but flattens input and target."
return BaseLoss(nn.L1Loss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
# Cell
class LabelSmoothingCrossEntropy(Module):
y_int = True
def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction
def forward(self, output, target):
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
if self.reduction=='sum': loss = -log_preds.sum()
else:
loss = -log_preds.sum(dim=-1) #We divide by that size at the return line so sum and not mean
if self.reduction=='mean': loss = loss.mean()
return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), reduction=self.reduction)
def activation(self, out): return F.softmax(out, dim=-1)
def decodes(self, out): return out.argmax(dim=-1)
# Cell
@delegates()
class LabelSmoothingCrossEntropyFlat(BaseLoss):
"Same as `LabelSmoothingCrossEntropy`, but flattens input and target."
y_int = True
@use_kwargs_dict(keep=True, eps=0.1, reduction='mean')
def __init__(self, *args, axis=-1, **kwargs): super().__init__(LabelSmoothingCrossEntropy, *args, axis=axis, **kwargs)
def activation(self, out): return F.softmax(out, dim=-1)
    def decodes(self, out): return out.argmax(dim=-1)
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/losses.py | losses.py
__all__ = ['AccumMetric', 'skm_to_fastai', 'optim_metric', 'accuracy', 'error_rate', 'top_k_accuracy', 'APScoreBinary',
'BalancedAccuracy', 'BrierScore', 'CohenKappa', 'F1Score', 'FBeta', 'HammingLoss', 'Jaccard', 'Precision',
'Recall', 'RocAuc', 'RocAucBinary', 'MatthewsCorrCoef', 'Perplexity', 'perplexity', 'accuracy_multi',
'APScoreMulti', 'BrierScoreMulti', 'F1ScoreMulti', 'FBetaMulti', 'HammingLossMulti', 'JaccardMulti',
'MatthewsCorrCoefMulti', 'PrecisionMulti', 'RecallMulti', 'RocAucMulti', 'mse', 'rmse', 'mae', 'msle',
'exp_rmspe', 'ExplainedVariance', 'R2Score', 'PearsonCorrCoef', 'SpearmanCorrCoef', 'foreground_acc', 'Dice',
'DiceMulti', 'JaccardCoeff', 'CorpusBLEUMetric', 'LossMetric', 'LossMetrics']
# Cell
from .data.all import *
from .optimizer import *
from .learner import *
# Cell
import sklearn.metrics as skm
import scipy.stats as scs
import scipy.optimize
# Cell
mk_class('ActivationType', **{o:o.lower() for o in ['No', 'Sigmoid', 'Softmax', 'BinarySoftmax']},
doc="All possible activation classes for `AccumMetric")
# Cell
class AccumMetric(Metric):
"Stores predictions and targets on CPU in accumulate to perform final calculations with `func`."
def __init__(self, func, dim_argmax=None, activation=ActivationType.No, thresh=None, to_np=False,
invert_arg=False, flatten=True, **kwargs):
store_attr('func,dim_argmax,activation,thresh,flatten')
self.to_np,self.invert_args,self.kwargs = to_np,invert_arg,kwargs
def reset(self):
"Clear all targs and preds"
self.targs,self.preds = [],[]
def accumulate(self, learn):
"Store targs and preds from `learn`, using activation function and argmax as appropriate"
pred = learn.pred
if self.activation in [ActivationType.Softmax, ActivationType.BinarySoftmax]:
pred = F.softmax(pred, dim=self.dim_argmax)
if self.activation == ActivationType.BinarySoftmax: pred = pred[:, -1]
elif self.activation == ActivationType.Sigmoid: pred = torch.sigmoid(pred)
elif self.dim_argmax: pred = pred.argmax(dim=self.dim_argmax)
if self.thresh: pred = (pred >= self.thresh)
self.accum_values(pred,learn.y,learn)
def accum_values(self, preds, targs,learn=None):
"Store targs and preds"
to_d = learn.to_detach if learn is not None else to_detach
preds,targs = to_d(preds),to_d(targs)
if self.flatten: preds,targs = flatten_check(preds,targs)
self.preds.append(preds)
self.targs.append(targs)
def __call__(self, preds, targs):
"Calculate metric on one batch of data"
self.reset()
self.accum_values(preds,targs)
return self.value
@property
def value(self):
"Value of the metric using accumulated preds and targs"
if len(self.preds) == 0: return
preds,targs = torch.cat(self.preds),torch.cat(self.targs)
if self.to_np: preds,targs = preds.numpy(),targs.numpy()
return self.func(targs, preds, **self.kwargs) if self.invert_args else self.func(preds, targs, **self.kwargs)
@property
def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
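# Example (illustrative, not executed): wrap any `func(preds, targs)` so it is evaluated once
# per epoch on the accumulated, detached predictions; `dim_argmax` takes the argmax first:
#   def exact_match(preds, targs): return (preds == targs).float().mean()
#   metric = AccumMetric(exact_match, dim_argmax=-1)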
# Cell
def skm_to_fastai(func, is_class=True, thresh=None, axis=-1, activation=None, **kwargs):
"Convert `func` from sklearn.metrics to a fastai metric"
dim_argmax = axis if is_class and thresh is None else None
if activation is None:
activation = ActivationType.Sigmoid if (is_class and thresh is not None) else ActivationType.No
return AccumMetric(func, dim_argmax=dim_argmax, activation=activation, thresh=thresh,
to_np=True, invert_arg=True, **kwargs)
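# Example (illustrative, not executed): any sklearn metric with a `(y_true, y_pred)` signature
# can be adapted; with `thresh` set, a sigmoid is applied and predictions are binarized first:
#   subset_acc = skm_to_fastai(skm.accuracy_score, thresh=0.5, flatten=False)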
# Cell
def optim_metric(f, argname, bounds, tol=0.01, do_neg=True, get_x=False):
"Replace metric `f` with a version that optimizes argument `argname`"
def _f(preds, targs):
def minfunc(x):
kwargs = {argname:x}
res = f(preds, targs, **kwargs)
return -res if do_neg else res
        optres = scipy.optimize.minimize_scalar(minfunc, bounds=bounds, method='bounded',
                                                options={'xatol':tol})
fun = -optres.fun if do_neg else optres.fun
return (fun,optres.x) if get_x else fun
_f.__name__ = f'opt_{f.__name__}'
return _f
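# Example (illustrative, not executed): search a decision threshold on validation predictions,
# assuming `preds` are probabilities and `targs` are {0,1} arrays (both hypothetical here):
#   def fbeta_at(preds, targs, thresh=0.5): return skm.fbeta_score(targs, preds > thresh, beta=2)
#   opt_fbeta = optim_metric(fbeta_at, 'thresh', bounds=(0.1, 0.9))
#   best_score = opt_fbeta(preds, targs)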
# Cell
def accuracy(inp, targ, axis=-1):
"Compute accuracy with `targ` when `pred` is bs * n_classes"
pred,targ = flatten_check(inp.argmax(dim=axis), targ)
return (pred == targ).float().mean()
# Cell
def error_rate(inp, targ, axis=-1):
"1 - `accuracy`"
return 1 - accuracy(inp, targ, axis=axis)
# Cell
def top_k_accuracy(inp, targ, k=5, axis=-1):
"Computes the Top-k accuracy (`targ` is in the top `k` predictions of `inp`)"
inp = inp.topk(k=k, dim=axis)[1]
targ = targ.unsqueeze(dim=axis).expand_as(inp)
return (inp == targ).sum(dim=-1).float().mean()
# Cell
def APScoreBinary(axis=-1, average='macro', pos_label=1, sample_weight=None):
"Average Precision for single-label binary classification problems"
return skm_to_fastai(skm.average_precision_score, axis=axis, activation=ActivationType.BinarySoftmax,
average=average, pos_label=pos_label, sample_weight=sample_weight)
# Cell
def BalancedAccuracy(axis=-1, sample_weight=None, adjusted=False):
"Balanced Accuracy for single-label binary classification problems"
return skm_to_fastai(skm.balanced_accuracy_score, axis=axis,
sample_weight=sample_weight, adjusted=adjusted)
# Cell
def BrierScore(axis=-1, sample_weight=None, pos_label=None):
"Brier score for single-label classification problems"
return skm_to_fastai(skm.brier_score_loss, axis=axis,
sample_weight=sample_weight, pos_label=pos_label)
# Cell
def CohenKappa(axis=-1, labels=None, weights=None, sample_weight=None):
"Cohen kappa for single-label classification problems"
return skm_to_fastai(skm.cohen_kappa_score, axis=axis, labels=labels, weights=weights,
sample_weight=sample_weight)
# Cell
def F1Score(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"F1 score for single-label classification problems"
return skm_to_fastai(skm.f1_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def FBeta(beta, axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"FBeta score with `beta` for single-label classification problems"
return skm_to_fastai(skm.fbeta_score, axis=axis,
beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def HammingLoss(axis=-1, sample_weight=None):
"Hamming loss for single-label classification problems"
return skm_to_fastai(skm.hamming_loss, axis=axis,
sample_weight=sample_weight)
# Cell
def Jaccard(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Jaccard score for single-label classification problems"
return skm_to_fastai(skm.jaccard_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def Precision(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Precision for single-label classification problems"
return skm_to_fastai(skm.precision_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def Recall(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Recall for single-label classification problems"
return skm_to_fastai(skm.recall_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def RocAuc(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='ovr'):
"Area Under the Receiver Operating Characteristic Curve for single-label multiclass classification problems"
assert multi_class in ['ovr', 'ovo']
return skm_to_fastai(skm.roc_auc_score, axis=axis, activation=ActivationType.Softmax, flatten=False,
average=average, sample_weight=sample_weight, max_fpr=max_fpr, multi_class=multi_class)
# Cell
def RocAucBinary(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='raise'):
"Area Under the Receiver Operating Characteristic Curve for single-label binary classification problems"
return skm_to_fastai(skm.roc_auc_score, axis=axis, activation=ActivationType.BinarySoftmax,
average=average, sample_weight=sample_weight, max_fpr=max_fpr, multi_class=multi_class)
# Cell
def MatthewsCorrCoef(sample_weight=None, **kwargs):
"Matthews correlation coefficient for single-label classification problems"
return skm_to_fastai(skm.matthews_corrcoef, sample_weight=sample_weight, **kwargs)
# Cell
class Perplexity(AvgLoss):
"Perplexity (exponential of cross-entropy loss) for Language Models"
@property
def value(self): return torch.exp(self.total/self.count) if self.count != 0 else None
@property
def name(self): return "perplexity"
perplexity = Perplexity()
# Cell
def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True):
"Compute accuracy when `inp` and `targ` are the same size."
inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
return ((inp>thresh)==targ.bool()).float().mean()
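# Example (illustrative, not executed): with logits of shape (bs, n_labels) and a same-sized
# 0/1 target, a sigmoid is applied, predictions are thresholded at 0.5 and matches averaged:
#   accuracy_multi(torch.randn(8, 5), torch.randint(0, 2, (8, 5)))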
# Cell
def APScoreMulti(sigmoid=True, average='macro', pos_label=1, sample_weight=None):
"Average Precision for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.average_precision_score, activation=activation, flatten=False,
average=average, pos_label=pos_label, sample_weight=sample_weight)
# Cell
def BrierScoreMulti(thresh=0.5, sigmoid=True, sample_weight=None, pos_label=None):
"Brier score for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.brier_score_loss, thresh=thresh, activation=activation, flatten=False,
sample_weight=sample_weight, pos_label=pos_label)
# Cell
def F1ScoreMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"F1 score for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.f1_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def FBetaMulti(beta, thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"FBeta score with `beta` for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.fbeta_score, thresh=thresh, activation=activation, flatten=False,
beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def HammingLossMulti(thresh=0.5, sigmoid=True, labels=None, sample_weight=None):
"Hamming loss for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.hamming_loss, thresh=thresh, activation=activation, flatten=False,
sample_weight=sample_weight)
# Cell
def JaccardMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"Jaccard score for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.jaccard_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def MatthewsCorrCoefMulti(thresh=0.5, sigmoid=True, sample_weight=None):
"Matthews correlation coefficient for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.matthews_corrcoef, thresh=thresh, activation=activation, flatten=False, sample_weight=sample_weight)
# Cell
def PrecisionMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"Precision for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.precision_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def RecallMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"Recall for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.recall_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def RocAucMulti(sigmoid=True, average='macro', sample_weight=None, max_fpr=None):
"Area Under the Receiver Operating Characteristic Curve for multi-label binary classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.roc_auc_score, activation=activation, flatten=False,
average=average, sample_weight=sample_weight, max_fpr=max_fpr)
# Cell
def mse(inp,targ):
"Mean squared error between `inp` and `targ`."
return F.mse_loss(*flatten_check(inp,targ))
# Cell
def _rmse(inp, targ): return torch.sqrt(F.mse_loss(inp, targ))
rmse = AccumMetric(_rmse)
rmse.__doc__ = "Root mean squared error"
# Cell
def mae(inp,targ):
"Mean absolute error between `inp` and `targ`."
inp,targ = flatten_check(inp,targ)
return torch.abs(inp - targ).mean()
# Cell
def msle(inp, targ):
"Mean squared logarithmic error between `inp` and `targ`."
inp,targ = flatten_check(inp,targ)
return F.mse_loss(torch.log(1 + inp), torch.log(1 + targ))
# Cell
def _exp_rmspe(inp,targ):
inp,targ = torch.exp(inp),torch.exp(targ)
return torch.sqrt(((targ - inp)/targ).pow(2).mean())
exp_rmspe = AccumMetric(_exp_rmspe)
exp_rmspe.__doc__ = "Root mean square percentage error of the exponential of predictions and targets"
# Cell
def ExplainedVariance(sample_weight=None):
"Explained variance between predictions and targets"
return skm_to_fastai(skm.explained_variance_score, is_class=False, sample_weight=sample_weight)
# Cell
def R2Score(sample_weight=None):
"R2 score between predictions and targets"
return skm_to_fastai(skm.r2_score, is_class=False, sample_weight=sample_weight)
# Cell
@delegates(AccumMetric)
def PearsonCorrCoef(dim_argmax=None, **kwargs):
"Pearson correlation coefficient for regression problem"
def pearsonr(x,y): return scs.pearsonr(x,y)[0]
return AccumMetric(pearsonr, invert_arg=False, dim_argmax=dim_argmax, **kwargs)
# Cell
@delegates(AccumMetric)
def SpearmanCorrCoef(dim_argmax=None, axis=0, nan_policy='propagate', **kwargs):
"Spearman correlation coefficient for regression problem"
def spearmanr(a,b=None,**kwargs): return scs.spearmanr(a,b,**kwargs)[0]
return AccumMetric(partial(spearmanr, axis=axis, nan_policy=nan_policy),
invert_arg=False, dim_argmax=dim_argmax, **kwargs)
# Cell
def foreground_acc(inp, targ, bkg_idx=0, axis=1):
"Computes non-background accuracy for multiclass segmentation"
targ = targ.squeeze(1)
mask = targ != bkg_idx
return (inp.argmax(dim=axis)[mask]==targ[mask]).float().mean()
# Cell
class Dice(Metric):
"Dice coefficient metric for binary target in segmentation"
def __init__(self, axis=1): self.axis = axis
def reset(self): self.inter,self.union = 0,0
def accumulate(self, learn):
pred,targ = flatten_check(learn.pred.argmax(dim=self.axis), learn.y)
self.inter += (pred*targ).float().sum().item()
self.union += (pred+targ).float().sum().item()
@property
def value(self): return 2. * self.inter/self.union if self.union > 0 else None
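# Note (illustrative): `inter` accumulates true positives and `union` accumulates
# (2*TP + FP + FN) over the whole dataset, so `value` is the dataset-level Dice score
# 2*TP / (2*TP + FP + FN) rather than an average of per-batch scores.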
# Cell
class DiceMulti(Metric):
"Averaged Dice metric (Macro F1) for multiclass target in segmentation"
def __init__(self, axis=1): self.axis = axis
def reset(self): self.inter,self.union = {},{}
def accumulate(self, learn):
pred,targ = flatten_check(learn.pred.argmax(dim=self.axis), learn.y)
for c in range(learn.pred.shape[self.axis]):
p = torch.where(pred == c, 1, 0)
t = torch.where(targ == c, 1, 0)
c_inter = (p*t).float().sum().item()
c_union = (p+t).float().sum().item()
if c in self.inter:
self.inter[c] += c_inter
self.union[c] += c_union
else:
self.inter[c] = c_inter
self.union[c] = c_union
@property
def value(self):
binary_dice_scores = np.array([])
for c in self.inter:
binary_dice_scores = np.append(binary_dice_scores, 2.*self.inter[c]/self.union[c] if self.union[c] > 0 else np.nan)
return np.nanmean(binary_dice_scores)
# Cell
class JaccardCoeff(Dice):
"Implementation of the Jaccard coefficient that is lighter in RAM"
@property
def value(self): return self.inter/(self.union-self.inter) if self.union > 0 else None
# Cell
class CorpusBLEUMetric(Metric):
def __init__(self, vocab_sz=5000, axis=-1):
"BLEU Metric calculated over the validation corpus"
self.metric_name = 'CorpusBLEU'
self.axis, self.vocab_sz = axis, vocab_sz
        self.pred_len,self.targ_len,self.samp_idx,self.corrects,self.counts = 0,0,0,[0]*4,[0]*4
def reset(self):
self.pred_len,self.targ_len,self.corrects,self.counts = 0,0,[0]*4,[0]*4
class NGram():
def __init__(self, ngram, max_n=5000): self.ngram,self.max_n = ngram,max_n
def __eq__(self, other):
if len(self.ngram) != len(other.ngram): return False
return np.all(np.array(self.ngram) == np.array(other.ngram))
def __hash__(self): return int(sum([o * self.max_n**i for i,o in enumerate(self.ngram)]))
def get_grams(self, x, n, max_n=5000):
return x if n==1 else [self.NGram(x[i:i+n], max_n=max_n) for i in range(len(x)-n+1)]
def get_correct_ngrams(self, pred, targ, n, max_n=5000):
pred_grams,targ_grams = self.get_grams(pred, n, max_n=max_n),self.get_grams(targ, n, max_n=max_n)
pred_cnt,targ_cnt = Counter(pred_grams),Counter(targ_grams)
return sum([min(c, targ_cnt[g]) for g,c in pred_cnt.items()]),len(pred_grams)
def accumulate(self, learn):
if learn.training: return None
else:
last_output = learn.pred.argmax(dim=self.axis)
last_target = learn.y
for pred,targ in zip(last_output.cpu().numpy(),last_target.cpu().numpy()):
self.pred_len += len(pred)
self.targ_len += len(targ)
smooth_mteval = 1
for i in range(4):
c,t = self.get_correct_ngrams(pred, targ, i+1, max_n=self.vocab_sz)
if c == 0:
smooth_mteval *= 2
c = 1 / smooth_mteval # exp smoothing, method 3 from http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
self.corrects[i] += c
self.counts[i] += t
@property
def value(self):
        if max(self.counts) == 0: return None
elif max(self.corrects) == 0: return 0.0
else:
precs = [c/t for c,t in zip(self.corrects,self.counts)]
len_penalty = math.exp(1 - self.targ_len/self.pred_len) if self.pred_len < self.targ_len else 1
return len_penalty * ((precs[0]*precs[1]*precs[2]*precs[3]) ** 0.25)
# Cell
class LossMetric(AvgMetric):
"Create a metric from `loss_func.attr` named `nm`"
def __init__(self, attr, nm=None): store_attr('attr,nm')
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(getattr(learn.loss_func, self.attr, 0))*bs
self.count += bs
@property
def name(self): return self.attr if self.nm is None else self.nm
# Cell
def LossMetrics(attrs, nms=None):
"List of `LossMetric` for each of `attrs` and `nms`"
if isinstance(attrs, str): attrs = attrs.split(',')
nms = attrs if nms is None else nms.split(',') if isinstance(nms, str) else nms
return [LossMetric(a, n) for a,n in zip(attrs,nms)] | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/metrics.py | metrics.py |
__all__ = ['Optimizer', 'sgd_step', 'weight_decay', 'l2_reg', 'average_grad', 'average_sqr_grad', 'momentum_step',
'SGD', 'rms_prop_step', 'RMSProp', 'step_stat', 'debias', 'adam_step', 'Adam', 'radam_step', 'RAdam',
'qhadam_step', 'QHAdam', 'larc_layer_lr', 'larc_step', 'Larc', 'lamb_step', 'Lamb', 'Lookahead', 'ranger',
'detuplify_pg', 'set_item_pg', 'pytorch_hp_map', 'OptimWrapper']
# Cell
from .torch_basics import *
# Cell
class _BaseOptimizer():
"Common functionality between `Optimizer` and `OptimWrapper`"
def all_params(self, n=slice(None), with_grad=False):
res = L((p,pg,self.state[p],hyper) for pg,hyper in zip(self.param_lists[n],self.hypers[n]) for p in pg)
return L(o for o in res if hasattr(o[0], 'grad') and o[0].grad is not None) if with_grad else res
def _set_require_grad(self, rg, p,pg,state,h): p.requires_grad_(rg or state.get('force_train', False))
def freeze_to(self, n):
self.frozen_idx = n if n >= 0 else len(self.param_lists) + n
if self.frozen_idx >= len(self.param_lists):
warn(f"Freezing {self.frozen_idx} groups; model has {len(self.param_lists)}; whole model is frozen.")
for o in self.all_params(slice(n, None)): self._set_require_grad(True, *o)
for o in self.all_params(slice(None, n)): self._set_require_grad(False, *o)
def freeze(self):
assert(len(self.param_lists)>1)
self.freeze_to(-1)
def set_freeze(self, n, rg, ignore_force_train=False):
        for p in self.param_lists[n]: p.requires_grad_(rg or (self.state[p].get('force_train', False) and not ignore_force_train))
def unfreeze(self): self.freeze_to(0)
def set_hypers(self, **kwargs): L(kwargs.items()).starmap(self.set_hyper)
def _set_hyper(self, k, v):
for v_,h in zip(v, self.hypers): h[k] = v_
def set_hyper(self, k, v):
if isinstance(v, slice):
if v.start: v = even_mults(v.start, v.stop, len(self.param_lists))
else: v = [v.stop/10]*(len(self.param_lists)-1) + [v.stop]
v = L(v, use_list=None)
if len(v)==1: v = v*len(self.param_lists)
assert len(v) == len(self.hypers), f"Trying to set {len(v)} values for {k} but there are {len(self.param_lists)} parameter groups."
self._set_hyper(k, v)
@property
def param_groups(self): return [{**{'params': pg}, **hp} for pg,hp in zip(self.param_lists, self.hypers)]
@param_groups.setter
def param_groups(self, v):
        self.param_lists = L(L(v_['params']) for v_ in v)
for hyper,v_ in zip(self.hypers,v):
for k,t in v_.items():
if k != 'params': hyper[k] = t
# Cell
def _update(state, new=None):
if new is None: return state
if isinstance(new, dict): state.update(new)
return state
# Cell
class Optimizer(_BaseOptimizer):
"Base optimizer class for the fastai library, updating `params` with `cbs`"
_keep_on_clear = ['force_train', 'do_wd']
def __init__(self, params, cbs, train_bn=True, **defaults):
params = L(params)
self.cbs,self.state,self.train_bn = L(cbs),defaultdict(dict),train_bn
defaults = merge(*self.cbs.attrgot('defaults'), defaults)
self.param_lists = L(L(p) for p in params) if isinstance(params[0], (L,list)) else L([params])
self.hypers = L({} for _ in range_of(self.param_lists))
self.set_hypers(**defaults)
self.frozen_idx = 0
def zero_grad(self):
for p,*_ in self.all_params(with_grad=True):
p.grad.detach_()
p.grad.zero_()
def step(self):
for p,pg,state,hyper in self.all_params(with_grad=True):
for cb in self.cbs: state = _update(state, cb(p, **{**state, **hyper}))
self.state[p] = state
def clear_state(self):
for p,pg,state,hyper in self.all_params():
self.state[p] = {k: state[k] for k in self._keep_on_clear if k in state}
def state_dict(self):
state = [self.state[p] for p,*_ in self.all_params()]
return {'state': state, 'hypers': self.hypers}
def load_state_dict(self, sd):
assert len(sd["hypers"]) == len(self.param_lists)
assert len(sd["state"]) == sum([len(pg) for pg in self.param_lists])
self.hypers = sd['hypers']
self.state = {p: s for p,s in zip(self.all_params().itemgot(0), sd['state'])}
# Cell
def sgd_step(p, lr, **kwargs):
p.data.add_(p.grad.data, alpha=-lr)
# Cell
def weight_decay(p, lr, wd, do_wd=True, **kwargs):
"Weight decay as decaying `p` with `lr*wd`"
if do_wd and wd!=0: p.data.mul_(1 - lr*wd)
weight_decay.defaults = dict(wd=0.)
# Cell
def l2_reg(p, lr, wd, do_wd=True, **kwargs):
"L2 regularization as adding `wd*p` to `p.grad`"
if do_wd and wd!=0: p.grad.data.add_(p.data, alpha=wd)
l2_reg.defaults = dict(wd=0.)
# Cell
def average_grad(p, mom, dampening=False, grad_avg=None, **kwargs):
"Keeps track of the avg grads of `p` in `state` with `mom`."
if grad_avg is None: grad_avg = torch.zeros_like(p.grad.data)
damp = 1-mom if dampening else 1.
grad_avg.mul_(mom).add_(p.grad.data, alpha=damp)
return {'grad_avg': grad_avg}
average_grad.defaults = dict(mom=0.9)
# Cell
def average_sqr_grad(p, sqr_mom, dampening=True, sqr_avg=None, **kwargs):
if sqr_avg is None: sqr_avg = torch.zeros_like(p.grad.data)
damp = 1-sqr_mom if dampening else 1.
sqr_avg.mul_(sqr_mom).addcmul_(p.grad.data, p.grad.data, value=damp)
return {'sqr_avg': sqr_avg}
average_sqr_grad.defaults = dict(sqr_mom=0.99)
# Cell
def momentum_step(p, lr, grad_avg, **kwargs):
"Step for SGD with momentum with `lr`"
p.data.add_(grad_avg, alpha=-lr)
# Cell
def SGD(params, lr, mom=0., wd=0., decouple_wd=True):
"A `Optimizer` for SGD with `lr` and `mom` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
if mom != 0: cbs.append(average_grad)
cbs.append(sgd_step if mom==0 else momentum_step)
return Optimizer(params, cbs, lr=lr, mom=mom, wd=wd)
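# Example (illustrative, not executed): the optimizer can also be used on its own, outside a
# Learner; `model` and `loss` below are hypothetical:
#   opt = SGD(model.parameters(), lr=0.1, mom=0.9)
#   loss.backward(); opt.step(); opt.zero_grad()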
# Cell
def rms_prop_step(p, lr, sqr_avg, eps, grad_avg=None, **kwargs):
"Step for SGD with momentum with `lr`"
denom = sqr_avg.sqrt().add_(eps)
p.data.addcdiv_((grad_avg if grad_avg is not None else p.grad), denom, value=-lr)
rms_prop_step.defaults = dict(eps=1e-8)
# Cell
def RMSProp(params, lr, sqr_mom=0.99, mom=0., wd=0., decouple_wd=True):
"A `Optimizer` for RMSProp with `lr`, `sqr_mom`, `mom` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += ([average_sqr_grad] if mom==0. else [average_grad, average_sqr_grad])
cbs.append(rms_prop_step)
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, wd=wd)
# Cell
def step_stat(p, step=0, **kwargs):
"Register the number of steps done in `state` for `p`"
step += 1
return {'step' : step}
# Cell
def debias(mom, damp, step): return damp * (1 - mom**step) / (1-mom)
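# Note (illustrative): with `damp = 1-mom` (the dampening used by `average_grad`/`average_sqr_grad`),
# `debias` reduces to 1 - mom**step, i.e. the usual Adam bias-correction factor at step `step`.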
# Cell
def adam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):
"Step for Adam with `lr` on `p`"
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
p.data.addcdiv_(grad_avg, (sqr_avg/debias2).sqrt() + eps, value = -lr / debias1)
return p
adam_step._defaults = dict(eps=1e-5)
# Cell
def Adam(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0.01, decouple_wd=True):
"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, adam_step]
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# Cell
def radam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, beta, **kwargs):
"Step for RAdam with `lr` on `p`"
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
r_inf = 2/(1-sqr_mom) - 1
r = r_inf - 2*step*sqr_mom**step/(1-sqr_mom**step)
if r > 5:
v = math.sqrt(((r-4) * (r-2) * r_inf)/((r_inf-4)*(r_inf-2)*r))
denom = (sqr_avg/debias2).sqrt()
if eps: denom += eps
if beta: denom = F.softplus(denom, beta)
p.data.addcdiv_(grad_avg, denom, value = -lr*v / debias1)
else: p.data.add_(grad_avg, alpha=-lr / debias1)
return p
radam_step._defaults = dict(eps=1e-5)
# Cell
def RAdam(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0., beta=0., decouple_wd=True):
"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, radam_step]
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd, beta=beta)
# Cell
def qhadam_step(p, lr, mom, sqr_mom, sqr_avg, nu_1, nu_2, step, grad_avg, eps, **kwargs):
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
p.data.addcdiv_(((1-nu_1) * p.grad.data) + (nu_1 * (grad_avg / debias1)),
(((1 - nu_2) * (p.grad.data)**2) + (nu_2 * (sqr_avg / debias2))).sqrt() + eps,
value = -lr)
return p
qhadam_step._defaults = dict(eps=1e-8)
# Cell
def QHAdam(params, lr, mom=0.999, sqr_mom=0.999, nu_1=0.7, nu_2 = 1.0, eps=1e-8, wd=0., decouple_wd=True):
"An `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `nus`, eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), partial(average_sqr_grad, dampening=True), step_stat, qhadam_step]
return Optimizer(params, cbs, lr=lr, nu_1=nu_1, nu_2=nu_2 ,
mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# Cell
def larc_layer_lr(p, lr, trust_coeff, wd, eps, clip=True, **kwargs):
"Computes the local lr before weight decay is applied"
p_norm,g_norm = torch.norm(p.data),torch.norm(p.grad.data)
local_lr = lr*trust_coeff * (p_norm) / (g_norm + p_norm * wd + eps)
return {'local_lr': min(lr, local_lr) if clip else local_lr}
larc_layer_lr.defaults = dict(trust_coeff=0.02, wd=0., eps=1e-8)
# Cell
def larc_step(p, local_lr, grad_avg=None, **kwargs):
"Step for LARC `local_lr` on `p`"
p.data.add_(p.grad.data if grad_avg is None else grad_avg, alpha = -local_lr)
# Cell
def Larc(params, lr, mom=0.9, clip=True, trust_coeff=0.02, eps=1e-8, wd=0., decouple_wd=True):
"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
if mom!=0.: cbs.append(average_grad)
cbs += [partial(larc_layer_lr, clip=clip), larc_step]
return Optimizer(params, cbs, lr=lr, mom=mom, trust_coeff=trust_coeff, eps=eps, wd=wd)
# Cell
def lamb_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):
"Step for LAMB with `lr` on `p`"
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
r1 = p.data.pow(2).mean().sqrt()
step = (grad_avg/debias1) / ((sqr_avg/debias2).sqrt()+eps)
r2 = step.pow(2).mean().sqrt()
q = 1 if r1 == 0 or r2 == 0 else min(r1/r2,10)
p.data.add_(step, alpha = -lr * q)
lamb_step._defaults = dict(eps=1e-6, wd=0.)
# Cell
def Lamb(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0., decouple_wd=True):
"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, lamb_step]
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# Cell
class Lookahead(Optimizer, GetAttr):
"Wrap `opt` in a lookahead optimizer"
_default='opt'
def __init__(self, opt, k=6, alpha=0.5):
store_attr('opt,k,alpha')
self._init_state()
def step(self):
if self.slow_weights is None: self._copy_weights()
self.opt.step()
self.count += 1
if self.count%self.k != 0: return
for slow_pg,fast_pg in zip(self.slow_weights,self.param_lists):
for slow_p,fast_p in zip(slow_pg,fast_pg):
slow_p.data.add_(fast_p.data-slow_p.data, alpha=self.alpha)
fast_p.data.copy_(slow_p.data)
def clear_state(self):
self.opt.clear_state()
self._init_state()
def state_dict(self):
state = self.opt.state_dict()
state.update({'count': self.count, 'slow_weights': self.slow_weights})
return state
def load_state_dict(self, sd):
self.count = sd.pop('count')
self.slow_weights = sd.pop('slow_weights')
self.opt.load_state_dict(sd)
def _init_state(self): self.count,self.slow_weights = 0,None
def _copy_weights(self): self.slow_weights = L(L(p.clone().detach() for p in pg) for pg in self.param_lists)
@property
def param_lists(self): return self.opt.param_lists
@param_lists.setter
def param_lists(self, v): self.opt.param_lists = v
# Cell
@delegates(RAdam)
def ranger(p, lr, mom=0.95, wd=0.01, eps=1e-6, **kwargs):
"Convenience method for `Lookahead` with `RAdam`"
return Lookahead(RAdam(p, lr=lr, mom=mom, wd=wd, eps=eps, **kwargs))
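# Example (illustrative, not executed): `ranger` is meant to be passed as an `opt_func`, e.g.
#   learn = Learner(dls, model, opt_func=ranger)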
# Cell
def detuplify_pg(d):
res = {}
for k,v in d.items():
if k == 'params': continue
if is_listy(v): res.update(**{f'{k}__{i}': v_ for i,v_ in enumerate(v)})
else: res[k] = v
return res
# Cell
def set_item_pg(pg, k, v):
if '__' not in k: pg[k] = v
else:
name,idx = k.split('__')
pg[name] = tuple(v if i==int(idx) else pg[name][i] for i in range_of(pg[name]))
return pg
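# Example (illustrative, not executed): PyTorch stores Adam's betas as a tuple; these helpers
# flatten and restore such entries so they can be addressed as scalar hyper-parameters:
#   pg = {'params': [], 'lr': 1e-3, 'betas': (0.9, 0.99)}
#   detuplify_pg(pg)                   # -> {'lr': 0.001, 'betas__0': 0.9, 'betas__1': 0.99}
#   set_item_pg(pg, 'betas__0', 0.95)  # pg['betas'] becomes (0.95, 0.99)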
# Cell
pytorch_hp_map = {'momentum': 'mom', 'weight_decay': 'wd', 'alpha': 'sqr_mom', 'betas__0': 'mom', 'betas__1': 'sqr_mom'}
# Cell
class OptimWrapper(_BaseOptimizer, GetAttr):
_xtra=['zero_grad', 'step', 'state_dict', 'load_state_dict']
_default='opt'
def __init__(self, opt, hp_map=None):
self.opt = opt
if hp_map is None: hp_map = pytorch_hp_map
self.fwd_map = {k: hp_map[k] if k in hp_map else k for k in detuplify_pg(opt.param_groups[0]).keys()}
self.bwd_map = {v:k for k,v in self.fwd_map.items()}
self.state = defaultdict(dict, {})
self.frozen_idx = 0
@property
def hypers(self):
return [{self.fwd_map[k]:v for k,v in detuplify_pg(pg).items() if k != 'params'} for pg in self.opt.param_groups]
def _set_hyper(self, k, v):
for pg,v_ in zip(self.opt.param_groups,v): pg = set_item_pg(pg, self.bwd_map[k], v_)
def clear_state(self): self.opt.state = defaultdict(dict, {})
@property
def param_lists(self): return [pg['params'] for pg in self.opt.param_groups]
@param_lists.setter
def param_lists(self, v):
for pg,v_ in zip(self.opt.param_groups,v): pg['params'] = v_ | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/optimizer.py | optimizer.py |
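# Example (illustrative, not executed): wrap an already-built PyTorch optimizer so fastai
# schedulers can address its hyper-parameters under the mapped names (`mom`, `wd`, ...):
#   opt = OptimWrapper(torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9))
#   opt.hypers   # e.g. [{'lr': 0.1, 'mom': 0.9, 'wd': 0.0, ...}]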
__all__ = ['CancelStepException', 'CancelFitException', 'CancelEpochException', 'CancelTrainException',
'CancelValidException', 'CancelBatchException', 'replacing_yield', 'mk_metric', 'save_model', 'load_model',
'Learner', 'before_batch_cb', 'load_learner', 'to_detach_from_dl', 'Metric', 'AvgMetric', 'AvgLoss',
'AvgSmoothLoss', 'ValueMetric', 'Recorder']
# Cell
from .data.all import *
from .optimizer import *
from .callback.core import *
import pickle
# Cell
#nbdev_comment _all_ = ['CancelStepException','CancelFitException','CancelEpochException','CancelTrainException','CancelValidException','CancelBatchException']
# Cell
defaults.lr = 1e-3
# Cell
def replacing_yield(o, attr, val):
"Context manager to temporarily replace an attribute"
old = getattr(o,attr)
try: yield setattr(o,attr,val)
finally: setattr(o,attr,old)
# Cell
def mk_metric(m):
"Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
if isinstance(m,type): m = m()
return m if isinstance(m, Metric) else AvgMetric(m)
# Cell
def save_model(file, model, opt, with_opt=True, pickle_protocol=2):
"Save `model` to `file` along with `opt` (if available, and if `with_opt`)"
if rank_distrib(): return # don't save if child proc
if opt is None: with_opt=False
state = get_model(model).state_dict()
if with_opt: state = {'model': state, 'opt':opt.state_dict()}
torch.save(state, file, pickle_protocol=pickle_protocol)
# Cell
def load_model(file, model, opt, with_opt=True, device=None, strict=True):
"Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
distrib_barrier()
if isinstance(device, int): device = torch.device('cuda', device)
elif device is None: device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = set(state)=={'model', 'opt'}
model_state = state['model'] if hasopt else state
get_model(model).load_state_dict(model_state, strict=strict)
if hasopt and with_opt:
try: opt.load_state_dict(state['opt'])
except:
if with_opt: warn("Could not load the optimizer state.")
elif with_opt: warn("Saved filed doesn't contain an optimizer state.")
# Cell
def _try_concat(o):
try: return torch.cat(o)
except: return sum([L(o_[i,:] for i in range_of(o_)) for o_ in o], L())
# Cell
_before_epoch = [event.before_fit, event.before_epoch]
_after_epoch = [event.after_epoch, event.after_fit]
# Cell
class _ConstantFunc():
"Returns a function that returns `o`"
def __init__(self, o): self.o = o
def __call__(self, *args, **kwargs): return self.o
# Cell
_loop = ['Start Fit', 'before_fit', 'Start Epoch Loop', 'before_epoch', 'Start Train', 'before_train',
'Start Batch Loop', 'before_batch', 'after_pred', 'after_loss', 'before_backward', 'before_step',
'after_step', 'after_cancel_batch', 'after_batch','End Batch Loop','End Train',
'after_cancel_train', 'after_train', 'Start Valid', 'before_validate','Start Batch Loop',
'**CBs same as train batch**', 'End Batch Loop', 'End Valid', 'after_cancel_validate',
'after_validate', 'End Epoch Loop', 'after_cancel_epoch', 'after_epoch', 'End Fit',
'after_cancel_fit', 'after_fit']
# Cell
class Learner(GetAttr):
_default='model'
def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None,
metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True,
moms=(0.95,0.85,0.95)):
path = Path(path) if path is not None else getattr(dls, 'path', Path('.'))
if loss_func is None:
loss_func = getattr(dls.train_ds, 'loss_func', None)
assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function."
self.dls,self.model = dls,model
store_attr(but='dls,model,cbs')
self.training,self.create_mbar,self.logger,self.opt,self.cbs = False,True,print,None,L()
self.add_cbs(L(defaults.callbacks)+L(cbs))
self("after_create")
@property
def metrics(self): return self._metrics
@metrics.setter
def metrics(self,v): self._metrics = L(v).map(mk_metric)
def _grab_cbs(self, cb_cls): return L(cb for cb in self.cbs if isinstance(cb, cb_cls))
def add_cbs(self, cbs):
L(cbs).map(self.add_cb)
return self
def remove_cbs(self, cbs):
L(cbs).map(self.remove_cb)
return self
def add_cb(self, cb):
if isinstance(cb, type): cb = cb()
cb.learn = self
setattr(self, cb.name, cb)
self.cbs.append(cb)
return self
def remove_cb(self, cb):
if isinstance(cb, type): self.remove_cbs(self._grab_cbs(cb))
else:
cb.learn = None
if hasattr(self, cb.name): delattr(self, cb.name)
if cb in self.cbs: self.cbs.remove(cb)
return self
@contextmanager
def added_cbs(self, cbs):
self.add_cbs(cbs)
try: yield
finally: self.remove_cbs(cbs)
@contextmanager
def removed_cbs(self, cbs):
self.remove_cbs(cbs)
try: yield self
finally: self.add_cbs(cbs)
def ordered_cbs(self, event): return [cb for cb in self.cbs.sorted('order') if hasattr(cb, event)]
def __call__(self, event_name): L(event_name).map(self._call_one)
def _call_one(self, event_name):
if not hasattr(event, event_name): raise Exception(f'missing {event_name}')
for cb in self.cbs.sorted('order'): cb(event_name)
def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)
def create_opt(self):
self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
if not self.wd_bn_bias:
for p in self._bn_bias_state(True ): p['do_wd'] = False
if self.train_bn:
for p in self._bn_bias_state(False): p['force_train'] = True
def _split(self, b):
i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else len(b)-1)
self.xb,self.yb = b[:i],b[i:]
def _with_events(self, f, event_type, ex, final=noop):
try: self(f'before_{event_type}'); f()
except ex: self(f'after_cancel_{event_type}')
self(f'after_{event_type}'); final()
def all_batches(self):
self.n_iter = len(self.dl)
for o in enumerate(self.dl): self.one_batch(*o)
def _do_one_batch(self):
self.pred = self.model(*self.xb)
self('after_pred')
if len(self.yb):
self.loss_grad = self.loss_func(self.pred, *self.yb)
self.loss = self.loss_grad.clone()
self('after_loss')
if not self.training or not len(self.yb): return
self('before_backward')
self.loss_grad.backward()
self._with_events(self.opt.step, 'step', CancelStepException)
self.opt.zero_grad()
def one_batch(self, i, b):
self.iter = i
self._split(b)
self._with_events(self._do_one_batch, 'batch', CancelBatchException)
def _do_epoch_train(self):
self.dl = self.dls.train
self._with_events(self.all_batches, 'train', CancelTrainException)
def _do_epoch_validate(self, ds_idx=1, dl=None):
if dl is None: dl = self.dls[ds_idx]
self.dl = dl
with torch.no_grad(): self._with_events(self.all_batches, 'validate', CancelValidException)
def _do_epoch(self):
self._do_epoch_train()
self._do_epoch_validate()
def _do_fit(self):
for epoch in range(self.n_epoch):
self.epoch=epoch
self._with_events(self._do_epoch, 'epoch', CancelEpochException)
def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
with self.added_cbs(cbs):
if reset_opt or not self.opt: self.create_opt()
if wd is None: wd = self.wd
if wd is not None: self.opt.set_hypers(wd=wd)
self.opt.set_hypers(lr=self.lr if lr is None else lr)
self.n_epoch = n_epoch
self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None
def __enter__(self): self(_before_epoch); return self
def __exit__(self, exc_type, exc_value, tb): self(_after_epoch)
def validation_context(self, cbs=None, inner=False):
cms = [self.no_logging(),self.no_mbar()]
if cbs: cms.append(self.added_cbs(cbs))
if not inner: cms.append(self)
return ContextManagers(cms)
def validate(self, ds_idx=1, dl=None, cbs=None):
if dl is None: dl = self.dls[ds_idx]
with self.validation_context(cbs=cbs): self._do_epoch_validate(ds_idx, dl)
return getattr(self, 'final_record', None)
@delegates(GatherPredsCallback.__init__)
def get_preds(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, with_loss=False, act=None,
inner=False, reorder=True, cbs=None, **kwargs):
if dl is None: dl = self.dls[ds_idx].new(shuffled=False, drop_last=False)
else:
try: len(dl)
except TypeError as e:
raise TypeError("`dl` is something other than a single `DataLoader` object")
if reorder and hasattr(dl, 'get_idxs'):
idxs = dl.get_idxs()
dl = dl.new(get_idxs = _ConstantFunc(idxs))
cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs)
ctx_mgrs = self.validation_context(cbs=L(cbs)+[cb], inner=inner)
if with_loss: ctx_mgrs.append(self.loss_not_reduced())
with ContextManagers(ctx_mgrs):
self._do_epoch_validate(dl=dl)
if act is None: act = getattr(self.loss_func, 'activation', noop)
res = cb.all_tensors()
pred_i = 1 if with_input else 0
if res[pred_i] is not None:
res[pred_i] = act(res[pred_i])
if with_decoded: res.insert(pred_i+2, getattr(self.loss_func, 'decodes', noop)(res[pred_i]))
if reorder and hasattr(dl, 'get_idxs'): res = nested_reorder(res, tensor(idxs).argsort())
            res = tuple(res)
        self._end_cleanup()
        return res
def predict(self, item, rm_type_tfms=None, with_input=False):
dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms, num_workers=0)
inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
i = getattr(self.dls, 'n_inp', -1)
inp = (inp,) if i==1 else tuplify(inp)
dec = self.dls.decode_batch(inp + tuplify(dec_preds))[0]
dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]])
res = dec_targ,dec_preds[0],preds[0]
if with_input: res = (dec_inp,) + res
return res
def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs):
if dl is None: dl = self.dls[ds_idx].new(shuffle=shuffle)
b = dl.one_batch()
_,_,preds = self.get_preds(dl=[b], with_decoded=True)
self.dls.show_results(b, preds, max_n=max_n, **kwargs)
def show_training_loop(self):
indent = 0
for s in _loop:
if s.startswith('Start'): print(f'{" "*indent}{s}'); indent += 2
elif s.startswith('End'): indent -= 2; print(f'{" "*indent}{s}')
else: print(f'{" "*indent} - {s:15}:', self.ordered_cbs(s))
@contextmanager
def no_logging(self): return replacing_yield(self, 'logger', noop)
@contextmanager
def no_mbar(self): return replacing_yield(self, 'create_mbar', False)
@contextmanager
def loss_not_reduced(self):
if hasattr(self.loss_func, 'reduction'): return replacing_yield(self.loss_func, 'reduction', 'none')
else: return replacing_yield(self, 'loss_func', partial(self.loss_func, reduction='none'))
def to_detach(self,b,cpu=True,gather=True):
return self.dl.to_detach(b,cpu,gather) if hasattr(getattr(self,'dl',None),'to_detach') else to_detach(b,cpu,gather)
Learner.x,Learner.y = add_props(lambda i,x: detuplify((x.xb,x.yb)[i]))
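# Example (illustrative, not executed): the minimal ingredients are a `DataLoaders`, a model and,
# if it cannot be read off the data, a loss function:
#   learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=accuracy)
#   learn.fit(1, lr=1e-3)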
# Cell
add_docs(Learner, "Group together a `model`, some `dls` and a `loss_func` to handle training",
add_cbs="Add `cbs` to the list of `Callback` and register `self` as their learner",
add_cb="Add `cb` to the list of `Callback` and register `self` as their learner",
remove_cbs="Remove `cbs` from the list of `Callback` and deregister `self` as their learner",
remove_cb="Add `cb` from the list of `Callback` and deregister `self` as their learner",
added_cbs="Context manage that temporarily adds `cbs`",
removed_cbs="Context manage that temporarily removes `cbs`",
ordered_cbs="List of `Callback`s, in order, for an `event` in the training loop",
create_opt="Create an optimizer with default hyper-parameters",
one_batch="Train or evaluate `self.model` on batch `(xb,yb)`",
all_batches="Train or evaluate `self.model` on all the batches of `self.dl`",
fit="Fit `self.model` for `n_epoch` using `cbs`. Optionally `reset_opt`.",
validate="Validate on `dl` with potential new `cbs`.",
get_preds="Get the predictions and targets on the `ds_idx`-th dbunchset or `dl`, optionally `with_input` and `with_loss`",
predict="Prediction on `item`, fully decoded, loss function decoded and probabilities",
validation_context="A `ContextManagers` suitable for validation, with optional `cbs`",
show_results="Show some predictions on `ds_idx`-th dataset or `dl`",
show_training_loop="Show each step in the training loop",
no_logging="Context manager to temporarily remove `logger`",
no_mbar="Context manager to temporarily prevent the master progress bar from being created",
loss_not_reduced="A context manager to evaluate `loss_func` with reduction set to none.",
to_detach="Calls `to_detach` if `self.dl` provides a `.to_detach` function otherwise calls global `to_detach`",
__call__="Call `event_name` for all `Callback`s in `self.cbs`"
)
# Cell
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback]
# Cell
def _before_batch_cb(f, self):
xb,yb = f(self, self.xb, self.yb)
self.learn.xb,self.learn.yb = xb,yb
# Cell
def before_batch_cb(f):
"Shortcut for creating a Callback on the `before_batch` event, which takes and returns `xb,yb`"
return Callback(before_batch=partial(_before_batch_cb, f))
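# Example (illustrative, not executed): add gaussian noise to every input batch during training:
#   @before_batch_cb
#   def noisy_batch(self, xb, yb): return tuple(x + 0.1*torch.randn_like(x) for x in xb), yb
#   learn = Learner(dls, model, cbs=noisy_batch)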
# Cell
@patch
@delegates(save_model)
def save(self:Learner, file, **kwargs):
"Save model and optimizer state (if `with_opt`) to `self.path/self.model_dir/file`"
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
save_model(file, self.model, getattr(self,'opt',None), **kwargs)
return file
# Cell
@patch
@delegates(load_model)
def load(self:Learner, file, device=None, **kwargs):
"Load model and optimizer state (if `with_opt`) from `self.path/self.model_dir/file` using `device`"
if device is None and hasattr(self.dls, 'device'): device = self.dls.device
if self.opt is None: self.create_opt()
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
load_model(file, self.model, self.opt, device=device, **kwargs)
return self
# Cell
@patch
def export(self:Learner, fname='export.pkl', pickle_module=pickle, pickle_protocol=2):
"Export the content of `self` without the items and the optimizer state for inference"
if rank_distrib(): return # don't export if child proc
self._end_cleanup()
old_dbunch = self.dls
self.dls = self.dls.new_empty()
state = self.opt.state_dict() if self.opt is not None else None
self.opt = None
with warnings.catch_warnings():
        #To avoid the warning that comes from PyTorch about the model not being checked
warnings.simplefilter("ignore")
torch.save(self, self.path/fname, pickle_module=pickle_module, pickle_protocol=pickle_protocol)
self.create_opt()
if state is not None: self.opt.load_state_dict(state)
self.dls = old_dbunch
# Cell
def load_learner(fname, cpu=True, pickle_module=pickle):
"Load a `Learner` object in `fname`, optionally putting it on the `cpu`"
distrib_barrier()
res = torch.load(fname, map_location='cpu' if cpu else None, pickle_module=pickle_module)
if hasattr(res, 'to_fp32'): res = res.to_fp32()
if cpu: res.dls.cpu()
return res
# Cell
def to_detach_from_dl(learn:(Learner,NoneType),b:object,cpu:bool=True,gather:bool=True):
return learn.dl.to_detach(b,cpu,gather) if hasattr(getattr(learn,'dl',None),'to_detach') else to_detach(b,cpu,gather)
# Cell
@docs
class Metric():
"Blueprint for defining a metric"
def reset(self): pass
def accumulate(self, learn): pass
@property
def value(self): raise NotImplementedError
@property
def name(self): return class2attr(self, 'Metric')
_docs = dict(
reset="Reset inner state to prepare for new computation",
name="Name of the `Metric`, camel-cased and with Metric removed",
accumulate="Use `learn` to update the state with new results",
value="The value of the metric")
# Cell
def _maybe_reduce(val):
if num_distrib()>1:
val = val.clone()
torch.distributed.all_reduce(val, op=torch.distributed.ReduceOp.SUM)
val /= num_distrib()
return val
# Cell
class AvgMetric(Metric):
"Average the values of `func` taking into account potential different batch sizes"
def __init__(self, func): self.func = func
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(self.func(learn.pred, *learn.yb))*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
# Cell
class AvgLoss(Metric):
"Average the losses taking into account potential different batch sizes"
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(learn.loss.mean())*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return "loss"
# Cell
class AvgSmoothLoss(Metric):
"Smooth average of the losses (exponentially weighted with `beta`)"
def __init__(self, beta=0.98): self.beta = beta
def reset(self): self.count,self.val = 0,tensor(0.)
def accumulate(self, learn):
self.count += 1
self.val = torch.lerp(to_detach(learn.loss.mean(), gather=False), self.val, self.beta)
@property
def value(self): return self.val/(1-self.beta**self.count)
# Cell
class ValueMetric(Metric):
"Use to include a pre-calculated metric value (for instance calculated in a `Callback`) and returned by `func`"
def __init__(self, func, metric_name=None): store_attr('func, metric_name')
@property
def value(self): return self.func()
@property
def name(self): return self.metric_name if self.metric_name else self.func.__name__
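# Example (illustrative, not executed): report a value computed elsewhere, e.g. by a Callback:
#   def get_bleu(): return bleu_cb.last_score   # `bleu_cb` is a hypothetical Callback
#   metric = ValueMetric(get_bleu, metric_name='bleu')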
# Cell
from fastprogress.fastprogress import format_time
# Cell
def _maybe_item(t):
t = t.value
try: return t.item()
except: return t
# Cell
class Recorder(Callback):
"Callback that registers statistics (lr, loss and metrics) during training"
_stateattrs=('lrs','iters','losses','values')
remove_on_fetch,order = True,50
def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98):
store_attr('add_time,train_metrics,valid_metrics')
self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta)
def before_fit(self):
"Prepare state for training"
self.lrs,self.iters,self.losses,self.values = [],[],[],[]
names = self.metrics.attrgot('name')
if self.train_metrics and self.valid_metrics:
names = L('loss') + names
names = names.map('train_{}') + names.map('valid_{}')
elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names
else: names = L('train_loss') + names
if self.add_time: names.append('time')
self.metric_names = 'epoch'+names
self.smooth_loss.reset()
def after_batch(self):
"Update all metrics and records lr and smooth loss in training"
if len(self.yb) == 0: return
mets = self._train_mets if self.training else self._valid_mets
for met in mets: met.accumulate(self.learn)
if not self.training: return
self.lrs.append(self.opt.hypers[-1]['lr'])
self.losses.append(self.smooth_loss.value)
self.learn.smooth_loss = self.smooth_loss.value
def before_epoch(self):
"Set timer if `self.add_time=True`"
self.cancel_train,self.cancel_valid = False,False
if self.add_time: self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0))
def before_train (self): self._train_mets[1:].map(Self.reset())
def before_validate(self): self._valid_mets.map(Self.reset())
def after_train (self): self.log += self._train_mets.map(_maybe_item)
def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
def after_cancel_train(self): self.cancel_train = True
def after_cancel_validate(self): self.cancel_valid = True
def after_epoch(self):
"Store and log the loss/metric values"
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
self.logger(self.log)
self.iters.append(self.smooth_loss.count)
@property
def _train_mets(self):
if getattr(self, 'cancel_train', False): return L()
return L(self.smooth_loss) + (self.metrics if self.train_metrics else L())
@property
def _valid_mets(self):
if getattr(self, 'cancel_valid', False): return L()
return (L(self.loss) + self.metrics if self.valid_metrics else L())
def plot_loss(self, skip_start=5, with_valid=True):
plt.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
if with_valid:
idx = (np.array(self.iters)<skip_start).sum()
plt.plot(self.iters[idx:], L(self.values[idx:]).itemgot(1), label='valid')
plt.legend()
# Cell
add_docs(Recorder,
before_train = "Reset loss and metrics state",
after_train = "Log loss and metric values on the training set (if `self.training_metrics=True`)",
before_validate = "Reset loss and metrics state",
after_validate = "Log loss and metric values on the validation set",
after_cancel_train = "Ignore training metrics for this epoch",
after_cancel_validate = "Ignore validation metrics for this epoch",
plot_loss = "Plot the losses from `skip_start` and onward")
if Recorder not in defaults.callbacks: defaults.callbacks.append(Recorder)
# Cell
@patch
def freeze_to(self:Learner, n):
if self.opt is None: self.create_opt()
self.opt.freeze_to(n)
self.opt.clear_state()
@patch
def freeze(self:Learner): self.freeze_to(-1)
@patch
def unfreeze(self:Learner): self.freeze_to(0)
add_docs(Learner,
freeze_to="Freeze parameter groups up to `n`",
freeze="Freeze up to last parameter group",
unfreeze="Unfreeze the entire model")
# Cell
@patch
def tta(self:Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
"Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation"
if dl is None: dl = self.dls[ds_idx]
if item_tfms is not None or batch_tfms is not None: dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
try:
self(_before_epoch)
with dl.dataset.set_split_idx(0), self.no_mbar():
if hasattr(self,'progress'): self.progress.mbar = master_bar(list(range(n)))
aug_preds = []
for i in self.progress.mbar if hasattr(self,'progress') else range(n):
self.epoch = i #To keep track of progress on mbar since the progress callback will use self.epoch
aug_preds.append(self.get_preds(dl=dl, inner=True)[0][None])
aug_preds = torch.cat(aug_preds)
aug_preds = aug_preds.max(0)[0] if use_max else aug_preds.mean(0)
self.epoch = n
with dl.dataset.set_split_idx(1): preds,targs = self.get_preds(dl=dl, inner=True)
finally: self(event.after_fit)
if use_max: return torch.stack([preds, aug_preds], 0).max(0)[0],targs
preds = (aug_preds,preds) if beta is None else torch.lerp(aug_preds, preds, beta)
return preds,targs | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/learner.py | learner.py |
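# Example (illustrative, not executed): average four augmented passes over the validation set,
# then blend with the un-augmented predictions using `beta`:
#   preds, targs = learn.tta(n=4, beta=0.25)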
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"subplots": "00_torch_core.ipynb",
"show_image": "00_torch_core.ipynb",
"show_titled_image": "00_torch_core.ipynb",
"show_images": "00_torch_core.ipynb",
"ArrayBase": "00_torch_core.ipynb",
"ArrayImageBase": "00_torch_core.ipynb",
"ArrayImage": "00_torch_core.ipynb",
"ArrayImageBW": "00_torch_core.ipynb",
"ArrayMask": "00_torch_core.ipynb",
"Tensor.__array_eq__": "00_torch_core.ipynb",
"tensor": "00_torch_core.ipynb",
"set_seed": "00_torch_core.ipynb",
"get_random_states": "00_torch_core.ipynb",
"set_random_states": "00_torch_core.ipynb",
"no_random": "00_torch_core.ipynb",
"unsqueeze": "00_torch_core.ipynb",
"unsqueeze_": "00_torch_core.ipynb",
"apply": "00_torch_core.ipynb",
"maybe_gather": "00_torch_core.ipynb",
"to_detach": "00_torch_core.ipynb",
"to_half": "00_torch_core.ipynb",
"to_float": "00_torch_core.ipynb",
"defaults.use_cuda": "00_torch_core.ipynb",
"default_device": "00_torch_core.ipynb",
"to_device": "00_torch_core.ipynb",
"to_cpu": "00_torch_core.ipynb",
"to_np": "00_torch_core.ipynb",
"to_concat": "00_torch_core.ipynb",
"Tensor.set_meta": "00_torch_core.ipynb",
"Tensor.as_subclass": "00_torch_core.ipynb",
"TensorBase": "00_torch_core.ipynb",
"TensorImageBase": "00_torch_core.ipynb",
"TensorImage": "00_torch_core.ipynb",
"TensorImageBW": "00_torch_core.ipynb",
"TensorMask": "00_torch_core.ipynb",
"TensorFlowField": "00_torch_core.ipynb",
"TensorCategory": "00_torch_core.ipynb",
"TensorMultiCategory": "00_torch_core.ipynb",
"TitledTensorScalar": "00_torch_core.ipynb",
"L.tensored": "00_torch_core.ipynb",
"L.stack": "00_torch_core.ipynb",
"L.cat": "00_torch_core.ipynb",
"concat": "00_torch_core.ipynb",
"Chunks": "00_torch_core.ipynb",
"show_title": "00_torch_core.ipynb",
"ShowTitle": "00_torch_core.ipynb",
"TitledInt": "00_torch_core.ipynb",
"TitledFloat": "00_torch_core.ipynb",
"TitledStr": "00_torch_core.ipynb",
"TitledTuple": "00_torch_core.ipynb",
"TitledStr.truncate": "00_torch_core.ipynb",
"pd.DataFrame.__init__": "00_torch_core.ipynb",
"get_empty_df": "00_torch_core.ipynb",
"display_df": "00_torch_core.ipynb",
"get_first": "00_torch_core.ipynb",
"one_param": "00_torch_core.ipynb",
"item_find": "00_torch_core.ipynb",
"find_device": "00_torch_core.ipynb",
"find_bs": "00_torch_core.ipynb",
"np_func": "00_torch_core.ipynb",
"Module": "00_torch_core.ipynb",
"get_model": "00_torch_core.ipynb",
"one_hot": "00_torch_core.ipynb",
"one_hot_decode": "00_torch_core.ipynb",
"params": "00_torch_core.ipynb",
"trainable_params": "00_torch_core.ipynb",
"norm_types": "00_torch_core.ipynb",
"norm_bias_params": "00_torch_core.ipynb",
"batch_to_samples": "00_torch_core.ipynb",
"Tensor.interp_1d": "00_torch_core.ipynb",
"Tensor.pca": "00_torch_core.ipynb",
"logit": "00_torch_core.ipynb",
"num_distrib": "00_torch_core.ipynb",
"rank_distrib": "00_torch_core.ipynb",
"distrib_barrier": "00_torch_core.ipynb",
"Path.save_array": "00_torch_core.ipynb",
"Path.load_array": "00_torch_core.ipynb",
"base_doc": "00_torch_core.ipynb",
"doc": "00_torch_core.ipynb",
"nested_reorder": "00_torch_core.ipynb",
"make_cross_image": "00_torch_core.ipynb",
"show_image_batch": "00_torch_core.ipynb",
"requires_grad": "00_torch_core.ipynb",
"init_default": "01_layers.ipynb",
"cond_init": "00_torch_core.ipynb",
"apply_leaf": "00_torch_core.ipynb",
"apply_init": "00_torch_core.ipynb",
"script_use_ctx": "00_torch_core.ipynb",
"script_save_ctx": "00_torch_core.ipynb",
"script_fwd": "00_torch_core.ipynb",
"script_bwd": "00_torch_core.ipynb",
"grad_module": "00_torch_core.ipynb",
"module": "01_layers.ipynb",
"Identity": "01_layers.ipynb",
"Lambda": "01_layers.ipynb",
"PartialLambda": "01_layers.ipynb",
"Flatten": "01_layers.ipynb",
"View": "01_layers.ipynb",
"ResizeBatch": "01_layers.ipynb",
"Debugger": "01_layers.ipynb",
"sigmoid_range": "01_layers.ipynb",
"SigmoidRange": "01_layers.ipynb",
"AdaptiveConcatPool1d": "01_layers.ipynb",
"AdaptiveConcatPool2d": "01_layers.ipynb",
"PoolType": "01_layers.ipynb",
"adaptive_pool": "01_layers.ipynb",
"PoolFlatten": "01_layers.ipynb",
"NormType": "01_layers.ipynb",
"BatchNorm": "01_layers.ipynb",
"InstanceNorm": "01_layers.ipynb",
"BatchNorm1dFlat": "01_layers.ipynb",
"LinBnDrop": "01_layers.ipynb",
"sigmoid": "01_layers.ipynb",
"sigmoid_": "01_layers.ipynb",
"vleaky_relu": "01_layers.ipynb",
"init_linear": "01_layers.ipynb",
"defaults.activation": "01_layers.ipynb",
"ConvLayer": "01_layers.ipynb",
"AdaptiveAvgPool": "01_layers.ipynb",
"MaxPool": "01_layers.ipynb",
"AvgPool": "01_layers.ipynb",
"trunc_normal_": "01_layers.ipynb",
"Embedding": "01_layers.ipynb",
"SelfAttention": "01_layers.ipynb",
"PooledSelfAttention2d": "01_layers.ipynb",
"SimpleSelfAttention": "01_layers.ipynb",
"icnr_init": "01_layers.ipynb",
"PixelShuffle_ICNR": "01_layers.ipynb",
"sequential": "01_layers.ipynb",
"SequentialEx": "01_layers.ipynb",
"MergeLayer": "01_layers.ipynb",
"Cat": "01_layers.ipynb",
"SimpleCNN": "01_layers.ipynb",
"ProdLayer": "01_layers.ipynb",
"inplace_relu": "01_layers.ipynb",
"SEModule": "01_layers.ipynb",
"ResBlock": "01_layers.ipynb",
"SEBlock": "01_layers.ipynb",
"SEResNeXtBlock": "01_layers.ipynb",
"SeparableBlock": "01_layers.ipynb",
"swish": "01_layers.ipynb",
"Swish": "01_layers.ipynb",
"MishJitAutoFn": "01_layers.ipynb",
"mish": "01_layers.ipynb",
"Mish": "01_layers.ipynb",
"ParameterModule": "01_layers.ipynb",
"children_and_parameters": "01_layers.ipynb",
"has_children": "01_layers.ipynb",
"flatten_model": "01_layers.ipynb",
"NoneReduce": "01_layers.ipynb",
"in_channels": "01_layers.ipynb",
"BaseLoss": "01a_losses.ipynb",
"CrossEntropyLossFlat": "01a_losses.ipynb",
"BCEWithLogitsLossFlat": "01a_losses.ipynb",
"BCELossFlat": "01a_losses.ipynb",
"MSELossFlat": "01a_losses.ipynb",
"L1LossFlat": "01a_losses.ipynb",
"LabelSmoothingCrossEntropy": "01a_losses.ipynb",
"LabelSmoothingCrossEntropyFlat": "01a_losses.ipynb",
"fa_collate": "02_data.load.ipynb",
"fa_convert": "02_data.load.ipynb",
"SkipItemException": "02_data.load.ipynb",
"DataLoader": "02_data.load.ipynb",
"TfmdDL": "03_data.core.ipynb",
"DataLoaders": "03_data.core.ipynb",
"FilteredBase": "03_data.core.ipynb",
"TfmdLists": "03_data.core.ipynb",
"decode_at": "03_data.core.ipynb",
"show_at": "03_data.core.ipynb",
"Datasets": "03_data.core.ipynb",
"test_set": "03_data.core.ipynb",
"DataLoaders.test_dl": "03_data.core.ipynb",
"Config": "04_data.external.ipynb",
"URLs": "04_data.external.ipynb",
"download_url": "04_data.external.ipynb",
"download_data": "04_data.external.ipynb",
"file_extract": "04_data.external.ipynb",
"newest_folder": "04_data.external.ipynb",
"rename_extracted": "04_data.external.ipynb",
"untar_data": "04_data.external.ipynb",
"get_files": "05_data.transforms.ipynb",
"FileGetter": "05_data.transforms.ipynb",
"image_extensions": "05_data.transforms.ipynb",
"get_image_files": "05_data.transforms.ipynb",
"ImageGetter": "05_data.transforms.ipynb",
"get_text_files": "05_data.transforms.ipynb",
"ItemGetter": "05_data.transforms.ipynb",
"AttrGetter": "05_data.transforms.ipynb",
"RandomSplitter": "05_data.transforms.ipynb",
"TrainTestSplitter": "05_data.transforms.ipynb",
"IndexSplitter": "05_data.transforms.ipynb",
"GrandparentSplitter": "05_data.transforms.ipynb",
"FuncSplitter": "05_data.transforms.ipynb",
"MaskSplitter": "05_data.transforms.ipynb",
"FileSplitter": "05_data.transforms.ipynb",
"ColSplitter": "05_data.transforms.ipynb",
"RandomSubsetSplitter": "05_data.transforms.ipynb",
"parent_label": "05_data.transforms.ipynb",
"RegexLabeller": "05_data.transforms.ipynb",
"ColReader": "05_data.transforms.ipynb",
"CategoryMap": "05_data.transforms.ipynb",
"Categorize": "05_data.transforms.ipynb",
"Category": "05_data.transforms.ipynb",
"MultiCategorize": "05_data.transforms.ipynb",
"MultiCategory": "05_data.transforms.ipynb",
"OneHotEncode": "05_data.transforms.ipynb",
"EncodedMultiCategorize": "05_data.transforms.ipynb",
"RegressionSetup": "05_data.transforms.ipynb",
"get_c": "05_data.transforms.ipynb",
"ToTensor": "05_data.transforms.ipynb",
"IntToFloatTensor": "05_data.transforms.ipynb",
"broadcast_vec": "05_data.transforms.ipynb",
"Normalize": "05_data.transforms.ipynb",
"TransformBlock": "06_data.block.ipynb",
"CategoryBlock": "06_data.block.ipynb",
"MultiCategoryBlock": "06_data.block.ipynb",
"RegressionBlock": "06_data.block.ipynb",
"DataBlock": "06_data.block.ipynb",
"DataBlock.summary": "06_data.block.ipynb",
"imagenet_stats": "07_vision.core.ipynb",
"cifar_stats": "07_vision.core.ipynb",
"mnist_stats": "07_vision.core.ipynb",
"n_px": "07_vision.core.ipynb",
"shape": "60_medical.imaging.ipynb",
"aspect": "07_vision.core.ipynb",
"Image.Image.reshape": "07_vision.core.ipynb",
"Image.Image.to_bytes_format": "07_vision.core.ipynb",
"Image.Image.to_thumb": "07_vision.core.ipynb",
"Image.Image.resize_max": "07_vision.core.ipynb",
"to_image": "07_vision.core.ipynb",
"load_image": "07_vision.core.ipynb",
"image2tensor": "07_vision.core.ipynb",
"PILBase": "07_vision.core.ipynb",
"PILImage": "07_vision.core.ipynb",
"PILImageBW": "07_vision.core.ipynb",
"PILMask": "07_vision.core.ipynb",
"OpenMask": "07_vision.core.ipynb",
"OpenMask.loss_func": "07_vision.core.ipynb",
"PILMask.create": "07_vision.core.ipynb",
"AddMaskCodes": "07_vision.core.ipynb",
"TensorPoint": "07_vision.core.ipynb",
"TensorPointCreate": "07_vision.core.ipynb",
"TensorPointCreate.loss_func": "07_vision.core.ipynb",
"TensorPoint.create": "07_vision.core.ipynb",
"get_annotations": "07_vision.core.ipynb",
"TensorBBox": "07_vision.core.ipynb",
"LabeledBBox": "07_vision.core.ipynb",
"encodes": "40_tabular.core.ipynb",
"PointScaler": "07_vision.core.ipynb",
"BBoxLabeler": "07_vision.core.ipynb",
"decodes": "40_tabular.core.ipynb",
"get_grid": "08_vision.data.ipynb",
"clip_remove_empty": "08_vision.data.ipynb",
"bb_pad": "08_vision.data.ipynb",
"ImageBlock": "08_vision.data.ipynb",
"MaskBlock": "08_vision.data.ipynb",
"PointBlock": "08_vision.data.ipynb",
"BBoxBlock": "08_vision.data.ipynb",
"PointBlock.__doc__": "08_vision.data.ipynb",
"BBoxBlock.__doc__": "08_vision.data.ipynb",
"BBoxLblBlock": "08_vision.data.ipynb",
"ImageDataLoaders": "08_vision.data.ipynb",
"ImageDataLoaders.from_csv": "08_vision.data.ipynb",
"ImageDataLoaders.from_name_func": "08_vision.data.ipynb",
"ImageDataLoaders.from_path_re": "08_vision.data.ipynb",
"ImageDataLoaders.from_name_re": "08_vision.data.ipynb",
"SegmentationDataLoaders": "08_vision.data.ipynb",
"RandTransform": "09_vision.augment.ipynb",
"TensorTypes": "09_vision.augment.ipynb",
"Image.Image.flip_lr": "09_vision.augment.ipynb",
"TensorImageBase.flip_lr": "09_vision.augment.ipynb",
"TensorPoint.flip_lr": "09_vision.augment.ipynb",
"TensorBBox.flip_lr": "09_vision.augment.ipynb",
"FlipItem": "09_vision.augment.ipynb",
"PILImage.dihedral": "09_vision.augment.ipynb",
"TensorImage.dihedral": "09_vision.augment.ipynb",
"TensorPoint.dihedral": "09_vision.augment.ipynb",
"TensorBBox.dihedral": "09_vision.augment.ipynb",
"DihedralItem": "09_vision.augment.ipynb",
"TensorBBox.crop_pad": "09_vision.augment.ipynb",
"TensorPoint.crop_pad": "09_vision.augment.ipynb",
"Image.Image.crop_pad": "09_vision.augment.ipynb",
"CropPad": "09_vision.augment.ipynb",
"RandomCrop": "09_vision.augment.ipynb",
"OldRandomCrop": "09_vision.augment.ipynb",
"Resize": "09_vision.augment.ipynb",
"RandomResizedCrop": "09_vision.augment.ipynb",
"RatioResize": "09_vision.augment.ipynb",
"affine_grid": "09_vision.augment.ipynb",
"TensorImage.affine_coord": "09_vision.augment.ipynb",
"TensorMask.affine_coord": "09_vision.augment.ipynb",
"TensorPoint.affine_coord": "09_vision.augment.ipynb",
"TensorBBox.affine_coord": "09_vision.augment.ipynb",
"AffineCoordTfm": "09_vision.augment.ipynb",
"RandomResizedCropGPU": "09_vision.augment.ipynb",
"mask_tensor": "09_vision.augment.ipynb",
"affine_mat": "09_vision.augment.ipynb",
"flip_mat": "09_vision.augment.ipynb",
"TensorImage.flip_batch": "09_vision.augment.ipynb",
"TensorMask.flip_batch": "09_vision.augment.ipynb",
"TensorPoint.flip_batch": "09_vision.augment.ipynb",
"TensorBBox.flip_batch": "09_vision.augment.ipynb",
"Flip": "09_vision.augment.ipynb",
"DeterministicDraw": "09_vision.augment.ipynb",
"DeterministicFlip": "09_vision.augment.ipynb",
"dihedral_mat": "09_vision.augment.ipynb",
"TensorImage.dihedral_batch": "09_vision.augment.ipynb",
"TensorMask.dihedral_batch": "09_vision.augment.ipynb",
"TensorPoint.dihedral_batch": "09_vision.augment.ipynb",
"TensorBBox.dihedral_batch": "09_vision.augment.ipynb",
"Dihedral": "09_vision.augment.ipynb",
"DeterministicDihedral": "09_vision.augment.ipynb",
"rotate_mat": "09_vision.augment.ipynb",
"TensorImage.rotate": "09_vision.augment.ipynb",
"TensorMask.rotate": "09_vision.augment.ipynb",
"TensorPoint.rotate": "09_vision.augment.ipynb",
"TensorBBox.rotate": "09_vision.augment.ipynb",
"Rotate": "09_vision.augment.ipynb",
"zoom_mat": "09_vision.augment.ipynb",
"TensorImage.zoom": "09_vision.augment.ipynb",
"TensorMask.zoom": "09_vision.augment.ipynb",
"TensorPoint.zoom": "09_vision.augment.ipynb",
"TensorBBox.zoom": "09_vision.augment.ipynb",
"Zoom": "09_vision.augment.ipynb",
"find_coeffs": "09_vision.augment.ipynb",
"apply_perspective": "09_vision.augment.ipynb",
"TensorImage.warp": "09_vision.augment.ipynb",
"TensorMask.warp": "09_vision.augment.ipynb",
"TensorPoint.warp": "09_vision.augment.ipynb",
"TensorBBox.warp": "09_vision.augment.ipynb",
"Warp": "09_vision.augment.ipynb",
"TensorImage.lighting": "09_vision.augment.ipynb",
"SpaceTfm": "09_vision.augment.ipynb",
"LightingTfm": "09_vision.augment.ipynb",
"TensorImage.brightness": "09_vision.augment.ipynb",
"Brightness": "09_vision.augment.ipynb",
"TensorImage.contrast": "09_vision.augment.ipynb",
"Contrast": "09_vision.augment.ipynb",
"grayscale": "09_vision.augment.ipynb",
"TensorImage.saturation": "09_vision.augment.ipynb",
"Saturation": "09_vision.augment.ipynb",
"rgb2hsv": "09_vision.augment.ipynb",
"hsv2rgb": "09_vision.augment.ipynb",
"TensorImage.hsv": "09_vision.augment.ipynb",
"HSVTfm": "09_vision.augment.ipynb",
"TensorImage.hue": "09_vision.augment.ipynb",
"Hue": "09_vision.augment.ipynb",
"cutout_gaussian": "09_vision.augment.ipynb",
"norm_apply_denorm": "09_vision.augment.ipynb",
"RandomErasing": "09_vision.augment.ipynb",
"setup_aug_tfms": "09_vision.augment.ipynb",
"aug_transforms": "09_vision.augment.ipynb",
"download_images": "09b_vision.utils.ipynb",
"resize_to": "09b_vision.utils.ipynb",
"verify_image": "09b_vision.utils.ipynb",
"verify_images": "09b_vision.utils.ipynb",
"resize_image": "09b_vision.utils.ipynb",
"resize_images": "09b_vision.utils.ipynb",
"Box.__getitem__": "09c_vision.widgets.ipynb",
"widget": "09c_vision.widgets.ipynb",
"carousel": "09c_vision.widgets.ipynb",
"ImagesCleaner": "09c_vision.widgets.ipynb",
"ImageClassifierCleaner": "09c_vision.widgets.ipynb",
"init_cnn": "11_vision.models.xresnet.ipynb",
"XResNet": "11_vision.models.xresnet.ipynb",
"xresnet18": "11_vision.models.xresnet.ipynb",
"xresnet34": "11_vision.models.xresnet.ipynb",
"xresnet50": "11_vision.models.xresnet.ipynb",
"xresnet101": "11_vision.models.xresnet.ipynb",
"xresnet152": "11_vision.models.xresnet.ipynb",
"xresnet18_deep": "11_vision.models.xresnet.ipynb",
"xresnet34_deep": "11_vision.models.xresnet.ipynb",
"xresnet50_deep": "11_vision.models.xresnet.ipynb",
"xresnet18_deeper": "11_vision.models.xresnet.ipynb",
"xresnet34_deeper": "11_vision.models.xresnet.ipynb",
"xresnet50_deeper": "11_vision.models.xresnet.ipynb",
"se_kwargs1": "11_vision.models.xresnet.ipynb",
"se_kwargs2": "11_vision.models.xresnet.ipynb",
"se_kwargs3": "11_vision.models.xresnet.ipynb",
"g0": "11_vision.models.xresnet.ipynb",
"g1": "11_vision.models.xresnet.ipynb",
"g2": "11_vision.models.xresnet.ipynb",
"g3": "11_vision.models.xresnet.ipynb",
"xse_resnet18": "11_vision.models.xresnet.ipynb",
"xse_resnext18": "11_vision.models.xresnet.ipynb",
"xresnext18": "11_vision.models.xresnet.ipynb",
"xse_resnet34": "11_vision.models.xresnet.ipynb",
"xse_resnext34": "11_vision.models.xresnet.ipynb",
"xresnext34": "11_vision.models.xresnet.ipynb",
"xse_resnet50": "11_vision.models.xresnet.ipynb",
"xse_resnext50": "11_vision.models.xresnet.ipynb",
"xresnext50": "11_vision.models.xresnet.ipynb",
"xse_resnet101": "11_vision.models.xresnet.ipynb",
"xse_resnext101": "11_vision.models.xresnet.ipynb",
"xresnext101": "11_vision.models.xresnet.ipynb",
"xse_resnet152": "11_vision.models.xresnet.ipynb",
"xsenet154": "11_vision.models.xresnet.ipynb",
"xse_resnext18_deep": "11_vision.models.xresnet.ipynb",
"xse_resnext34_deep": "11_vision.models.xresnet.ipynb",
"xse_resnext50_deep": "11_vision.models.xresnet.ipynb",
"xse_resnext18_deeper": "11_vision.models.xresnet.ipynb",
"xse_resnext34_deeper": "11_vision.models.xresnet.ipynb",
"xse_resnext50_deeper": "11_vision.models.xresnet.ipynb",
"Optimizer": "12_optimizer.ipynb",
"sgd_step": "12_optimizer.ipynb",
"weight_decay": "12_optimizer.ipynb",
"weight_decay.defaults": "12_optimizer.ipynb",
"l2_reg": "12_optimizer.ipynb",
"l2_reg.defaults": "12_optimizer.ipynb",
"average_grad": "12_optimizer.ipynb",
"average_grad.defaults": "12_optimizer.ipynb",
"average_sqr_grad": "12_optimizer.ipynb",
"average_sqr_grad.defaults": "12_optimizer.ipynb",
"momentum_step": "12_optimizer.ipynb",
"SGD": "12_optimizer.ipynb",
"rms_prop_step": "12_optimizer.ipynb",
"rms_prop_step.defaults": "12_optimizer.ipynb",
"RMSProp": "12_optimizer.ipynb",
"step_stat": "12_optimizer.ipynb",
"debias": "12_optimizer.ipynb",
"adam_step": "12_optimizer.ipynb",
"Adam": "12_optimizer.ipynb",
"radam_step": "12_optimizer.ipynb",
"RAdam": "12_optimizer.ipynb",
"qhadam_step": "12_optimizer.ipynb",
"QHAdam": "12_optimizer.ipynb",
"larc_layer_lr": "12_optimizer.ipynb",
"larc_layer_lr.defaults": "12_optimizer.ipynb",
"larc_step": "12_optimizer.ipynb",
"Larc": "12_optimizer.ipynb",
"lamb_step": "12_optimizer.ipynb",
"Lamb": "12_optimizer.ipynb",
"Lookahead": "12_optimizer.ipynb",
"ranger": "12_optimizer.ipynb",
"detuplify_pg": "12_optimizer.ipynb",
"set_item_pg": "12_optimizer.ipynb",
"pytorch_hp_map": "12_optimizer.ipynb",
"OptimWrapper": "12_optimizer.ipynb",
"Callback": "13_callback.core.ipynb",
"TrainEvalCallback": "13_callback.core.ipynb",
"GatherPredsCallback": "13_callback.core.ipynb",
"FetchPredsCallback": "13_callback.core.ipynb",
"defaults.lr": "13a_learner.ipynb",
"replacing_yield": "13a_learner.ipynb",
"mk_metric": "13a_learner.ipynb",
"save_model": "13a_learner.ipynb",
"load_model": "13a_learner.ipynb",
"Learner": "13a_learner.ipynb",
"before_batch_cb": "13a_learner.ipynb",
"Learner.save": "13a_learner.ipynb",
"Learner.load": "13a_learner.ipynb",
"Learner.export": "13a_learner.ipynb",
"load_learner": "13a_learner.ipynb",
"to_detach_from_dl": "13a_learner.ipynb",
"Metric": "13a_learner.ipynb",
"AvgMetric": "13a_learner.ipynb",
"AvgLoss": "13a_learner.ipynb",
"AvgSmoothLoss": "13a_learner.ipynb",
"ValueMetric": "13a_learner.ipynb",
"Recorder": "13a_learner.ipynb",
"Learner.freeze_to": "13a_learner.ipynb",
"Learner.freeze": "13a_learner.ipynb",
"Learner.unfreeze": "13a_learner.ipynb",
"Learner.tta": "13a_learner.ipynb",
"flatten_check": "13b_metrics.ipynb",
"AccumMetric": "13b_metrics.ipynb",
"skm_to_fastai": "13b_metrics.ipynb",
"optim_metric": "13b_metrics.ipynb",
"accuracy": "13b_metrics.ipynb",
"error_rate": "13b_metrics.ipynb",
"top_k_accuracy": "13b_metrics.ipynb",
"APScoreBinary": "13b_metrics.ipynb",
"BalancedAccuracy": "13b_metrics.ipynb",
"BrierScore": "13b_metrics.ipynb",
"CohenKappa": "13b_metrics.ipynb",
"F1Score": "13b_metrics.ipynb",
"FBeta": "13b_metrics.ipynb",
"HammingLoss": "13b_metrics.ipynb",
"Jaccard": "13b_metrics.ipynb",
"Precision": "13b_metrics.ipynb",
"Recall": "13b_metrics.ipynb",
"RocAuc": "13b_metrics.ipynb",
"RocAucBinary": "13b_metrics.ipynb",
"MatthewsCorrCoef": "13b_metrics.ipynb",
"Perplexity": "13b_metrics.ipynb",
"perplexity": "13b_metrics.ipynb",
"accuracy_multi": "13b_metrics.ipynb",
"APScoreMulti": "13b_metrics.ipynb",
"BrierScoreMulti": "13b_metrics.ipynb",
"F1ScoreMulti": "13b_metrics.ipynb",
"FBetaMulti": "13b_metrics.ipynb",
"HammingLossMulti": "13b_metrics.ipynb",
"JaccardMulti": "13b_metrics.ipynb",
"MatthewsCorrCoefMulti": "13b_metrics.ipynb",
"PrecisionMulti": "13b_metrics.ipynb",
"RecallMulti": "13b_metrics.ipynb",
"RocAucMulti": "13b_metrics.ipynb",
"mse": "13b_metrics.ipynb",
"rmse": "13b_metrics.ipynb",
"rmse.__doc__": "13b_metrics.ipynb",
"mae": "13b_metrics.ipynb",
"msle": "13b_metrics.ipynb",
"exp_rmspe": "13b_metrics.ipynb",
"exp_rmspe.__doc__": "13b_metrics.ipynb",
"ExplainedVariance": "13b_metrics.ipynb",
"R2Score": "13b_metrics.ipynb",
"PearsonCorrCoef": "13b_metrics.ipynb",
"SpearmanCorrCoef": "13b_metrics.ipynb",
"foreground_acc": "13b_metrics.ipynb",
"Dice": "13b_metrics.ipynb",
"DiceMulti": "13b_metrics.ipynb",
"JaccardCoeff": "13b_metrics.ipynb",
"CorpusBLEUMetric": "13b_metrics.ipynb",
"LossMetric": "13b_metrics.ipynb",
"LossMetrics": "13b_metrics.ipynb",
"annealer": "14_callback.schedule.ipynb",
"sched_lin": "14_callback.schedule.ipynb",
"sched_cos": "14_callback.schedule.ipynb",
"sched_no": "14_callback.schedule.ipynb",
"sched_exp": "14_callback.schedule.ipynb",
"SchedLin": "14_callback.schedule.ipynb",
"SchedCos": "14_callback.schedule.ipynb",
"SchedNo": "14_callback.schedule.ipynb",
"SchedExp": "14_callback.schedule.ipynb",
"SchedLin.__doc__": "14_callback.schedule.ipynb",
"SchedCos.__doc__": "14_callback.schedule.ipynb",
"SchedExp.__doc__": "14_callback.schedule.ipynb",
"SchedPoly": "14_callback.schedule.ipynb",
"combine_scheds": "14_callback.schedule.ipynb",
"combined_cos": "14_callback.schedule.ipynb",
"ParamScheduler": "14_callback.schedule.ipynb",
"Learner.fit_one_cycle": "14_callback.schedule.ipynb",
"Recorder.plot_sched": "14_callback.schedule.ipynb",
"Learner.fit_flat_cos": "14_callback.schedule.ipynb",
"Learner.fit_sgdr": "14_callback.schedule.ipynb",
"Learner.fine_tune": "14_callback.schedule.ipynb",
"LRFinder": "14_callback.schedule.ipynb",
"Recorder.plot_lr_find": "14_callback.schedule.ipynb",
"SuggestedLRs": "14_callback.schedule.ipynb",
"Learner.lr_find": "14_callback.schedule.ipynb",
"CollectDataCallback": "14a_callback.data.ipynb",
"CudaCallback": "14a_callback.data.ipynb",
"WeightedDL": "14a_callback.data.ipynb",
"Datasets.weighted_dataloaders": "14a_callback.data.ipynb",
"PartialDL": "14a_callback.data.ipynb",
"FilteredBase.partial_dataloaders": "14a_callback.data.ipynb",
"Hook": "15_callback.hook.ipynb",
"hook_output": "15_callback.hook.ipynb",
"Hooks": "15_callback.hook.ipynb",
"hook_outputs": "15_callback.hook.ipynb",
"dummy_eval": "15_callback.hook.ipynb",
"model_sizes": "15_callback.hook.ipynb",
"num_features_model": "15_callback.hook.ipynb",
"has_params": "15_callback.hook.ipynb",
"HookCallback": "15_callback.hook.ipynb",
"total_params": "15_callback.hook.ipynb",
"layer_info": "15_callback.hook.ipynb",
"module_summary": "15_callback.hook.ipynb",
"Learner.summary": "15_callback.hook.ipynb",
"ActivationStats": "15_callback.hook.ipynb",
"UnetBlock": "15a_vision.models.unet.ipynb",
"ResizeToOrig": "15a_vision.models.unet.ipynb",
"DynamicUnet": "15a_vision.models.unet.ipynb",
"ProgressCallback": "16_callback.progress.ipynb",
"Learner.no_bar": "16_callback.progress.ipynb",
"ShowGraphCallback": "16_callback.progress.ipynb",
"CSVLogger": "16_callback.progress.ipynb",
"TerminateOnNaNCallback": "17_callback.tracker.ipynb",
"TrackerCallback": "17_callback.tracker.ipynb",
"EarlyStoppingCallback": "17_callback.tracker.ipynb",
"SaveModelCallback": "17_callback.tracker.ipynb",
"ReduceLROnPlateau": "17_callback.tracker.ipynb",
"MixedPrecision": "18_callback.fp16.ipynb",
"FP16TestCallback": "18_callback.fp16.ipynb",
"Learner.to_fp16": "18_callback.fp16.ipynb",
"Learner.to_fp32": "18_callback.fp16.ipynb",
"get_master": "18_callback.fp16.ipynb",
"to_master_grads": "18_callback.fp16.ipynb",
"to_model_params": "18_callback.fp16.ipynb",
"test_overflow": "18_callback.fp16.ipynb",
"grad_overflow": "18_callback.fp16.ipynb",
"copy_clone": "18_callback.fp16.ipynb",
"ModelToHalf": "18_callback.fp16.ipynb",
"NonNativeMixedPrecision": "18_callback.fp16.ipynb",
"Learner.to_to_non_native_fp16": "18_callback.fp16.ipynb",
"Learner.to_non_native_fp32": "18_callback.fp16.ipynb",
"ShortEpochCallback": "18a_callback.training.ipynb",
"GradientAccumulation": "18a_callback.training.ipynb",
"GradientClip": "18a_callback.training.ipynb",
"set_bn_eval": "18a_callback.training.ipynb",
"BnFreeze": "18a_callback.training.ipynb",
"bn_types": "18a_callback.training.ipynb",
"MCDropoutCallback": "18b_callback.preds.ipynb",
"reduce_loss": "19_callback.mixup.ipynb",
"MixHandler": "19_callback.mixup.ipynb",
"MixUp": "19_callback.mixup.ipynb",
"CutMix": "19_callback.mixup.ipynb",
"Interpretation": "20_interpret.ipynb",
"ClassificationInterpretation": "20_interpret.ipynb",
"DataParallel.reset": "20a_distributed.ipynb",
"ParallelTrainer": "20a_distributed.ipynb",
"Learner.to_parallel": "20a_distributed.ipynb",
"Learner.detach_parallel": "20a_distributed.ipynb",
"Learner.parallel_ctx": "20a_distributed.ipynb",
"DistributedDataParallel.reset": "20a_distributed.ipynb",
"setup_distrib": "20a_distributed.ipynb",
"teardown_distrib": "20a_distributed.ipynb",
"DistributedDL": "20a_distributed.ipynb",
"DistributedTrainer": "20a_distributed.ipynb",
"Learner.to_distributed": "20a_distributed.ipynb",
"Learner.detach_distributed": "20a_distributed.ipynb",
"Learner.distrib_ctx": "20a_distributed.ipynb",
"rank0_first": "20a_distributed.ipynb",
"has_pool_type": "21_vision.learner.ipynb",
"create_body": "21_vision.learner.ipynb",
"create_head": "21_vision.learner.ipynb",
"default_split": "21_vision.learner.ipynb",
"model_meta": "21_vision.learner.ipynb",
"create_cnn_model": "21_vision.learner.ipynb",
"cnn_learner": "21_vision.learner.ipynb",
"create_unet_model": "21_vision.learner.ipynb",
"unet_learner": "21_vision.learner.ipynb",
"GANModule": "24_vision.gan.ipynb",
"basic_critic": "24_vision.gan.ipynb",
"AddChannels": "24_vision.gan.ipynb",
"basic_generator": "24_vision.gan.ipynb",
"DenseResBlock": "24_vision.gan.ipynb",
"gan_critic": "24_vision.gan.ipynb",
"GANLoss": "24_vision.gan.ipynb",
"AdaptiveLoss": "24_vision.gan.ipynb",
"accuracy_thresh_expand": "24_vision.gan.ipynb",
"set_freeze_model": "24_vision.gan.ipynb",
"GANTrainer": "24_vision.gan.ipynb",
"FixedGANSwitcher": "24_vision.gan.ipynb",
"AdaptiveGANSwitcher": "24_vision.gan.ipynb",
"GANDiscriminativeLR": "24_vision.gan.ipynb",
"InvisibleTensor": "24_vision.gan.ipynb",
"generate_noise": "24_vision.gan.ipynb",
"gan_loss_from_func": "24_vision.gan.ipynb",
"GANLearner": "24_vision.gan.ipynb",
"GANLearner.from_learners": "24_vision.gan.ipynb",
"GANLearner.wgan": "24_vision.gan.ipynb",
"spec_add_spaces": "30_text.core.ipynb",
"rm_useless_spaces": "30_text.core.ipynb",
"replace_rep": "30_text.core.ipynb",
"replace_wrep": "30_text.core.ipynb",
"fix_html": "30_text.core.ipynb",
"replace_all_caps": "30_text.core.ipynb",
"replace_maj": "30_text.core.ipynb",
"lowercase": "30_text.core.ipynb",
"replace_space": "30_text.core.ipynb",
"defaults.text_spec_tok": "30_text.core.ipynb",
"defaults.text_proc_rules": "30_text.core.ipynb",
"defaults.text_postproc_rules": "30_text.core.ipynb",
"BaseTokenizer": "30_text.core.ipynb",
"SpacyTokenizer": "30_text.core.ipynb",
"WordTokenizer": "30_text.core.ipynb",
"TokenizeWithRules": "30_text.core.ipynb",
"tokenize1": "30_text.core.ipynb",
"parallel_tokenize": "30_text.core.ipynb",
"fn_counter_pkl": "30_text.core.ipynb",
"fn_lengths_pkl": "30_text.core.ipynb",
"tokenize_folder": "30_text.core.ipynb",
"tokenize_files": "30_text.core.ipynb",
"tokenize_texts": "30_text.core.ipynb",
"tokenize_df": "30_text.core.ipynb",
"tokenize_csv": "30_text.core.ipynb",
"load_tokenized_csv": "30_text.core.ipynb",
"Tokenizer": "30_text.core.ipynb",
"eu_langs": "30_text.core.ipynb",
"SentencePieceTokenizer": "30_text.core.ipynb",
"SubwordTokenizer": "30_text.core.ipynb",
"reverse_text": "31_text.data.ipynb",
"make_vocab": "31_text.data.ipynb",
"TensorText": "31_text.data.ipynb",
"LMTensorText": "31_text.data.ipynb",
"TensorText.__doc__": "31_text.data.ipynb",
"LMTensorText.__doc__": "31_text.data.ipynb",
"Numericalize": "31_text.data.ipynb",
"LMDataLoader": "31_text.data.ipynb",
"Pad_Input": "31_text.data.ipynb",
"pad_input": "31_text.data.ipynb",
"pad_chunk": "31_text.data.ipynb",
"pad_input_chunk": "31_text.data.ipynb",
"Pad_Chunk": "31_text.data.ipynb",
"SortedDL": "31_text.data.ipynb",
"TextBlock": "31_text.data.ipynb",
"TextDataLoaders": "31_text.data.ipynb",
"TextDataLoaders.from_csv": "31_text.data.ipynb",
"dropout_mask": "32_text.models.awdlstm.ipynb",
"RNNDropout": "32_text.models.awdlstm.ipynb",
"WeightDropout": "32_text.models.awdlstm.ipynb",
"EmbeddingDropout": "32_text.models.awdlstm.ipynb",
"AWD_LSTM": "32_text.models.awdlstm.ipynb",
"awd_lstm_lm_split": "32_text.models.awdlstm.ipynb",
"awd_lstm_lm_config": "32_text.models.awdlstm.ipynb",
"awd_lstm_clas_split": "32_text.models.awdlstm.ipynb",
"awd_lstm_clas_config": "32_text.models.awdlstm.ipynb",
"AWD_QRNN": "32_text.models.awdlstm.ipynb",
"awd_qrnn_lm_config": "32_text.models.awdlstm.ipynb",
"awd_qrnn_clas_config": "32_text.models.awdlstm.ipynb",
"LinearDecoder": "33_text.models.core.ipynb",
"SequentialRNN": "33_text.models.core.ipynb",
"get_language_model": "33_text.models.core.ipynb",
"SentenceEncoder": "33_text.models.core.ipynb",
"masked_concat_pool": "33_text.models.core.ipynb",
"PoolingLinearClassifier": "33_text.models.core.ipynb",
"get_text_classifier": "33_text.models.core.ipynb",
"ModelResetter": "34_callback.rnn.ipynb",
"RNNCallback": "34_callback.rnn.ipynb",
"RNNRegularizer": "34_callback.rnn.ipynb",
"rnn_cbs": "34_callback.rnn.ipynb",
"load_cpp": "36_text.models.qrnn.ipynb",
"forget_mult_cuda": "36_text.models.qrnn.ipynb",
"bwd_forget_mult_cuda": "36_text.models.qrnn.ipynb",
"dispatch_cuda": "36_text.models.qrnn.ipynb",
"forget_mult_CPU": "36_text.models.qrnn.ipynb",
"ForgetMultGPU": "36_text.models.qrnn.ipynb",
"QRNNLayer": "36_text.models.qrnn.ipynb",
"QRNN": "36_text.models.qrnn.ipynb",
"match_embeds": "37_text.learner.ipynb",
"load_ignore_keys": "37_text.learner.ipynb",
"clean_raw_keys": "37_text.learner.ipynb",
"load_model_text": "37_text.learner.ipynb",
"TextLearner": "37_text.learner.ipynb",
"decode_spec_tokens": "37_text.learner.ipynb",
"LMLearner": "37_text.learner.ipynb",
"language_model_learner": "37_text.learner.ipynb",
"text_classifier_learner": "37_text.learner.ipynb",
"make_date": "40_tabular.core.ipynb",
"add_datepart": "40_tabular.core.ipynb",
"add_elapsed_times": "40_tabular.core.ipynb",
"cont_cat_split": "40_tabular.core.ipynb",
"df_shrink_dtypes": "40_tabular.core.ipynb",
"df_shrink": "40_tabular.core.ipynb",
"Tabular": "40_tabular.core.ipynb",
"TabularPandas": "40_tabular.core.ipynb",
"TabularProc": "40_tabular.core.ipynb",
"Categorify": "40_tabular.core.ipynb",
"setups": "40_tabular.core.ipynb",
"FillStrategy": "40_tabular.core.ipynb",
"FillMissing": "40_tabular.core.ipynb",
"ReadTabBatch": "40_tabular.core.ipynb",
"TabDataLoader": "40_tabular.core.ipynb",
"TabularDataLoaders": "41_tabular.data.ipynb",
"TabularDataLoaders.from_csv": "41_tabular.data.ipynb",
"emb_sz_rule": "42_tabular.model.ipynb",
"get_emb_sz": "42_tabular.model.ipynb",
"TabularModel": "42_tabular.model.ipynb",
"tabular_config": "42_tabular.model.ipynb",
"TabularLearner": "43_tabular.learner.ipynb",
"tabular_learner": "43_tabular.learner.ipynb",
"TabularCollab": "45_collab.ipynb",
"CollabDataLoaders": "45_collab.ipynb",
"CollabDataLoaders.from_csv": "45_collab.ipynb",
"EmbeddingDotBias": "45_collab.ipynb",
"EmbeddingNN": "45_collab.ipynb",
"collab_learner": "45_collab.ipynb",
"get_dicom_files": "60_medical.imaging.ipynb",
"Path.dcmread": "60_medical.imaging.ipynb",
"TensorDicom": "60_medical.imaging.ipynb",
"PILDicom": "60_medical.imaging.ipynb",
"Path.png16read": "60_medical.imaging.ipynb",
"pixels": "60_medical.imaging.ipynb",
"scaled_px": "60_medical.imaging.ipynb",
"array_freqhist_bins": "60_medical.imaging.ipynb",
"Tensor.freqhist_bins": "60_medical.imaging.ipynb",
"Tensor.hist_scaled_pt": "60_medical.imaging.ipynb",
"Tensor.hist_scaled": "60_medical.imaging.ipynb",
"DcmDataset.hist_scaled": "60_medical.imaging.ipynb",
"Tensor.windowed": "60_medical.imaging.ipynb",
"DcmDataset.windowed": "60_medical.imaging.ipynb",
"dicom_windows": "60_medical.imaging.ipynb",
"TensorCTScan": "60_medical.imaging.ipynb",
"PILCTScan": "60_medical.imaging.ipynb",
"DcmDataset.show": "60_medical.imaging.ipynb",
"DcmDataset.pct_in_window": "60_medical.imaging.ipynb",
"uniform_blur2d": "60_medical.imaging.ipynb",
"gauss_blur2d": "60_medical.imaging.ipynb",
"Tensor.mask_from_blur": "60_medical.imaging.ipynb",
"DcmDataset.mask_from_blur": "60_medical.imaging.ipynb",
"mask2bbox": "60_medical.imaging.ipynb",
"crop_resize": "60_medical.imaging.ipynb",
"Tensor.to_nchan": "60_medical.imaging.ipynb",
"DcmDataset.to_nchan": "60_medical.imaging.ipynb",
"Tensor.to_3chan": "60_medical.imaging.ipynb",
"DcmDataset.to_3chan": "60_medical.imaging.ipynb",
"Tensor.save_jpg": "60_medical.imaging.ipynb",
"DcmDataset.save_jpg": "60_medical.imaging.ipynb",
"Tensor.to_uint16": "60_medical.imaging.ipynb",
"DcmDataset.to_uint16": "60_medical.imaging.ipynb",
"Tensor.save_tif16": "60_medical.imaging.ipynb",
"DcmDataset.save_tif16": "60_medical.imaging.ipynb",
"DcmDataset.set_pixels": "60_medical.imaging.ipynb",
"DcmDataset.pixel_array": "60_medical.imaging.ipynb",
"DcmDataset.zoom": "60_medical.imaging.ipynb",
"DcmDataset.zoom_to": "60_medical.imaging.ipynb",
"DcmDataset.as_dict": "60_medical.imaging.ipynb",
"pd.DataFrame.from_dicoms": "60_medical.imaging.ipynb",
"DicomSegmentationDataLoaders": "60_medical.imaging.ipynb",
"WandbCallback": "70_callback.wandb.ipynb",
"Learner.gather_args": "70_callback.wandb.ipynb",
"log_dataset": "70_callback.wandb.ipynb",
"log_model": "70_callback.wandb.ipynb",
"TensorBoardBaseCallback": "71_callback.tensorboard.ipynb",
"TensorBoardCallback": "71_callback.tensorboard.ipynb",
"TensorBoardProjectorCallback": "71_callback.tensorboard.ipynb",
"projector_word_embeddings": "71_callback.tensorboard.ipynb",
"NeptuneCallback": "72_callback.neptune.ipynb",
"json_clean": "73_callback.captum.ipynb",
"jsonutil.json_clean": "73_callback.captum.ipynb",
"CaptumInterpretation": "73_callback.captum.ipynb",
"CaptumInterpretation.insights": "73_callback.captum.ipynb",
"synth_dbunch": "97_test_utils.ipynb",
"RegModel": "97_test_utils.ipynb",
"synth_learner": "97_test_utils.ipynb",
"VerboseCallback": "97_test_utils.ipynb",
"get_env": "97_test_utils.ipynb",
"try_import": "97_test_utils.ipynb",
"nvidia_smi": "97_test_utils.ipynb",
"nvidia_mem": "97_test_utils.ipynb",
"show_install": "97_test_utils.ipynb",
"PYTORCH_URL": "99_pytorch_doc.ipynb",
"pytorch_doc_link": "99_pytorch_doc.ipynb"}
modules = ["torch_core.py",
"layers.py",
"losses.py",
"data/load.py",
"data/core.py",
"data/external.py",
"data/transforms.py",
"data/block.py",
"vision/core.py",
"vision/data.py",
"vision/augment.py",
"vision/utils.py",
"vision/widgets.py",
"vision/models/xresnet.py",
"optimizer.py",
"callback/core.py",
"learner.py",
"metrics.py",
"callback/schedule.py",
"callback/data.py",
"callback/hook.py",
"vision/models/unet.py",
"callback/progress.py",
"callback/tracker.py",
"callback/fp16.py",
"callback/training.py",
"callback/preds.py",
"callback/mixup.py",
"interpret.py",
"distributed.py",
"vision/learner.py",
"vision/gan.py",
"text/core.py",
"text/data.py",
"text/models/awdlstm.py",
"text/models/core.py",
"callback/rnn.py",
"text/models/qrnn.py",
"text/learner.py",
"tabular/core.py",
"tabular/data.py",
"tabular/model.py",
"tabular/learner.py",
"collab.py",
"medical/imaging.py",
"medical/text.py",
"callback/wandb.py",
"callback/tensorboard.py",
"callback/neptune.py",
"callback/captum.py",
"test_utils.py",
"_pytorch_doc.py"]
doc_url = "https://docs.fast.ai/"
git_url = "https://github.com/fastai/fastai/tree/master/"
def custom_doc_links(name):
from nbdev.showdoc import try_external_doc_link
    return try_external_doc_link(name, ['fastcore', 'nbdev'])
# end of src/fastai/_nbdev.py
__all__ = ['plot_top_losses', 'Interpretation', 'ClassificationInterpretation']
# Cell
from .data.all import *
from .optimizer import *
from .learner import *
import sklearn.metrics as skm
# Cell
@typedispatch
def plot_top_losses(x, y, *args, **kwargs):
raise Exception(f"plot_top_losses is not implemented for {type(x)},{type(y)}")
# Cell
#nbdev_comment _all_ = ["plot_top_losses"]
# Cell
class Interpretation():
"Interpretation base class, can be inherited for task specific Interpretation classes"
def __init__(self, dl, inputs, preds, targs, decoded, losses):
store_attr("dl,inputs,preds,targs,decoded,losses")
@classmethod
def from_learner(cls, learn, ds_idx=1, dl=None, act=None):
"Construct interpretation object from a learner"
if dl is None: dl = learn.dls[ds_idx]
        return cls(dl, *learn.get_preds(dl=dl, with_input=True, with_loss=True, with_decoded=True, act=act))
def top_losses(self, k=None, largest=True):
"`k` largest(/smallest) losses and indexes, defaulting to all losses (sorted by `largest`)."
return self.losses.topk(ifnone(k, len(self.losses)), largest=largest)
def plot_top_losses(self, k, largest=True, **kwargs):
losses,idx = self.top_losses(k, largest)
if not isinstance(self.inputs, tuple): self.inputs = (self.inputs,)
if isinstance(self.inputs[0], Tensor): inps = tuple(o[idx] for o in self.inputs)
else: inps = self.dl.create_batch(self.dl.before_batch([tuple(o[i] for o in self.inputs) for i in idx]))
b = inps + tuple(o[idx] for o in (self.targs if is_listy(self.targs) else (self.targs,)))
x,y,its = self.dl._pre_show_batch(b, max_n=k)
b_out = inps + tuple(o[idx] for o in (self.decoded if is_listy(self.decoded) else (self.decoded,)))
x1,y1,outs = self.dl._pre_show_batch(b_out, max_n=k)
if its is not None:
plot_top_losses(x, y, its, outs.itemgot(slice(len(inps), None)), self.preds[idx], losses, **kwargs)
#TODO: figure out if this is needed
#its None means that a batch knows how to show itself as a whole, so we pass x, x1
#else: show_results(x, x1, its, ctxs=ctxs, max_n=max_n, **kwargs)
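# Illustrative usage (not part of the original source; assumes a trained `learn` object
# with a validation set):
#   interp = Interpretation.from_learner(learn)
#   top_losses, top_idxs = interp.top_losses(9)
#   interp.plot_top_losses(9)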
# Cell
class ClassificationInterpretation(Interpretation):
"Interpretation methods for classification models."
def __init__(self, dl, inputs, preds, targs, decoded, losses):
super().__init__(dl, inputs, preds, targs, decoded, losses)
self.vocab = self.dl.vocab
if is_listy(self.vocab): self.vocab = self.vocab[-1]
def confusion_matrix(self):
"Confusion matrix as an `np.ndarray`."
x = torch.arange(0, len(self.vocab))
d,t = flatten_check(self.decoded, self.targs)
cm = ((d==x[:,None]) & (t==x[:,None,None])).long().sum(2)
return to_np(cm)
def plot_confusion_matrix(self, normalize=False, title='Confusion matrix', cmap="Blues", norm_dec=2,
plot_txt=True, **kwargs):
"Plot the confusion matrix, with `title` and using `cmap`."
# This function is mainly copied from the sklearn docs
cm = self.confusion_matrix()
if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig = plt.figure(**kwargs)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.arange(len(self.vocab))
plt.xticks(tick_marks, self.vocab, rotation=90)
plt.yticks(tick_marks, self.vocab, rotation=0)
if plot_txt:
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black")
ax = fig.gca()
ax.set_ylim(len(self.vocab)-.5,-.5)
plt.tight_layout()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.grid(False)
def most_confused(self, min_val=1):
"Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences."
cm = self.confusion_matrix()
np.fill_diagonal(cm, 0)
res = [(self.vocab[i],self.vocab[j],cm[i,j])
for i,j in zip(*np.where(cm>=min_val))]
return sorted(res, key=itemgetter(2), reverse=True)
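    # Illustrative usage (not part of the original source; assumes a trained classification `learn`):
    #   interp = ClassificationInterpretation.from_learner(learn)
    #   interp.plot_confusion_matrix(figsize=(6,6))
    #   interp.most_confused(min_val=2)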
def print_classification_report(self):
"Print scikit-learn classification report"
d,t = flatten_check(self.decoded, self.targs)
        print(skm.classification_report(t, d, labels=list(self.vocab.o2i.values()), target_names=[str(v) for v in self.vocab]))
# end of src/fastai/interpret.py
__all__ = ['ParallelTrainer', 'setup_distrib', 'teardown_distrib', 'DistributedDL', 'DistributedTrainer', 'rank0_first']
# Cell
from .basics import *
from .callback.progress import ProgressCallback
from torch.nn.parallel import DistributedDataParallel, DataParallel
from .data.load import _FakeLoader,_loaders
# Cell
@patch
def reset(self: DataParallel):
"Patch required `reset` call into `DataParallel`"
if hasattr(self.module, 'reset'): self.module.reset()
# Cell
class ParallelTrainer(Callback):
"Wrap a model `DataParallel` automatically"
run_after,run_before = TrainEvalCallback,Recorder
def __init__(self, device_ids): self.device_ids = device_ids
def before_fit(self): self.learn.model = DataParallel(self.learn.model, device_ids=self.device_ids)
def after_fit(self): self.learn.model = self.learn.model.module
# Cell
@patch
def to_parallel(self: Learner, device_ids=None):
"Add `ParallelTrainer` callback to a `Learner`"
self.add_cb(ParallelTrainer(device_ids))
return self
# Cell
@patch
def detach_parallel(self: Learner):
"Remove `ParallelTrainer` callback from a Learner"
self.remove_cb(ParallelTrainer)
return self
# Cell
@patch
@contextmanager
def parallel_ctx(self: Learner, device_ids=None):
"A context manager to adapt a learner to train in data parallel mode."
try:
self.to_parallel(device_ids)
yield self
finally: self.detach_parallel()
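# Illustrative usage (not part of the original source; assumes a `learn` object and more than one GPU):
#   with learn.parallel_ctx():   # wraps the model in nn.DataParallel for the duration of the block
#       learn.fit(1)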
# Cell
@patch
def reset(self: DistributedDataParallel):
"Patch required `reset` call into `DistributedDataParallel`"
if hasattr(self.module, 'reset'): self.module.reset()
# Cell
def setup_distrib(gpu=None):
"Setup this process to participate in distributed training"
if gpu is None: return gpu
gpu = int(gpu)
torch.cuda.set_device(int(gpu))
if num_distrib() > 0: torch.distributed.init_process_group(backend='nccl', init_method='env://')
return gpu
# Cell
def teardown_distrib():
"Free distributed training resources"
if torch.distributed.is_initialized(): torch.distributed.destroy_process_group()
# Cell
def _round_to_multiple(number,multiple): return int(math.ceil(number/multiple)*multiple)
# Cell
class DistributedDL(TfmdDL):
"A `TfmdDL` which splits a batch into equal size pieces for each worker"
def __init__(self,dl,rank=None,world_size=None):
if rank is None: rank=rank_distrib()
if world_size is None: world_size=num_distrib()
store_attr()
self.bs,self.device,self.drop_last,self.dataset,fake = attrgetter('bs','device','drop_last','dataset','fake_l')(dl)
self.fake_l = _FakeLoader(self, fake.pin_memory, fake.num_workers, fake.timeout, persistent_workers=fake.persistent_workers)
def _broadcast(self,t,rank):
"Broadcasts t from rank `rank` to all other ranks. Returns t so t is same for all ranks after call."
t = LongTensor(t).cuda() # nccl only works with cuda tensors
torch.distributed.broadcast(t,rank)
return t.cpu().tolist()
def _to_detach(self,b,cpu=True,gather=True): return to_detach(b,cpu,gather) # member func so we can override for test
def __len__(self): return _round_to_multiple(len(self.dl),self.world_size)//self.world_size
def get_idxs(self):
idxs = list(self.dl.get_idxs()) # compute get_idxs in all ranks (we'll only use rank 0 but size must be consistent)
idxs = self._broadcast(idxs,0) # broadcast and receive it from rank 0 to all
self.n = len(idxs) # we assumed n was dl.n but we really care about number of idxs
# add extra samples to make it evenly divisible
self.n_padded = _round_to_multiple(self.n,self.world_size)
idxs += (idxs * (self.n_padded//self.n))[:self.n_padded-self.n] # idx needs to be repeated when n_padded>>n
# slice padded idxs so that each rank gets self.n_padded//self.world_size tensors
return idxs[self.rank*self.n_padded//self.world_size:(self.rank+1)*self.n_padded//self.world_size]
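    # Worked example of the padding above (derived from the code, not in the original source): with
    # n=10 samples and world_size=4, n_padded=12, so the first 2 indices are repeated at the end and
    # each rank receives 12//4=3 indices (rank 0 -> idxs[0:3], rank 1 -> idxs[3:6], ...); the padded
    # items are dropped again in `to_detach` below.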
def before_iter(self):
self.i = 0
self.dl.before_iter()
def randomize(self): self.dl.randomize()
def after_batch(self,b):
self.i += find_bs(b)
return self.dl.after_batch(b)
def after_iter(self): self.dl.after_iter()
def create_batches(self,samps): return self.dl.create_batches(samps)
def to_detach(self,b, cpu=True, gather=True):
b = self._to_detach(b, cpu, gather)
def _inner(b):
if b.ndim>0:
# for each rank, compute overflow of read idxs vs self.n and accumulate them to unpad totals after gathering
n = sum([min(0,max(-len(b)//self.world_size,
self.n-(self.i+r*self.n_padded//self.world_size))) for r in range(self.world_size)])
b = b[:n or None]
return b
return apply(_inner,b) if gather and all(hasattr(self,o) for o in ('i','n','n_padded')) else b
# Cell
class DistributedTrainer(Callback):
"Wrap `model` in `DistributedDataParallel` and `dls` in `DistributedDL`"
fup = None
def __init__(self, cuda_id=0,sync_bn=True): store_attr()
def before_fit(self):
opt_kwargs = { 'find_unused_parameters' : DistributedTrainer.fup } if DistributedTrainer.fup is not None else {}
self.learn.model = DistributedDataParallel(
nn.SyncBatchNorm.convert_sync_batchnorm(self.model) if self.sync_bn else self.model,
device_ids=[self.cuda_id], output_device=self.cuda_id, **opt_kwargs)
self.old_dls = list(self.dls)
self.learn.dls.loaders = [self._wrap_dl(dl) for dl in self.dls]
if rank_distrib(): self.learn.logger=noop
def _wrap_dl(self, dl): return dl if isinstance(dl,DistributedDL) else DistributedDL(dl)
def before_train(self): self.learn.dl = self._wrap_dl(self.learn.dl)
def before_validate(self): self.learn.dl = self._wrap_dl(self.learn.dl)
def after_fit(self): self.learn.model,self.learn.dls.loaders = self.learn.model.module,self.old_dls
# Cell
@patch
def to_distributed(self: Learner, cuda_id, sync_bn=True):
"Add `DistributedTrainer` to a learner"
self.add_cb(DistributedTrainer(cuda_id,sync_bn))
if rank_distrib(): self.remove_cb(ProgressCallback)
return self
# Cell
@patch
def detach_distributed(self: Learner):
"Remove `DistributedTrainer` from a learner"
if num_distrib() <=1: return self
self.remove_cb(DistributedTrainer)
if rank_distrib() and not hasattr(self, 'progress'): self.add_cb(ProgressCallback())
return self
# Cell
@patch
@contextmanager
def distrib_ctx(self: Learner, cuda_id=None,sync_bn=True):
"A context manager to adapt a learner to train in distributed data parallel mode."
# Figure out the GPU to use from rank. Create a dpg if none exists yet.
if cuda_id is None: cuda_id = rank_distrib()
if not torch.distributed.is_initialized():
setup_distrib(cuda_id)
cleanup_dpg = torch.distributed.is_initialized()
else: cleanup_dpg = False
# Adapt self to DistributedDataParallel, yield, and cleanup afterwards.
try:
if num_distrib(): self.to_distributed(cuda_id,sync_bn)
yield self
finally:
self.detach_distributed()
if cleanup_dpg: teardown_distrib()
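# Illustrative usage (not part of the original source; assumes `learn` exists and the script is run
# under a distributed launcher, e.g. one process per GPU):
#   with learn.distrib_ctx():
#       learn.fit_one_cycle(1)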
# Cell
def rank0_first(func, *args, **kwargs):
"Execute `func` in the Rank-0 process first, then in other ranks in parallel."
if args or kwargs: func = partial(func, *args, **kwargs)
dummy_l = Learner(DataLoaders(device='cpu'), nn.Linear(1,1), loss_func=lambda: 0)
with dummy_l.distrib_ctx():
if not rank_distrib(): res = func()
distrib_barrier()
if rank_distrib(): res = func()
    return res
# end of src/fastai/distributed.py
import numpy as np
import io,operator,sys,os,re,mimetypes,csv,itertools,json,shutil,glob,pickle,tarfile,collections
import hashlib,itertools,types,inspect,functools,random,time,math,bz2,typing,numbers,string
import multiprocessing,threading,urllib,tempfile,concurrent.futures,matplotlib,warnings,zipfile
from concurrent.futures import as_completed
from functools import partial,reduce
from itertools import starmap,dropwhile,takewhile,zip_longest
from copy import copy,deepcopy
from multiprocessing import Lock,Process,Queue,queues
from datetime import datetime
from contextlib import redirect_stdout,contextmanager
from collections.abc import Iterable,Iterator,Generator,Sequence
from typing import Union,Optional
from types import SimpleNamespace
from pathlib import Path
from collections import OrderedDict,defaultdict,Counter,namedtuple
from enum import Enum,IntEnum
from textwrap import TextWrapper
from operator import itemgetter,attrgetter,methodcaller
from urllib.request import urlopen
# External modules
import requests,yaml,matplotlib.pyplot as plt,pandas as pd,scipy
from pandas.api.types import is_categorical_dtype,is_numeric_dtype
from numpy import array,ndarray
from scipy import ndimage
from pdb import set_trace
from fastcore.all import *
from fastprogress.fastprogress import progress_bar,master_bar
try:
from types import WrapperDescriptorType,MethodWrapperType,MethodDescriptorType
except ImportError:
WrapperDescriptorType = type(object.__init__)
MethodWrapperType = type(object().__str__)
MethodDescriptorType = type(str.join)
from types import BuiltinFunctionType,BuiltinMethodType,MethodType,FunctionType,LambdaType
pd.options.display.max_colwidth = 600
NoneType = type(None)
string_classes = (str,bytes)
mimetypes.init()
# PyTorch warnings
warnings.filterwarnings("ignore", message='.*nonzero.*', category=UserWarning)
warnings.filterwarnings("ignore", message='.*grid_sample.*', category=UserWarning)
warnings.filterwarnings("ignore", message='.*Distutils.*', category=UserWarning)
def is_iter(o):
"Test whether `o` can be used in a `for` loop"
#Rank 0 tensors in PyTorch are not really iterable
return isinstance(o, (Iterable,Generator)) and getattr(o,'ndim',1)
def is_coll(o):
"Test whether `o` is a collection (i.e. has a usable `len`)"
#Rank 0 tensors in PyTorch do not have working `len`
return hasattr(o, '__len__') and getattr(o,'ndim',1)
def all_equal(a,b):
"Compares whether `a` and `b` are the same length and have the same contents"
if not is_iter(b): return False
return all(equals(a_,b_) for a_,b_ in itertools.zip_longest(a,b))
def noop (x=None, *args, **kwargs):
"Do nothing"
return x
def noops(self, x=None, *args, **kwargs):
"Do nothing (method)"
return x
def one_is_instance(a, b, t): return isinstance(a,t) or isinstance(b,t)
def equals(a,b):
"Compares `a` and `b` for equality; supports sublists, tensors and arrays too"
if one_is_instance(a,b,type): return a==b
if hasattr(a, '__array_eq__'): return a.__array_eq__(b)
if hasattr(b, '__array_eq__'): return b.__array_eq__(a)
cmp = (np.array_equal if one_is_instance(a, b, ndarray ) else
operator.eq if one_is_instance(a, b, (str,dict,set)) else
all_equal if is_iter(a) or is_iter(b) else
operator.eq)
return cmp(a,b)
def pv(text, verbose):
    if verbose: print(text)
# end of src/fastai/imports.py
__all__ = ['progress_bar', 'master_bar', 'subplots', 'show_image', 'show_titled_image', 'show_images', 'ArrayBase',
'ArrayImageBase', 'ArrayImage', 'ArrayImageBW', 'ArrayMask', 'tensor', 'set_seed', 'get_random_states',
'set_random_states', 'no_random', 'unsqueeze', 'unsqueeze_', 'apply', 'maybe_gather', 'to_detach', 'to_half',
'to_float', 'default_device', 'to_device', 'to_cpu', 'to_np', 'to_concat', 'TensorBase', 'TensorImageBase',
'TensorImage', 'TensorImageBW', 'TensorMask', 'TensorFlowField', 'TensorCategory', 'TensorMultiCategory',
'TitledTensorScalar', 'concat', 'Chunks', 'show_title', 'ShowTitle', 'TitledInt', 'TitledFloat', 'TitledStr',
'TitledTuple', 'get_empty_df', 'display_df', 'get_first', 'one_param', 'item_find', 'find_device', 'find_bs',
'np_func', 'Module', 'get_model', 'one_hot', 'one_hot_decode', 'params', 'trainable_params', 'norm_types',
'norm_bias_params', 'batch_to_samples', 'logit', 'num_distrib', 'rank_distrib', 'distrib_barrier',
'base_doc', 'doc', 'nested_reorder', 'make_cross_image', 'show_image_batch', 'requires_grad', 'init_default',
'cond_init', 'apply_leaf', 'apply_init', 'script_use_ctx', 'script_save_ctx', 'script_fwd', 'script_bwd',
'grad_module', 'flatten_check']
# Cell
from .imports import *
from .torch_imports import *
# Cell
#nbdev_comment _all_ = ['progress_bar','master_bar']
# Cell
if torch.cuda.is_available():
if torch.cuda.current_device()==0:
def_gpu = int(os.environ.get('DEFAULT_GPU') or 0)
if torch.cuda.device_count()>=def_gpu: torch.cuda.set_device(def_gpu)
torch.backends.cudnn.benchmark = True
# Cell
@delegates(plt.subplots, keep=True)
def subplots(nrows=1, ncols=1, figsize=None, imsize=3,suptitle=None, **kwargs):
if figsize is None:
h=nrows*imsize if suptitle is None or imsize>2 else nrows*imsize+0.6 #https://github.com/matplotlib/matplotlib/issues/5355
figsize=(ncols*imsize, h)
fig,ax = plt.subplots(nrows, ncols, figsize=figsize, **kwargs)
if suptitle is not None: fig.suptitle(suptitle)
if nrows*ncols==1: ax = array([ax])
return fig,ax
# Cell
def _fig_bounds(x):
r = x//32
return min(5, max(1,r))
# Cell
@delegates(plt.Axes.imshow, keep=True, but=['shape', 'imlim'])
def show_image(im, ax=None, figsize=None, title=None, ctx=None, **kwargs):
"Show a PIL or PyTorch image on `ax`."
# Handle pytorch axis order
if hasattrs(im, ('data','cpu','permute')):
im = im.data.cpu()
if im.shape[0]<5: im=im.permute(1,2,0)
elif not isinstance(im,np.ndarray): im=array(im)
# Handle 1-channel images
if im.shape[-1]==1: im=im[...,0]
ax = ifnone(ax,ctx)
if figsize is None: figsize = (_fig_bounds(im.shape[0]), _fig_bounds(im.shape[1]))
if ax is None: _,ax = plt.subplots(figsize=figsize)
ax.imshow(im, **kwargs)
if title is not None: ax.set_title(title)
ax.axis('off')
return ax
# Cell
@delegates(show_image, keep=True)
def show_titled_image(o, **kwargs):
"Call `show_image` destructuring `o` to `(img,title)`"
show_image(o[0], title=str(o[1]), **kwargs)
# Cell
@delegates(subplots)
def show_images(ims, nrows=1, ncols=None, titles=None, **kwargs):
"Show all images `ims` as subplots with `rows` using `titles`."
if ncols is None: ncols = int(math.ceil(len(ims)/nrows))
if titles is None: titles = [None]*len(ims)
axs = subplots(nrows, ncols, **kwargs)[1].flat
for im,t,ax in zip(ims, titles, axs): show_image(im, ax=ax, title=t)
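# Illustrative usage (not part of the original source; `ims` can hold PIL images, arrays or tensors):
#   show_images(ims, nrows=2, titles=['a','b','c','d'])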
# Cell
class ArrayBase(ndarray):
"An `ndarray` that can modify casting behavior"
@classmethod
def _before_cast(cls, x): return x if isinstance(x,ndarray) else array(x)
# Cell
class ArrayImageBase(ArrayBase):
"Base class for arrays representing images"
_show_args = {'cmap':'viridis'}
def show(self, ctx=None, **kwargs):
return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
# Cell
class ArrayImage(ArrayImageBase):
"An array representing an image"
pass
# Cell
class ArrayImageBW(ArrayImage):
"An array representing an image"
_show_args = {'cmap':'Greys'}
# Cell
class ArrayMask(ArrayImageBase):
"An array representing an image mask"
_show_args = {'alpha':0.5, 'cmap':'tab20', 'interpolation':'nearest'}
# Cell
@patch
def __array_eq__(self:Tensor,b):
return torch.equal(self,b) if self.dim() else self==b
# Cell
def _array2tensor(x):
if x.dtype==np.uint16: x = x.astype(np.float32)
return torch.from_numpy(x)
# Cell
@use_kwargs_dict(dtype=None, device=None, requires_grad=False, pin_memory=False)
def tensor(x, *rest, **kwargs):
"Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly."
if len(rest): x = (x,)+rest
    # There was a PyTorch bug in DataLoader when using num_workers>0; it hasn't been confirmed whether it is fixed
# if isinstance(x, (tuple,list)) and len(x)==0: return tensor(0)
res = (x if isinstance(x, Tensor)
else torch.tensor(x, **kwargs) if isinstance(x, (tuple,list))
else _array2tensor(x) if isinstance(x, ndarray)
else as_tensor(x.values, **kwargs) if isinstance(x, (pd.Series, pd.DataFrame))
else as_tensor(x, **kwargs) if hasattr(x, '__array__') or is_iter(x)
else _array2tensor(array(x), **kwargs))
if res.dtype is torch.float64: return res.float()
return res
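# Illustrative usage (not part of the original source):
#   tensor([1,2,3]); tensor(np.zeros(3)); tensor(1,2,3)   # the last call packs the separate scalars into one tensor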
# Cell
def set_seed(s, reproducible=False):
"Set random seed for `random`, `torch`, and `numpy` (where available)"
try: torch.manual_seed(s)
except NameError: pass
try: torch.cuda.manual_seed_all(s)
except NameError: pass
try: np.random.seed(s%(2**32-1))
except NameError: pass
random.seed(s)
if reproducible:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Cell
def get_random_states():
"Gets states for `random`, `torch`, and `numpy` random number generators"
return {'random_state':random.getstate(),
'numpy_state':np.random.get_state(),
'torch_state':torch.get_rng_state(),
'torch_cuda_state':torch.cuda.get_rng_state_all(),
'torch_deterministic':torch.backends.cudnn.deterministic,
'torch_benchmark':torch.backends.cudnn.benchmark}
# Cell
def set_random_states(random_state,numpy_state,torch_state,torch_cuda_state,torch_deterministic,torch_benchmark):
"Set states for `random`, `torch`, and `numpy` random number generators"
random.setstate(random_state)
np.random.set_state(numpy_state)
torch.set_rng_state(torch_state)
torch.cuda.set_rng_state_all(torch_cuda_state)
torch.backends.cudnn.deterministic=torch_deterministic
torch.backends.cudnn.benchmark=torch_benchmark
# Cell
@contextmanager
def no_random(seed=42,reproducible=True):
"Stores and retrieves state of random number generators. Sets random seed for `random`, `torch`, and `numpy`."
states = get_random_states()
set_seed(seed,reproducible=reproducible)
try:
yield #we are managing global variables
finally:
set_random_states(**states)
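# Illustrative usage (not part of the original source):
#   with no_random(seed=42):
#       x = torch.randn(3)   # reproducible inside the block; the previous RNG state is restored on exit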
# Cell
def unsqueeze(x, dim=-1, n=1):
"Same as `torch.unsqueeze` but can add `n` dims"
for _ in range(n): x = x.unsqueeze(dim)
return x
# Cell
def unsqueeze_(x, dim=-1, n=1):
"Same as `torch.unsqueeze_` but can add `n` dims"
for _ in range(n): x.unsqueeze_(dim)
return x
# Cell
def _fa_rebuild_tensor (cls, *args, **kwargs): return cls(torch._utils._rebuild_tensor_v2(*args, **kwargs))
def _fa_rebuild_qtensor(cls, *args, **kwargs): return cls(torch._utils._rebuild_qtensor (*args, **kwargs))
# Cell
def apply(func, x, *args, **kwargs):
"Apply `func` recursively to `x`, passing on args"
if is_listy(x): return type(x)([apply(func, o, *args, **kwargs) for o in x])
if isinstance(x,dict): return {k: apply(func, v, *args, **kwargs) for k,v in x.items()}
res = func(x, *args, **kwargs)
return res if x is None else retain_type(res, x)
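# Illustrative usage (not part of the original source):
#   apply(lambda t: t*2, [tensor(1), {'a': tensor(2)}])   # -> [tensor(2), {'a': tensor(4)}]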
# Cell
def maybe_gather(x, axis=0):
"Gather copies of `x` on `axis` (if training is distributed)"
if num_distrib()<=1: return x
ndim = x.ndim
res = [x.new_zeros(*x.shape if ndim > 0 else (1,)) for _ in range(num_distrib())]
torch.distributed.all_gather(res, x.contiguous() if ndim > 0 else x[None])
return torch.cat(res, dim=axis) if ndim > 0 else torch.cat(res, dim=axis).mean()
# Cell
def to_detach(b, cpu=True, gather=True):
"Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`."
def _inner(x, cpu=True, gather=True):
if not isinstance(x,Tensor): return x
x = x.detach()
if gather: x = maybe_gather(x)
return x.cpu() if cpu else x
return apply(_inner, b, cpu=cpu, gather=gather)
# Cell
def to_half(b):
    "Recursively map floating-point tensors in `b` to FP16 (half precision)."
    return apply(lambda x: x.half() if torch.is_floating_point(x) else x, b)
# Cell
def to_float(b):
    "Recursively map floating-point tensors in `b` to FP32 (full precision)."
    return apply(lambda x: x.float() if torch.is_floating_point(x) else x, b)
# Cell
# defaults.use_cuda: None - use CUDA if available; True - error if CUDA is not available; False - use the CPU
defaults.use_cuda = None
# Cell
def default_device(use_cuda=-1):
"Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU"
if use_cuda != -1: defaults.use_cuda=use_cuda
use = defaults.use_cuda or (torch.cuda.is_available() and defaults.use_cuda is None)
assert torch.cuda.is_available() or not use
return torch.device(torch.cuda.current_device()) if use else torch.device('cpu')
# Cell
def to_device(b, device=None):
"Recursively put `b` on `device`."
if defaults.use_cuda==False: device='cpu'
elif device is None: device=default_device()
def _inner(o): return o.to(device, non_blocking=True) if isinstance(o,Tensor) else o.to_device(device) if hasattr(o, "to_device") else o
return apply(_inner, b)
# Cell
def to_cpu(b):
"Recursively map lists of tensors in `b ` to the cpu."
return to_device(b,'cpu')
# Cell
def to_np(x):
"Convert a tensor to a numpy array."
return apply(lambda o: o.data.cpu().numpy(), x)
# Cell
def to_concat(xs, dim=0):
"Concat the element in `xs` (recursively if they are tuples/lists of tensors)"
if not xs: return xs
if is_listy(xs[0]): return type(xs[0])([to_concat([x[i] for x in xs], dim=dim) for i in range_of(xs[0])])
if isinstance(xs[0],dict): return {k: to_concat([x[k] for x in xs], dim=dim) for k in xs[0].keys()}
#We may receive xs that are not concatenable (inputs of a text classifier for instance),
# in this case we return a big list
try: return retain_type(torch.cat(xs, dim=dim), xs[0])
except: return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])
for i in range_of(o_)) for o_ in xs], L())
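# Example (illustrative sketch, not part of the exported module): `to_concat`
# concatenates tensors and recurses into tuples/lists, concatenating element-wise.
#
#   to_concat([tensor([1,2]), tensor([3,4])])                           # tensor([1, 2, 3, 4])
#   to_concat([(tensor([1]), tensor([2])), (tensor([3]), tensor([4]))]) # (tensor([1, 3]), tensor([2, 4]))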
# Cell
@patch
def set_meta(self:Tensor, x, as_copy=False):
"Set all metadata in `__dict__`"
if not hasattr(x,'__dict__'): return
# XXX: change to `deepcopy` once PyTorch 1.7.1 is out, and check nb 23 segmentation fit works
self.__dict__ = copy(x.__dict__) if as_copy else x.__dict__
# Cell
if not hasattr(torch,'as_subclass'): torch.as_subclass = torch.Tensor.as_subclass
# Cell
@patch
def as_subclass(self:Tensor, typ):
"Cast to `typ` and include `__dict__` and meta"
return retain_meta(self, torch.as_subclass(self, typ))
# Cell
def _torch_handled(args, opt, func):
if func not in opt: return False
for oks in opt[func]:
if all(isinstance(arg,ok) for arg,ok in zip(args,oks) if ok): return True
# Cell
class TensorBase(Tensor):
"A `Tensor` which support subclass pickling, and maintains metadata when casting or after methods"
debug,_opt = False,defaultdict(list)
def __new__(cls, x, **kwargs):
res = cast(tensor(x), cls)
for k,v in kwargs.items(): setattr(res, k, v)
return res
@classmethod
def _before_cast(cls, x): return tensor(x)
def __repr__(self): return re.sub('tensor', self.__class__.__name__, super().__repr__())
def __reduce_ex__(self,proto):
torch.utils.hooks.warn_if_has_hooks(self)
args = (type(self), self.storage(), self.storage_offset(), tuple(self.size()), self.stride())
if self.is_quantized: args = args + (self.q_scale(), self.q_zero_point())
f = _fa_rebuild_qtensor if self.is_quantized else _fa_rebuild_tensor
return (f, args + (self.requires_grad, OrderedDict()))
@classmethod
def register_func(cls, func, *oks): cls._opt[func].append(oks)
def __torch_function__(self, func, types, args=(), kwargs=None):
if self.debug and func.__name__ not in ('__str__','__repr__'): print(func, types, args, kwargs)
convert=False
if _torch_handled(args, self._opt, func): convert,types = type(self),(torch.Tensor,)
res = super().__torch_function__(func, types, args=args, kwargs=kwargs)
if convert: res = convert(res)
if isinstance(res, TensorBase): res.set_meta(self, as_copy=True)
return res
def new_tensor(self, size, dtype=None, device=None, requires_grad=False):
cls = type(self)
return self.as_subclass(Tensor).new_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad).as_subclass(cls)
def new_ones(self, data, dtype=None, device=None, requires_grad=False):
cls = type(self)
return self.as_subclass(Tensor).new_ones(data, dtype=dtype, device=device, requires_grad=requires_grad).as_subclass(cls)
def new(self, x=None):
cls = type(self)
res = self.as_subclass(Tensor).new() if x is None else self.as_subclass(Tensor).new(x)
return res.as_subclass(cls)
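# Example (illustrative sketch, not part of the exported module): subclasses of
# `TensorBase` survive torch ops and pickling and carry their metadata along;
# `some_meta` below is an arbitrary attribute chosen only for illustration.
#
#   t = TensorBase(torch.rand(3,8,8)); t.some_meta = 'cat'
#   t2 = t + 1
#   type(t2), t2.some_meta        # (TensorBase, 'cat')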
# Cell
class TensorImageBase(TensorBase):
_show_args = ArrayImageBase._show_args
def show(self, ctx=None, **kwargs):
return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
# Cell
class TensorImage(TensorImageBase): pass
# Cell
class TensorImageBW(TensorImage): _show_args = ArrayImageBW._show_args
# Cell
class TensorMask(TensorImageBase):
_show_args = ArrayMask._show_args
def show(self, ctx=None, **kwargs):
codes = getattr(self, 'codes', None)
if codes is not None: kwargs = merge({'vmin': 1, 'vmax': len(codes)}, kwargs)
return super().show(ctx=ctx, **kwargs)
# Cell
for o in Tensor.add,Tensor.sub,Tensor.mul,Tensor.div,Tensor.__rsub__,Tensor.__radd__,Tensor.matmul,Tensor.bmm:
TensorBase.register_func(o, TensorMask, TensorImageBase)
TensorBase.register_func(o, TensorImageBase, TensorMask)
TensorMask.register_func(torch.einsum, str, TensorImageBase, TensorMask)
TensorMask.register_func(torch.einsum, str, TensorMask, TensorImageBase)
# Cell
class TensorFlowField(TensorBase): pass
TensorImage.register_func(F.grid_sample, TensorImageBase, TensorFlowField)
# Cell
class TensorCategory(TensorBase): pass
# Cell
class TensorMultiCategory(TensorCategory): pass
# Cell
class TitledTensorScalar(TensorBase):
"A tensor containing a scalar that has a `show` method"
def show(self, **kwargs): show_title(self.item(), **kwargs)
# Cell
@patch
def tensored(self:L):
"`mapped(tensor)`"
return self.map(tensor)
@patch
def stack(self:L, dim=0):
"Same as `torch.stack`"
return torch.stack(list(self.tensored()), dim=dim)
@patch
def cat (self:L, dim=0):
"Same as `torch.cat`"
return torch.cat (list(self.tensored()), dim=dim)
# Cell
def concat(*ls):
"Concatenate tensors, arrays, lists, or tuples"
if not len(ls): return []
it = ls[0]
if isinstance(it,torch.Tensor): res = torch.cat(ls)
elif isinstance(it,ndarray): res = np.concatenate(ls)
else:
res = itertools.chain.from_iterable(map(L,ls))
if isinstance(it,(tuple,list)): res = type(it)(res)
else: res = L(res)
return retain_type(res, it)
# Cell
class Chunks:
"Slice and int indexing into a list of lists"
def __init__(self, chunks, lens=None):
self.chunks = chunks
self.lens = L(map(len,self.chunks) if lens is None else lens)
self.cumlens = np.cumsum(0+self.lens)
self.totlen = self.cumlens[-1]
def __getitem__(self,i):
if isinstance(i,slice): return retain_type(self.getslice(i), old=self.chunks[0])
di,idx = self.doc_idx(i)
return retain_type(self.chunks[di][idx], old=self.chunks[0])
def getslice(self, i):
st_d,st_i = self.doc_idx(ifnone(i.start,0))
en_d,en_i = self.doc_idx(ifnone(i.stop,self.totlen+1))
res = [self.chunks[st_d][st_i:(en_i if st_d==en_d else sys.maxsize)]]
for b in range(st_d+1,en_d): res.append(self.chunks[b])
if st_d!=en_d and en_d<len(self.chunks): res.append(self.chunks[en_d][:en_i])
return concat(*res)
def doc_idx(self, i):
if i<0: i=self.totlen+i # count from end
docidx = np.searchsorted(self.cumlens, i+1)-1
cl = self.cumlens[docidx]
return docidx,i-cl
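# Example (illustrative sketch, not part of the exported module): `Chunks` indexes
# across a list of lists as if it were one flat sequence.
#
#   c = Chunks([list(range(10)), list(range(10,25)), list(range(25,30))])
#   c.totlen, c[12], c[:4]        # (30, 12, [0, 1, 2, 3])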
# Cell
def show_title(o, ax=None, ctx=None, label=None, color='black', **kwargs):
"Set title of `ax` to `o`, or print `o` if `ax` is `None`"
ax = ifnone(ax,ctx)
if ax is None: print(o)
elif hasattr(ax, 'set_title'):
t = ax.title.get_text()
if len(t) > 0: o = t+'\n'+str(o)
ax.set_title(o, color=color)
elif isinstance(ax, pd.Series):
while label in ax: label += '_'
ax = ax.append(pd.Series({label: o}))
return ax
# Cell
class ShowTitle:
"Base class that adds a simple `show`"
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledInt(Int, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledFloat(Float, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledStr(Str, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledTuple(fastuple, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
add_docs(TitledInt, "An `int` with `show`"); add_docs(TitledStr, "A `str` with `show`");
add_docs(TitledFloat, "A `float` with `show`"); add_docs(TitledTuple, "A `fastuple` with `show`")
# Cell
@patch
def truncate(self:TitledStr, n):
"Truncate self to `n`"
words = self.split(' ')[:n]
return TitledStr(' '.join(words))
# Cell
if not hasattr(pd.DataFrame,'_old_init'): pd.DataFrame._old_init = pd.DataFrame.__init__
# Cell
@patch
def __init__(self:pd.DataFrame, data=None, index=None, columns=None, dtype=None, copy=False):
if data is not None and isinstance(data, Tensor): data = to_np(data)
self._old_init(data, index=index, columns=columns, dtype=dtype, copy=copy)
# Cell
def get_empty_df(n):
"Return `n` empty rows of a dataframe"
df = pd.DataFrame(index = range(n))
return [df.iloc[i] for i in range(n)]
# Cell
def display_df(df):
"Display `df` in a notebook or defaults to print"
try: from IPython.display import display, HTML
except: return print(df)
display(HTML(df.to_html()))
# Cell
def get_first(c):
"Get the first element of c, even if c is a dataframe"
return getattr(c, 'iloc', c)[0]
# Cell
def one_param(m):
"First parameter in `m`"
return first(m.parameters())
# Cell
def item_find(x, idx=0):
"Recursively takes the `idx`-th element of `x`"
if is_listy(x): return item_find(x[idx])
if isinstance(x,dict):
key = list(x.keys())[idx] if isinstance(idx, int) else idx
return item_find(x[key])
return x
# Cell
def find_device(b):
"Recursively search the device of `b`."
return item_find(b).device
# Cell
def find_bs(b):
"Recursively search the batch size of `b`."
return item_find(b).shape[0]
# Cell
def np_func(f):
"Convert a function taking and returning numpy arrays to one taking and returning tensors"
def _inner(*args, **kwargs):
nargs = [to_np(arg) if isinstance(arg,Tensor) else arg for arg in args]
return tensor(f(*nargs, **kwargs))
functools.update_wrapper(_inner, f)
return _inner
# Cell
class Module(nn.Module, metaclass=PrePostInitMeta):
"Same as `nn.Module`, but no need for subclasses to call `super().__init__`"
def __pre_init__(self, *args, **kwargs): super().__init__()
def __init__(self): pass
# Cell
from torch.nn.parallel import DistributedDataParallel
# Cell
def get_model(model):
"Return the model maybe wrapped inside `model`."
return model.module if isinstance(model, (DistributedDataParallel, nn.DataParallel)) else model
# Cell
def one_hot(x, c):
"One-hot encode `x` with `c` classes."
res = torch.zeros(c, dtype=torch.uint8)
if isinstance(x, Tensor) and x.numel()>0: res[x] = 1.
else: res[list(L(x, use_list=None))] = 1.
return res
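# Example (illustrative sketch, not part of the exported module):
#
#   one_hot([0, 2], 5)            # tensor([1, 0, 1, 0, 0], dtype=torch.uint8)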
# Cell
def one_hot_decode(x, vocab=None):
    "Return the indices (or `vocab` entries) where the one-hot encoded `x` is 1"
    return L(vocab[i] if vocab else i for i,x_ in enumerate(x) if x_==1)
# Cell
def params(m):
"Return all parameters of `m`"
return [p for p in m.parameters()]
# Cell
def trainable_params(m):
"Return all trainable parameters of `m`"
return [p for p in m.parameters() if p.requires_grad]
# Cell
norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.LayerNorm)
# Cell
def norm_bias_params(m, with_bias=True):
"Return all bias and BatchNorm parameters"
if isinstance(m, norm_types): return L(m.parameters())
res = L(m.children()).map(norm_bias_params, with_bias=with_bias).concat()
if with_bias and getattr(m, 'bias', None) is not None: res.append(m.bias)
return res
# Cell
def batch_to_samples(b, max_n=10):
"'Transposes' a batch to (at most `max_n`) samples"
if isinstance(b, Tensor): return retain_types(list(b[:max_n]), [b])
else:
res = L(b).map(partial(batch_to_samples,max_n=max_n))
return retain_types(res.zip(), [b])
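# Example (illustrative sketch, not part of the exported module): turn a batch of
# collated tensors back into a list of individual samples.
#
#   b = (torch.arange(6).view(3,2), tensor([0,1,2]))
#   batch_to_samples(b, max_n=2)
#   # -> [(tensor([0,1]), tensor(0)), (tensor([2,3]), tensor(1))]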
# Cell
@patch
def interp_1d(x:Tensor, xp, fp):
"Same as `np.interp`"
slopes = (fp[1:]-fp[:-1])/(xp[1:]-xp[:-1])
incx = fp[:-1] - (slopes*xp[:-1])
locs = (x[:,None]>=xp[None,:]).long().sum(1)-1
locs = locs.clamp(0,len(slopes)-1)
return slopes[locs]*x + incx[locs]
# Cell
@patch
def pca(x:Tensor, k=2):
"Compute PCA of `x` with `k` dimensions."
x = x-torch.mean(x,0)
U,S,V = torch.svd(x.t())
return torch.mm(x,U[:,:k])
# Cell
def logit(x):
"Logit of `x`, clamped to avoid inf."
x = x.clamp(1e-7, 1-1e-7)
return -(1/x-1).log()
# Cell
def num_distrib():
"Return the number of processes in distributed training (if applicable)."
return int(os.environ.get('WORLD_SIZE', 0))
# Cell
def rank_distrib():
"Return the distributed rank of this process (if applicable)."
return int(os.environ.get('RANK', 0))
# Cell
def distrib_barrier():
"Place a synchronization barrier in distributed training"
if num_distrib() > 1 and torch.distributed.is_initialized(): torch.distributed.barrier()
# Cell
# Saving arrays requires pytables - optional dependency
try: import tables
except: pass
# Cell
def _comp_filter(lib='lz4',lvl=3): return tables.Filters(complib=f'blosc:{lib}', complevel=lvl)
# Cell
@patch
def save_array(p:Path, o, complib='lz4', lvl=3):
"Save numpy array to a compressed `pytables` file, using compression level `lvl`"
if isinstance(o,Tensor): o = to_np(o)
with tables.open_file(p, mode='w', filters=_comp_filter(lib=complib,lvl=lvl)) as f: f.create_carray('/', 'data', obj=o)
# Cell
@patch
def load_array(p:Path):
"Save numpy array to a `pytables` file"
with tables.open_file(p, 'r') as f: return f.root.data.read()
# Cell
def base_doc(elt):
"Print a base documentation of `elt`"
name = getattr(elt, '__qualname__', getattr(elt, '__name__', ''))
print(f'{name}{inspect.signature(elt)}\n{inspect.getdoc(elt)}\n')
print('To get a prettier result with hyperlinks to source code and documentation, install nbdev: pip install nbdev')
# Cell
def doc(elt):
"Try to use doc form nbdev and fall back to `base_doc`"
try:
from nbdev.showdoc import doc
doc(elt)
except: base_doc(elt)
# Cell
def nested_reorder(t, idxs):
"Reorder all tensors in `t` using `idxs`"
if isinstance(t, (Tensor,L)): return t[idxs]
elif is_listy(t): return type(t)(nested_reorder(t_, idxs) for t_ in t)
if t is None: return t
raise TypeError(f"Expected tensor, tuple, list or L but got {type(t)}")
# Cell
def make_cross_image(bw=True):
"Create a tensor containing a cross image, either `bw` (True) or color"
if bw:
im = torch.zeros(5,5)
im[2,:] = 1.
im[:,2] = 1.
else:
im = torch.zeros(3,5,5)
im[0,2,:] = 1.
im[1,:,2] = 1.
return im
# Cell
def show_image_batch(b, show=show_titled_image, items=9, cols=3, figsize=None, **kwargs):
"Display batch `b` in a grid of size `items` with `cols` width"
if items<cols: cols=items
rows = (items+cols-1) // cols
if figsize is None: figsize = (cols*3, rows*3)
fig,axs = plt.subplots(rows, cols, figsize=figsize)
for *o,ax in zip(*to_cpu(b), axs.flatten()): show(o, ax=ax, **kwargs)
# Cell
def requires_grad(m):
"Check if the first parameter of `m` requires grad or not"
ps = list(m.parameters())
return ps[0].requires_grad if len(ps)>0 else False
# Cell
def init_default(m, func=nn.init.kaiming_normal_):
"Initialize `m` weights with `func` and set `bias` to 0."
if func:
if hasattr(m, 'weight'): func(m.weight)
if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
return m
# Cell
def cond_init(m, func):
"Apply `init_default` to `m` unless it's a batchnorm module"
if (not isinstance(m, norm_types)) and requires_grad(m): init_default(m, func)
# Cell
def apply_leaf(m, f):
"Apply `f` to children of `m`."
c = m.children()
if isinstance(m, nn.Module): f(m)
for l in c: apply_leaf(l,f)
# Cell
def apply_init(m, func=nn.init.kaiming_normal_):
"Initialize all non-batchnorm layers of `m` with `func`."
apply_leaf(m, partial(cond_init, func=func))
# Cell
def script_use_ctx(f):
"Decorator: create jit script and pass everything in `ctx.saved_variables to `f`, after `*args`"
sf = torch.jit.script(f)
def _f(ctx, *args, **kwargs): return sf(*args, *ctx.saved_variables, **kwargs)
return update_wrapper(_f,f)
# Cell
def script_save_ctx(static, *argidx):
"Decorator: create jit script and save args with indices `argidx` using `ctx.save_for_backward`"
def _dec(f):
sf = torch.jit.script(f)
def _f(ctx, *args, **kwargs):
if argidx:
save = [args[o] for o in argidx]
ctx.save_for_backward(*save)
            if not argidx: args = (ctx,)+args  # `args` is a tuple, so prepend `ctx` with a tuple (avoids list+tuple concat error)
return sf(*args, **kwargs)
if static: _f = staticmethod(_f)
return update_wrapper(_f,f)
return _dec
# Cell
def script_fwd(*argidx):
"Decorator: create static jit script and save args with indices `argidx` using `ctx.save_for_backward`"
return script_save_ctx(True, *argidx)
# Cell
def script_bwd(f):
"Decorator: create static jit script and pass everything in `ctx.saved_variables to `f`, after `*args`"
return staticmethod(script_use_ctx(f))
# Cell
def grad_module(cls):
"Decorator: convert `cls` into an autograd function"
class _c(nn.Module):
def forward(self, *args, **kwargs): return cls.apply(*args, **kwargs)
return _c
# Comes from 13b_metrics.ipynb, cell
def flatten_check(inp, targ):
"Check that `out` and `targ` have the same number of elements and flatten them."
inp,targ = TensorBase(inp.contiguous()).view(-1),TensorBase(targ.contiguous()).view(-1)
test_eq(len(inp), len(targ))
return inp,targ | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/torch_core.py | torch_core.py |
__all__ = ['TabularCollab', 'CollabDataLoaders', 'EmbeddingDotBias', 'EmbeddingNN', 'collab_learner']
# Cell
from .tabular.all import *
# Cell
class TabularCollab(TabularPandas):
"Instance of `TabularPandas` suitable for collaborative filtering (with no continuous variable)"
with_cont=False
# Cell
class CollabDataLoaders(DataLoaders):
"Base `DataLoaders` for collaborative filtering."
@delegates(DataLoaders.from_dblock)
@classmethod
def from_df(cls, ratings, valid_pct=0.2, user_name=None, item_name=None, rating_name=None, seed=None, path='.', **kwargs):
"Create a `DataLoaders` suitable for collaborative filtering from `ratings`."
user_name = ifnone(user_name, ratings.columns[0])
item_name = ifnone(item_name, ratings.columns[1])
rating_name = ifnone(rating_name, ratings.columns[2])
cat_names = [user_name,item_name]
splits = RandomSplitter(valid_pct=valid_pct, seed=seed)(range_of(ratings))
to = TabularCollab(ratings, [Categorify], cat_names, y_names=[rating_name], y_block=TransformBlock(), splits=splits)
return to.dataloaders(path=path, **kwargs)
@classmethod
def from_csv(cls, csv, **kwargs):
"Create a `DataLoaders` suitable for collaborative filtering from `csv`."
return cls.from_df(pd.read_csv(csv), **kwargs)
CollabDataLoaders.from_csv = delegates(to=CollabDataLoaders.from_df)(CollabDataLoaders.from_csv)
# Cell
class EmbeddingDotBias(Module):
"Base dot model for collaborative filtering."
def __init__(self, n_factors, n_users, n_items, y_range=None):
self.y_range = y_range
(self.u_weight, self.i_weight, self.u_bias, self.i_bias) = [Embedding(*o) for o in [
(n_users, n_factors), (n_items, n_factors), (n_users,1), (n_items,1)
]]
def forward(self, x):
users,items = x[:,0],x[:,1]
dot = self.u_weight(users)* self.i_weight(items)
res = dot.sum(1) + self.u_bias(users).squeeze() + self.i_bias(items).squeeze()
if self.y_range is None: return res
return torch.sigmoid(res) * (self.y_range[1]-self.y_range[0]) + self.y_range[0]
@classmethod
def from_classes(cls, n_factors, classes, user=None, item=None, y_range=None):
"Build a model with `n_factors` by inferring `n_users` and `n_items` from `classes`"
if user is None: user = list(classes.keys())[0]
if item is None: item = list(classes.keys())[1]
res = cls(n_factors, len(classes[user]), len(classes[item]), y_range=y_range)
res.classes,res.user,res.item = classes,user,item
return res
def _get_idx(self, arr, is_item=True):
"Fetch item or user (based on `is_item`) for all in `arr`"
assert hasattr(self, 'classes'), "Build your model with `EmbeddingDotBias.from_classes` to use this functionality."
classes = self.classes[self.item] if is_item else self.classes[self.user]
c2i = {v:k for k,v in enumerate(classes)}
try: return tensor([c2i[o] for o in arr])
except Exception as e:
print(f"""You're trying to access {'an item' if is_item else 'a user'} that isn't in the training data.
If it was in your original data, it may have been split such that it's only in the validation set now.""")
def bias(self, arr, is_item=True):
"Bias for item or user (based on `is_item`) for all in `arr`"
idx = self._get_idx(arr, is_item)
layer = (self.i_bias if is_item else self.u_bias).eval().cpu()
return to_detach(layer(idx).squeeze(),gather=False)
def weight(self, arr, is_item=True):
"Weight for item or user (based on `is_item`) for all in `arr`"
idx = self._get_idx(arr, is_item)
layer = (self.i_weight if is_item else self.u_weight).eval().cpu()
return to_detach(layer(idx),gather=False)
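# Example (illustrative sketch, not part of the exported module): build the dot-product
# model from the classes of a `CollabDataLoaders` and inspect learned biases/weights;
# the class names below are made up for illustration.
#
#   classes = {'user': ['u1','u2','u3'], 'movie': ['m1','m2']}
#   model = EmbeddingDotBias.from_classes(50, classes, y_range=(0, 5.5))
#   model.bias(['m1','m2'])               # per-item bias, shape [2]
#   model.weight(['u1'], is_item=False)   # user embedding, shape [1, 50]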
# Cell
class EmbeddingNN(TabularModel):
"Subclass `TabularModel` to create a NN suitable for collaborative filtering."
@delegates(TabularModel.__init__)
def __init__(self, emb_szs, layers, **kwargs):
super().__init__(emb_szs=emb_szs, n_cont=0, out_sz=1, layers=layers, **kwargs)
# Cell
@delegates(Learner.__init__)
def collab_learner(dls, n_factors=50, use_nn=False, emb_szs=None, layers=None, config=None, y_range=None, loss_func=None, **kwargs):
"Create a Learner for collaborative filtering on `dls`."
emb_szs = get_emb_sz(dls, ifnone(emb_szs, {}))
if loss_func is None: loss_func = MSELossFlat()
if config is None: config = tabular_config()
if y_range is not None: config['y_range'] = y_range
if layers is None: layers = [n_factors]
if use_nn: model = EmbeddingNN(emb_szs=emb_szs, layers=layers, **config)
else: model = EmbeddingDotBias.from_classes(n_factors, dls.classes, y_range=y_range)
return Learner(dls, model, loss_func=loss_func, **kwargs) | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/collab.py | collab.py |
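# Example (illustrative sketch, not part of the exported module): a typical
# collaborative-filtering workflow; `ratings` is assumed to be a DataFrame with
# user, item and rating columns.
#
#   dls = CollabDataLoaders.from_df(ratings, bs=64)
#   learn = collab_learner(dls, n_factors=50, y_range=(0, 5.5))
#   learn.fit_one_cycle(5, 5e-3, wd=0.1)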
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
"""
Utility module that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
def BN_convert_float(module):
"""
Utility function for network_to_half().
Retained for legacy purposes.
"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
module.float()
for child in module.children():
BN_convert_float(child)
return module
def network_to_half(network):
"""
Convert model to half precision in a batchnorm-safe way.
Retained for legacy purposes. It is recommended to use FP16Model.
"""
return nn.Sequential(tofp16(), BN_convert_float(network.half()))
def convert_module(module, dtype):
"""
Converts a module's immediate parameters and buffers to dtype.
"""
for param in module.parameters(recurse=False):
if param is not None:
if param.data.dtype.is_floating_point:
param.data = param.data.to(dtype=dtype)
if param._grad is not None and param._grad.data.dtype.is_floating_point:
param._grad.data = param._grad.data.to(dtype=dtype)
for buf in module.buffers(recurse=False):
if buf is not None and buf.data.dtype.is_floating_point:
buf.data = buf.data.to(dtype=dtype)
def convert_network(network, dtype):
"""
Converts a network's parameters and buffers to dtype.
"""
for module in network.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
continue
convert_module(module, dtype)
if isinstance(module, torch.nn.RNNBase) or isinstance(module, torch.nn.modules.rnn.RNNBase):
module.flatten_parameters()
return network
class FP16Model(nn.Module):
"""
Convert model to half precision in a batchnorm-safe way.
"""
def __init__(self, network):
super(FP16Model, self).__init__()
self.network = convert_network(network, dtype=torch.half)
def forward(self, *inputs):
inputs = tuple(t.half() for t in inputs)
return self.network(*inputs)
def backwards_debug_hook(grad):
raise RuntimeError("master_params recieved a gradient in the backward pass!")
def prep_param_lists(model, flat_master=False):
"""
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
"""
model_params = [param for param in model.parameters() if param.requires_grad]
if flat_master:
# Give the user some more useful error messages
try:
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
except:
print("Error in prep_param_lists: model may contain a mixture of parameters "
"of different types. Use flat_master=False, or use F16_Optimizer.")
raise
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return model_params, [master_params]
else:
master_params = [param.clone().float().detach() for param in model_params]
for param in master_params:
param.requires_grad = True
return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
"""
Copy model gradients to master gradients.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
"""
if flat_master:
# The flattening may incur one more deep copy than is necessary.
master_params[0].grad.data.copy_(
_flatten_dense_tensors([p.grad.data for p in model_params]))
else:
for model, master in zip(model_params, master_params):
if model.grad is not None:
if master.grad is None:
master.grad = Variable(master.data.new(*master.data.size()))
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
def master_params_to_model_params(model_params, master_params, flat_master=False):
"""
Copy master parameters to model parameters.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
"""
if flat_master:
for model, master in zip(model_params,
_unflatten_dense_tensors(master_params[0].data, model_params)):
model.data.copy_(master)
else:
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
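# Example (illustrative sketch, not part of this module): the classic FP16 training
# step using these helpers; `model`, `criterion`, `optimizer`, `x`, `y` and the loss
# `scale` are assumed to exist, and `optimizer` must have been built on `master_params`.
#
#   model_params, master_params = prep_param_lists(model)
#   loss = criterion(model(x.half()), y)
#   (loss * scale).backward()
#   model_grads_to_master_grads(model_params, master_params)
#   for p in master_params: p.grad.data.div_(scale)
#   optimizer.step()
#   master_params_to_model_params(model_params, master_params)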
# Backward compatibility fixes
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
clip_grad_norm = torch.nn.utils.clip_grad_norm_ | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/fp16_utils.py | fp16_utils.py |
__all__ = ['TensorBoardBaseCallback', 'TensorBoardCallback', 'TensorBoardProjectorCallback',
'projector_word_embeddings']
# Cell
from ..basics import *
# Cell
import tensorboard
from torch.utils.tensorboard import SummaryWriter
from .fp16 import ModelToHalf
from .hook import hook_output
# Cell
class TensorBoardBaseCallback(Callback):
"Base class for tensorboard callbacks"
def __init__(self):
self.run_projector = False
def after_pred(self):
if self.run_projector: self.feat = _add_projector_features(self.learn, self.h, self.feat)
def after_validate(self):
if not self.run_projector: return
self.run_projector = False
self._remove()
_write_projector_embedding(self.learn, self.writer, self.feat)
def after_fit(self):
if self.run: self.writer.close()
def _setup_projector(self):
self.run_projector = True
self.h = hook_output(self.learn.model[1][1] if not self.layer else self.layer)
self.feat = {}
def _setup_writer(self):
self.writer = SummaryWriter(log_dir=self.log_dir)
def _remove(self):
if getattr(self, 'h', None): self.h.remove()
def __del__(self): self._remove()
# Cell
class TensorBoardCallback(TensorBoardBaseCallback):
"Saves model topology, losses & metrics for tensorboard and tensorboard projector during training"
def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9, projector=False, layer=None):
super().__init__()
store_attr()
def before_fit(self):
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
if not self.run: return
self._setup_writer()
if self.trace_model:
if hasattr(self.learn, 'mixed_precision'):
raise Exception("Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.")
b = self.dls.one_batch()
self.learn._split(b)
self.writer.add_graph(self.model, *self.xb)
def after_batch(self):
self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter)
for i,h in enumerate(self.opt.hypers):
for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter)
def after_epoch(self):
for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]):
self.writer.add_scalar(n, v, self.train_iter)
if self.log_preds:
b = self.dls.valid.one_batch()
self.learn.one_batch(0, b)
preds = getattr(self.loss_func, 'activation', noop)(self.pred)
out = getattr(self.loss_func, 'decodes', noop)(preds)
x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds)
tensorboard_log(x, y, its, outs, self.writer, self.train_iter)
def before_validate(self):
if self.projector: self._setup_projector()
# Cell
class TensorBoardProjectorCallback(TensorBoardBaseCallback):
"Extracts and exports image featuers for tensorboard projector during inference"
def __init__(self, log_dir=None, layer=None):
super().__init__()
store_attr()
def before_fit(self):
self.run = not hasattr(self.learn, 'lr_finder') and hasattr(self, "gather_preds") and rank_distrib()==0
if not self.run: return
self._setup_writer()
def before_validate(self):
self._setup_projector()
# Cell
def _write_projector_embedding(learn, writer, feat):
lbls = [learn.dl.vocab[l] for l in feat['lbl']] if getattr(learn.dl, 'vocab', None) else None
vecs = feat['vec'].squeeze()
writer.add_embedding(vecs, metadata=lbls, label_img=feat['img'], global_step=learn.train_iter)
# Cell
def _add_projector_features(learn, hook, feat):
img = _normalize_for_projector(learn.x)
first_epoch = True if learn.iter == 0 else False
feat['vec'] = hook.stored if first_epoch else torch.cat((feat['vec'], hook.stored),0)
feat['img'] = img if first_epoch else torch.cat((feat['img'], img),0)
if getattr(learn.dl, 'vocab', None):
feat['lbl'] = learn.y if first_epoch else torch.cat((feat['lbl'], learn.y),0)
return feat
# Cell
def _get_embeddings(model, layer):
layer = model[0].encoder if layer == None else layer
return layer.weight
# Cell
@typedispatch
def _normalize_for_projector(x:TensorImage):
# normalize tensor to be between 0-1
img = x.clone()
sz = img.shape
img = img.view(x.size(0), -1)
img -= img.min(1, keepdim=True)[0]
img /= img.max(1, keepdim=True)[0]
img = img.view(*sz)
return img
# Cell
from ..text.all import LMLearner, TextLearner
# Cell
def projector_word_embeddings(learn=None, layer=None, vocab=None, limit=-1, start=0, log_dir=None):
"Extracts and exports word embeddings from language models embedding layers"
if not layer:
if isinstance(learn, LMLearner): layer = learn.model[0].encoder
elif isinstance(learn, TextLearner): layer = learn.model[0].module.encoder
emb = layer.weight
img = torch.full((len(emb),3,8,8), 0.7)
vocab = learn.dls.vocab[0] if vocab == None else vocab
vocab = list(map(lambda x: f'{x}_', vocab))
writer = SummaryWriter(log_dir=log_dir)
end = start + limit if limit >= 0 else -1
writer.add_embedding(emb[start:end], metadata=vocab[start:end], label_img=img[start:end])
writer.close()
# Cell
from ..vision.data import *
# Cell
@typedispatch
def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step):
fig,axs = get_grid(len(samples), add_vert=1, return_fig=True)
for i in range(2):
axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)]
axs = [r.show(ctx=c, color='green' if b==r else 'red')
for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)]
writer.add_figure('Sample results', fig, step)
# Cell
from ..vision.core import TensorPoint,TensorBBox
# Cell
@typedispatch
def tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step):
fig,axs = get_grid(len(samples), add_vert=1, return_fig=True, double=True)
for i in range(2):
axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])]
for x in [samples,outs]:
axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])]
writer.add_figure('Sample results', fig, step) | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/tensorboard.py | tensorboard.py |
__all__ = ['annealer', 'sched_lin', 'sched_cos', 'sched_no', 'sched_exp', 'SchedLin', 'SchedCos', 'SchedNo', 'SchedExp',
'SchedPoly', 'combine_scheds', 'combined_cos', 'ParamScheduler', 'LRFinder', 'SuggestedLRs']
# Cell
from ..basics import *
# Cell
class _Annealer:
def __init__(self, f, start, end): store_attr('f,start,end')
def __call__(self, pos): return self.f(self.start, self.end, pos)
# Cell
def annealer(f):
"Decorator to make `f` return itself partially applied."
@functools.wraps(f)
def _inner(start, end): return _Annealer(f, start, end)
return _inner
# Cell
#TODO Jeremy, make this pickle
#@annealer
#def SchedLin(start, end, pos): return start + pos*(end-start)
#@annealer
#def SchedCos(start, end, pos): return start + (1 + math.cos(math.pi*(1-pos))) * (end-start) / 2
#@annealer
#def SchedNo (start, end, pos): return start
#@annealer
#def SchedExp(start, end, pos): return start * (end/start) ** pos
#
#SchedLin.__doc__ = "Linear schedule function from `start` to `end`"
#SchedCos.__doc__ = "Cosine schedule function from `start` to `end`"
#SchedNo .__doc__ = "Constant schedule function with `start` value"
#SchedExp.__doc__ = "Exponential schedule function from `start` to `end`"
# Cell
def sched_lin(start, end, pos): return start + pos*(end-start)
def sched_cos(start, end, pos): return start + (1 + math.cos(math.pi*(1-pos))) * (end-start) / 2
def sched_no (start, end, pos): return start
def sched_exp(start, end, pos): return start * (end/start) ** pos
def SchedLin(start, end): return _Annealer(sched_lin, start, end)
def SchedCos(start, end): return _Annealer(sched_cos, start, end)
def SchedNo (start, end): return _Annealer(sched_no, start, end)
def SchedExp(start, end): return _Annealer(sched_exp, start, end)
SchedLin.__doc__ = "Linear schedule function from `start` to `end`"
SchedCos.__doc__ = "Cosine schedule function from `start` to `end`"
SchedNo .__doc__ = "Constant schedule function with `start` value"
SchedExp.__doc__ = "Exponential schedule function from `start` to `end`"
# Cell
def SchedPoly(start, end, power):
"Polynomial schedule (of `power`) function from `start` to `end`"
def _inner(pos): return start + (end - start) * pos ** power
return _inner
# Cell
def combine_scheds(pcts, scheds):
"Combine `scheds` according to `pcts` in one function"
assert sum(pcts) == 1.
pcts = tensor([0] + L(pcts))
assert torch.all(pcts >= 0)
pcts = torch.cumsum(pcts, 0)
def _inner(pos):
if int(pos) == 1: return scheds[-1](1.)
idx = (pos >= pcts).nonzero().max()
actual_pos = (pos-pcts[idx]) / (pcts[idx+1]-pcts[idx])
return scheds[idx](actual_pos.item())
return _inner
# Cell
def combined_cos(pct, start, middle, end):
"Return a scheduler with cosine annealing from `start`→`middle` & `middle`→`end`"
return combine_scheds([pct,1-pct], [SchedCos(start, middle), SchedCos(middle, end)])
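# Example (illustrative sketch, not part of the exported module): a schedule that
# warms up with a cosine from 0.1 to 1.0 over the first 25% of training, then
# anneals back down to 0.
#
#   f = combined_cos(0.25, 0.1, 1., 0.)
#   [round(f(p), 3) for p in (0., 0.25, 0.5, 1.)]   # [0.1, 1.0, 0.75, 0.0]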
# Cell
@docs
class ParamScheduler(Callback):
"Schedule hyper-parameters according to `scheds`"
order,run_valid = 60,False
def __init__(self, scheds): self.scheds = scheds
def before_fit(self): self.hps = {p:[] for p in self.scheds.keys()}
def before_batch(self): self._update_val(self.pct_train)
def _update_val(self, pct):
for n,f in self.scheds.items(): self.opt.set_hyper(n, f(pct))
def after_batch(self):
for p in self.scheds.keys(): self.hps[p].append(self.opt.hypers[-1][p])
def after_fit(self):
if hasattr(self.learn, 'recorder') and hasattr(self, 'hps'): self.recorder.hps = self.hps
_docs = {"before_fit": "Initialize container for hyper-parameters",
"before_batch": "Set the proper hyper-parameters in the optimizer",
"after_batch": "Record hyper-parameters of this batch",
"after_fit": "Save the hyper-parameters in the recorder if there is one"}
# Cell
@patch
def fit_one_cycle(self:Learner, n_epoch, lr_max=None, div=25., div_final=1e5, pct_start=0.25, wd=None,
moms=None, cbs=None, reset_opt=False):
"Fit `self.model` for `n_epoch` using the 1cycle policy."
if self.opt is None: self.create_opt()
self.opt.set_hyper('lr', self.lr if lr_max is None else lr_max)
lr_max = np.array([h['lr'] for h in self.opt.hypers])
scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
# Cell
@patch
def plot_sched(self:Recorder, keys=None, figsize=None):
keys = self.hps.keys() if keys is None else L(keys)
rows,cols = (len(keys)+1)//2, min(2, len(keys))
figsize = figsize or (6*cols,4*rows)
_, axs = plt.subplots(rows, cols, figsize=figsize)
axs = axs.flatten() if len(keys) > 1 else L(axs)
for p,ax in zip(keys, axs):
ax.plot(self.hps[p])
ax.set_ylabel(p)
# Cell
@patch
def fit_flat_cos(self:Learner, n_epoch, lr=None, div_final=1e5, pct_start=0.75, wd=None,
cbs=None, reset_opt=False):
"Fit `self.model` for `n_epoch` at flat `lr` before a cosine annealing."
if self.opt is None: self.create_opt()
self.opt.set_hyper('lr', self.lr if lr is None else lr)
lr = np.array([h['lr'] for h in self.opt.hypers])
scheds = {'lr': combined_cos(pct_start, lr, lr, lr/div_final)}
self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
# Cell
@patch
def fit_sgdr(self:Learner, n_cycles, cycle_len, lr_max=None, cycle_mult=2, cbs=None, reset_opt=False, wd=None):
"Fit `self.model` for `n_cycles` of `cycle_len` using SGDR."
if self.opt is None: self.create_opt()
self.opt.set_hyper('lr', self.lr if lr_max is None else lr_max)
lr_max = np.array([h['lr'] for h in self.opt.hypers])
n_epoch = cycle_len * (cycle_mult**n_cycles-1)//(cycle_mult-1)
pcts = [cycle_len * cycle_mult**i / n_epoch for i in range(n_cycles)]
scheds = [SchedCos(lr_max, 0) for _ in range(n_cycles)]
scheds = {'lr': combine_scheds(pcts, scheds)}
self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
# Cell
@patch
@delegates(Learner.fit_one_cycle)
def fine_tune(self:Learner, epochs, base_lr=2e-3, freeze_epochs=1, lr_mult=100,
pct_start=0.3, div=5.0, **kwargs):
"Fine tune with `freeze` for `freeze_epochs` then with `unfreeze` from `epochs` using discriminative LR"
self.freeze()
self.fit_one_cycle(freeze_epochs, slice(base_lr), pct_start=0.99, **kwargs)
base_lr /= 2
self.unfreeze()
self.fit_one_cycle(epochs, slice(base_lr/lr_mult, base_lr), pct_start=pct_start, div=div, **kwargs)
# Cell
@docs
class LRFinder(ParamScheduler):
"Training with exponentially growing learning rate"
def __init__(self, start_lr=1e-7, end_lr=10, num_it=100, stop_div=True):
if is_listy(start_lr):
self.scheds = {'lr': [SchedExp(s, e) for (s,e) in zip(start_lr,end_lr)]}
else: self.scheds = {'lr': SchedExp(start_lr, end_lr)}
self.num_it,self.stop_div = num_it,stop_div
def before_fit(self):
super().before_fit()
self.learn.save('_tmp')
self.best_loss = float('inf')
def before_batch(self):
self._update_val(self.train_iter/self.num_it)
def after_batch(self):
super().after_batch()
if self.smooth_loss < self.best_loss: self.best_loss = self.smooth_loss
if self.smooth_loss > 4*self.best_loss and self.stop_div: raise CancelFitException()
if self.train_iter >= self.num_it: raise CancelFitException()
def before_validate(self): raise CancelValidException()
def after_fit(self):
self.learn.opt.zero_grad() #Need to zero the gradients of the model before detaching the optimizer for future fits
tmp_f = self.path/self.model_dir/'_tmp.pth'
if tmp_f.exists():
self.learn.load('_tmp', with_opt=True)
os.remove(tmp_f)
_docs = {"before_fit": "Initialize container for hyper-parameters and save the model",
"before_batch": "Set the proper hyper-parameters in the optimizer",
"after_batch": "Record hyper-parameters of this batch and potentially stop training",
"after_fit": "Save the hyper-parameters in the recorder if there is one and load the original model",
"before_validate": "Skip the validation part of training"}
# Cell
@patch
def plot_lr_find(self:Recorder, skip_end=5):
"Plot the result of an LR Finder test (won't work if you didn't do `learn.lr_find()` before)"
lrs = self.lrs if skip_end==0 else self.lrs [:-skip_end]
losses = self.losses if skip_end==0 else self.losses[:-skip_end]
fig, ax = plt.subplots(1,1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale('log')
# Cell
SuggestedLRs = collections.namedtuple('SuggestedLRs', ['lr_min', 'lr_steep'])
# Cell
@patch
def lr_find(self:Learner, start_lr=1e-7, end_lr=10, num_it=100, stop_div=True, show_plot=True, suggestions=True):
"Launch a mock training to find a good learning rate, return lr_min, lr_steep if `suggestions` is True"
n_epoch = num_it//len(self.dls.train) + 1
cb=LRFinder(start_lr=start_lr, end_lr=end_lr, num_it=num_it, stop_div=stop_div)
with self.no_logging(): self.fit(n_epoch, cbs=cb)
if show_plot: self.recorder.plot_lr_find()
if suggestions:
lrs,losses = tensor(self.recorder.lrs[num_it//10:-5]),tensor(self.recorder.losses[num_it//10:-5])
if len(losses) == 0: return
lr_min = lrs[losses.argmin()].item()
grads = (losses[1:]-losses[:-1]) / (lrs[1:].log()-lrs[:-1].log())
lr_steep = lrs[grads.argmin()].item()
return SuggestedLRs(lr_min/10.,lr_steep) | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/schedule.py | schedule.py |
__all__ = ['CollectDataCallback', 'CudaCallback', 'WeightedDL', 'PartialDL']
# Cell
from ..basics import *
# Cell
class CollectDataCallback(Callback):
"Collect all batches, along with `pred` and `loss`, into `self.data`. Mainly for testing"
def before_fit(self): self.data = L()
def after_batch(self):
self.data.append(self.learn.to_detach((self.xb,self.yb,self.pred,self.loss)))
# Cell
class CudaCallback(Callback):
"Move data to CUDA device"
def __init__(self, device=None): self.device = ifnone(device, default_device())
def before_batch(self): self.learn.xb,self.learn.yb = to_device(self.xb),to_device(self.yb)
def before_fit(self): self.model.to(self.device)
# Cell
@delegates()
class WeightedDL(TfmdDL):
def __init__(self, dataset=None, bs=None, wgts=None, **kwargs):
super().__init__(dataset=dataset, bs=bs, **kwargs)
wgts = array([1.]*len(dataset) if wgts is None else wgts)
self.wgts = wgts/wgts.sum()
def get_idxs(self):
if self.n==0: return []
if not self.shuffle: return super().get_idxs()
return list(np.random.choice(self.n, self.n, p=self.wgts))
# Cell
@patch
@delegates(Datasets.dataloaders)
def weighted_dataloaders(self:Datasets, wgts, bs=64, **kwargs):
xtra_kwargs = [{}] * (self.n_subsets-1)
return self.dataloaders(bs=bs, dl_type=WeightedDL, dl_kwargs=({'wgts':wgts}, *xtra_kwargs), **kwargs)
# Cell
@delegates()
class PartialDL(TfmdDL):
"Select randomly partial quantity of data at each epoch"
def __init__(self, dataset=None, bs=None, partial_n=None, **kwargs):
super().__init__(dataset=dataset, bs=bs, **kwargs)
self.partial_n = min(partial_n, self.n) if partial_n else None
def get_idxs(self):
if self.partial_n is None: return super().get_idxs()
return list(np.random.choice(self.n, self.partial_n, replace=False))
def __len__(self):
if self.partial_n is None: return super().__len__()
return self.partial_n//self.bs + (0 if self.drop_last or self.partial_n%self.bs==0 else 1)
# Cell
@patch
@delegates(Datasets.dataloaders)
def partial_dataloaders(self:FilteredBase, partial_n, bs=64, **kwargs):
"Create a partial dataloader `PartialDL` for the training set"
xtra_kwargs = [{}] * (self.n_subsets-1)
return self.dataloaders(bs=bs, dl_type=PartialDL, dl_kwargs=({'partial_n':partial_n}, *xtra_kwargs), **kwargs) | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/data.py | data.py |
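# Example (illustrative sketch, not part of the exported module): oversample some
# items, or train on only a random subset of the data each epoch.
#
#   dsets = Datasets(torch.arange(10))
#   dls_w = dsets.weighted_dataloaders(wgts=range(10), bs=4)  # item i drawn with probability proportional to i
#   dls_p = dsets.partial_dataloaders(partial_n=32, bs=8)     # 4 training batches per epoch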
__all__ = ['ShortEpochCallback', 'GradientAccumulation', 'GradientClip', 'set_bn_eval', 'BnFreeze', 'bn_types']
# Cell
from ..basics import *
from .progress import *
from .fp16 import *
# Cell
class ShortEpochCallback(Callback):
"Fit just `pct` of an epoch, then stop"
def __init__(self,pct=0.01,short_valid=True): self.pct,self.short_valid = pct,short_valid
def after_batch(self):
if self.iter/self.n_iter < self.pct: return
if self.training: raise CancelTrainException
if self.short_valid: raise CancelValidException
# Cell
class GradientAccumulation(Callback):
"Accumulate gradients before updating weights"
order,run_valid = MixedPrecision.order-4,False
def __init__(self, n_acc=32): store_attr()
def before_fit(self): self.count=0
def after_loss(self): self.learn.loss_grad /= self.n_acc/find_bs(self.learn.yb)
def before_step(self):
"Skip weight update if we have not seen enough items"
self.learn.loss_grad *= self.n_acc/find_bs(self.learn.yb) # log correct loss
self.count += find_bs(self.learn.yb)
if self.count<self.n_acc: raise CancelBatchException() # skip step/zero_grad
else: self.count=0
# Cell
class GradientClip(Callback):
"Clip norm of gradients"
order=MixedPrecision.order+1
def __init__(self,max_norm:float=1., norm_type:float=2.0): store_attr()
def before_step(self): nn.utils.clip_grad_norm_(self.parameters(), self.max_norm, self.norm_type)
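# Example (illustrative sketch, not part of the exported module): simulate an
# effective batch size of 64 via gradient accumulation and clip gradient norms;
# `learn` is an assumed `Learner`.
#
#   learn.fit_one_cycle(1, cbs=[GradientAccumulation(n_acc=64), GradientClip(max_norm=1.)])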
# Cell
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
def set_bn_eval(m:nn.Module, use_eval=True)->None:
"Set bn layers in eval mode for all recursive children of `m`."
for l in m.children():
if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
if use_eval: l.eval()
else: l.train()
set_bn_eval(l)
class BnFreeze(Callback):
    "Freeze moving average statistics in all non-trainable batchnorm layers."
    run_after=TrainEvalCallback
def before_train(self):
set_bn_eval(self.model) | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/training.py | training.py |
__all__ = ['WandbCallback', 'log_dataset', 'log_model', 'wandb_process']
# Cell
from ..basics import *
from .progress import *
from ..text.data import TensorText
from ..tabular.all import TabularDataLoaders, Tabular
from .hook import total_params
# Cell
import wandb
# Cell
class WandbCallback(Callback):
"Saves model topology, losses & metrics"
toward_end,remove_on_fetch,run_after = True,True,FetchPredsCallback
# Record if watch has been called previously (even in another instance)
_wandb_watch_called = False
def __init__(self, log="gradients", log_preds=True, log_model=True, log_dataset=False, dataset_name=None, valid_dl=None, n_preds=36, seed=12345, reorder=True):
# Check if wandb.init has been called
if wandb.run is None:
raise ValueError('You must call wandb.init() before WandbCallback()')
# W&B log step
self._wandb_step = wandb.run.step - 1 # -1 except if the run has previously logged data (incremented at each batch)
self._wandb_epoch = 0 if not(wandb.run.step) else math.ceil(wandb.run.summary['epoch']) # continue to next epoch
store_attr('log,log_preds,log_model,log_dataset,dataset_name,valid_dl,n_preds,seed,reorder')
def before_fit(self):
"Call watch method to log model topology, gradients & weights"
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
if not self.run: return
# Log config parameters
log_config = self.learn.gather_args()
_format_config(log_config)
try:
wandb.config.update(log_config, allow_val_change=True)
except Exception as e:
print(f'WandbCallback could not log config parameters -> {e}')
if not WandbCallback._wandb_watch_called:
WandbCallback._wandb_watch_called = True
# Logs model topology and optionally gradients and weights
wandb.watch(self.learn.model, log=self.log)
# log dataset
assert isinstance(self.log_dataset, (str, Path, bool)), 'log_dataset must be a path or a boolean'
if self.log_dataset is True:
if Path(self.dls.path) == Path('.'):
print('WandbCallback could not retrieve the dataset path, please provide it explicitly to "log_dataset"')
self.log_dataset = False
else:
self.log_dataset = self.dls.path
if self.log_dataset:
self.log_dataset = Path(self.log_dataset)
assert self.log_dataset.is_dir(), f'log_dataset must be a valid directory: {self.log_dataset}'
metadata = {'path relative to learner': os.path.relpath(self.log_dataset, self.learn.path)}
log_dataset(path=self.log_dataset, name=self.dataset_name, metadata=metadata)
# log model
if self.log_model and not hasattr(self, 'save_model'):
print('WandbCallback requires use of "SaveModelCallback" to log best model')
self.log_model = False
if self.log_preds:
try:
if not self.valid_dl:
#Initializes the batch watched
wandbRandom = random.Random(self.seed) # For repeatability
self.n_preds = min(self.n_preds, len(self.dls.valid_ds))
idxs = wandbRandom.sample(range(len(self.dls.valid_ds)), self.n_preds)
if isinstance(self.dls, TabularDataLoaders):
test_items = getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[idxs]
self.valid_dl = self.dls.test_dl(test_items, with_labels=True, process=False)
else:
test_items = [getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[i] for i in idxs]
self.valid_dl = self.dls.test_dl(test_items, with_labels=True)
self.learn.add_cb(FetchPredsCallback(dl=self.valid_dl, with_input=True, with_decoded=True, reorder=self.reorder))
except Exception as e:
self.log_preds = False
print(f'WandbCallback was not able to prepare a DataLoader for logging prediction samples -> {e}')
def after_batch(self):
"Log hyper-parameters and training loss"
if self.training:
self._wandb_step += 1
self._wandb_epoch += 1/self.n_iter
hypers = {f'{k}_{i}':v for i,h in enumerate(self.opt.hypers) for k,v in h.items()}
wandb.log({'epoch': self._wandb_epoch, 'train_loss': to_detach(self.smooth_loss.clone()), 'raw_loss': to_detach(self.loss.clone()), **hypers}, step=self._wandb_step)
def log_predictions(self, preds):
inp,preds,targs,out = preds
b = tuplify(inp) + tuplify(targs)
x,y,its,outs = self.valid_dl.show_results(b, out, show=False, max_n=self.n_preds)
wandb.log(wandb_process(x, y, its, outs), step=self._wandb_step)
def after_epoch(self):
"Log validation loss and custom metrics & log prediction samples"
# Correct any epoch rounding error and overwrite value
self._wandb_epoch = round(self._wandb_epoch)
wandb.log({'epoch': self._wandb_epoch}, step=self._wandb_step)
# Log sample predictions
if self.log_preds:
try:
self.log_predictions(self.learn.fetch_preds.preds)
except Exception as e:
self.log_preds = False
print(f'WandbCallback was not able to get prediction samples -> {e}')
wandb.log({n:s for n,s in zip(self.recorder.metric_names, self.recorder.log) if n not in ['train_loss', 'epoch', 'time']}, step=self._wandb_step)
def after_fit(self):
if self.log_model:
if self.save_model.last_saved_path is None:
print('WandbCallback could not retrieve a model to upload')
else:
metadata = {n:s for n,s in zip(self.recorder.metric_names, self.recorder.log) if n not in ['train_loss', 'epoch', 'time']}
log_model(self.save_model.last_saved_path, metadata=metadata)
self.run = True
if self.log_preds: self.remove_cb(FetchPredsCallback)
wandb.log({}) # ensure sync of last step
# Cell
@patch
def gather_args(self:Learner):
"Gather config parameters accessible to the learner"
# args stored by `store_attr`
cb_args = {f'{cb}':getattr(cb,'__stored_args__',True) for cb in self.cbs}
args = {'Learner':self, **cb_args}
# input dimensions
try:
n_inp = self.dls.train.n_inp
args['n_inp'] = n_inp
xb = self.dls.train.one_batch()[:n_inp]
args.update({f'input {n+1} dim {i+1}':d for n in range(n_inp) for i,d in enumerate(list(detuplify(xb[n]).shape))})
except: print(f'Could not gather input dimensions')
# other useful information
with ignore_exceptions():
args['batch size'] = self.dls.bs
args['batch per epoch'] = len(self.dls.train)
args['model parameters'] = total_params(self.model)[0]
args['device'] = self.dls.device.type
args['frozen'] = bool(self.opt.frozen_idx)
args['frozen idx'] = self.opt.frozen_idx
args['dataset.tfms'] = f'{self.dls.dataset.tfms}'
args['dls.after_item'] = f'{self.dls.after_item}'
args['dls.before_batch'] = f'{self.dls.before_batch}'
args['dls.after_batch'] = f'{self.dls.after_batch}'
return args
# Cell
def _make_plt(img):
"Make plot to image resolution"
# from https://stackoverflow.com/a/13714915
my_dpi = 100
fig = plt.figure(frameon=False, dpi=my_dpi)
h, w = img.shape[:2]
fig.set_size_inches(w / my_dpi, h / my_dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
return fig, ax
# Cell
def _format_config_value(v):
if isinstance(v, list):
return [_format_config_value(item) for item in v]
elif hasattr(v, '__stored_args__'):
return {**_format_config(v.__stored_args__), '_name': v}
return v
# Cell
def _format_config(config):
"Format config parameters before logging them"
for k,v in config.items():
if isinstance(v, dict):
config[k] = _format_config(v)
else:
config[k] = _format_config_value(v)
return config
# Cell
def _format_metadata(metadata):
"Format metadata associated to artifacts"
for k,v in metadata.items(): metadata[k] = str(v)
# Cell
def log_dataset(path, name=None, metadata={}, description='raw dataset'):
"Log dataset folder"
# Check if wandb.init has been called in case datasets are logged manually
if wandb.run is None:
raise ValueError('You must call wandb.init() before log_dataset()')
path = Path(path)
if not path.is_dir():
raise f'path must be a valid directory: {path}'
name = ifnone(name, path.name)
_format_metadata(metadata)
artifact_dataset = wandb.Artifact(name=name, type='dataset', metadata=metadata, description=description)
# log everything except "models" folder
for p in path.ls():
if p.is_dir():
if p.name != 'models': artifact_dataset.add_dir(str(p.resolve()), name=p.name)
else: artifact_dataset.add_file(str(p.resolve()))
wandb.run.use_artifact(artifact_dataset)
# Cell
def log_model(path, name=None, metadata={}, description='trained model'):
"Log model file"
if wandb.run is None:
raise ValueError('You must call wandb.init() before log_model()')
path = Path(path)
if not path.is_file():
raise f'path must be a valid file: {path}'
name = ifnone(name, f'run-{wandb.run.id}-model')
_format_metadata(metadata)
artifact_model = wandb.Artifact(name=name, type='model', metadata=metadata, description=description)
with artifact_model.new_file(name, mode='wb') as fa:
fa.write(path.read_bytes())
wandb.run.log_artifact(artifact_model)
# Cell
@typedispatch
def wandb_process(x:TensorImage, y, samples, outs):
"Process `sample` and `out` depending on the type of `x/y`"
res_input, res_pred, res_label = [],[],[]
for s,o in zip(samples, outs):
img = s[0].permute(1,2,0)
res_input.append(wandb.Image(img, caption='Input data'))
for t, capt, res in ((o[0], "Prediction", res_pred), (s[1], "Ground Truth", res_label)):
fig, ax = _make_plt(img)
# Superimpose label or prediction to input image
ax = img.show(ctx=ax)
ax = t.show(ctx=ax)
res.append(wandb.Image(fig, caption=capt))
plt.close(fig)
return {"Inputs":res_input, "Predictions":res_pred, "Ground Truth":res_label}
# Cell
@typedispatch
def wandb_process(x:TensorImage, y:(TensorCategory,TensorMultiCategory), samples, outs):
return {"Prediction Samples": [wandb.Image(s[0].permute(1,2,0), caption=f'Ground Truth: {s[1]}\nPrediction: {o[0]}')
for s,o in zip(samples,outs)]}
# Cell
@typedispatch
def wandb_process(x:TensorImage, y:TensorMask, samples, outs):
res = []
codes = getattr(y, 'codes', None)
class_labels = {i:f'{c}' for i,c in enumerate(codes)} if codes is not None else None
for s,o in zip(samples, outs):
img = s[0].permute(1,2,0)
masks = {}
for t, capt in ((o[0], "Prediction"), (s[1], "Ground Truth")):
masks[capt] = {'mask_data':t.numpy().astype(np.uint8)}
if class_labels: masks[capt]['class_labels'] = class_labels
res.append(wandb.Image(img, masks=masks))
return {"Prediction Samples":res}
# Cell
@typedispatch
def wandb_process(x:TensorText, y:(TensorCategory,TensorMultiCategory), samples, outs):
data = [[s[0], s[1], o[0]] for s,o in zip(samples,outs)]
return {"Prediction Samples": wandb.Table(data=data, columns=["Text", "Target", "Prediction"])}
# Cell
@typedispatch
def wandb_process(x:Tabular, y:Tabular, samples, outs):
df = x.all_cols
for n in x.y_names: df[n+'_pred'] = y[n].values
return {"Prediction Samples": wandb.Table(dataframe=df)}
# Cell
#nbdev_comment _all_ = ['wandb_process']
# source file: zwyfastai-2.0.21/src/fastai/callback/wandb.py
__all__ = ['TerminateOnNaNCallback', 'TrackerCallback', 'EarlyStoppingCallback', 'SaveModelCallback',
'ReduceLROnPlateau']
# Cell
from ..basics import *
from .progress import *
from .fp16 import MixedPrecision
# Cell
class TerminateOnNaNCallback(Callback):
"A `Callback` that terminates training if loss is NaN."
order=-9
def after_batch(self):
"Test if `last_loss` is NaN and interrupts training."
if torch.isinf(self.loss) or torch.isnan(self.loss): raise CancelFitException
# Cell
class TrackerCallback(Callback):
"A `Callback` that keeps track of the best value in `monitor`."
order,remove_on_fetch = 60,True
def __init__(self, monitor='valid_loss', comp=None, min_delta=0., reset_on_fit=True):
if comp is None: comp = np.less if 'loss' in monitor or 'error' in monitor else np.greater
if comp == np.less: min_delta *= -1
self.monitor,self.comp,self.min_delta,self.reset_on_fit,self.best= monitor,comp,min_delta,reset_on_fit,None
def before_fit(self):
"Prepare the monitored value"
self.run = not hasattr(self, "lr_finder") and not hasattr(self, "gather_preds")
if self.reset_on_fit or self.best is None: self.best = float('inf') if self.comp == np.less else -float('inf')
assert self.monitor in self.recorder.metric_names[1:]
self.idx = list(self.recorder.metric_names[1:]).index(self.monitor)
def after_epoch(self):
"Compare the last value to the best up to now"
val = self.recorder.values[-1][self.idx]
if self.comp(val - self.min_delta, self.best): self.best,self.new_best = val,True
else: self.new_best = False
def after_fit(self): self.run=True
# Cell
class EarlyStoppingCallback(TrackerCallback):
"A `TrackerCallback` that terminates training when monitored quantity stops improving."
def __init__(self, monitor='valid_loss', comp=None, min_delta=0., patience=1, reset_on_fit=True):
super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
self.patience = patience
def before_fit(self): self.wait = 0; super().before_fit()
def after_epoch(self):
"Compare the value monitored to its best score and maybe stop training."
super().after_epoch()
if self.new_best: self.wait = 0
else:
self.wait += 1
if self.wait >= self.patience:
print(f'No improvement since epoch {self.epoch-self.wait}: early stopping')
raise CancelFitException()
# Cell
class SaveModelCallback(TrackerCallback):
"A `TrackerCallback` that saves the model's best during training and loads it at the end."
_only_train_loop = True
def __init__(self, monitor='valid_loss', comp=None, min_delta=0., fname='model', every_epoch=False,
with_opt=False, reset_on_fit=True):
super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
# keep track of file path for loggers
self.last_saved_path = None
store_attr('fname,every_epoch,with_opt')
def _save(self, name): self.last_saved_path = self.learn.save(name, with_opt=self.with_opt)
def after_epoch(self):
"Compare the value monitored to its best score and save if best."
if self.every_epoch: self._save(f'{self.fname}_{self.epoch}')
else: #every improvement
super().after_epoch()
if self.new_best:
print(f'Better model found at epoch {self.epoch} with {self.monitor} value: {self.best}.')
self._save(f'{self.fname}')
def after_fit(self, **kwargs):
"Load the best model."
if not self.every_epoch: self.learn.load(f'{self.fname}', with_opt=self.with_opt)
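# Illustrative usage sketch (added for clarity, not part of the original module): combining
# the tracker callbacks above. `dls` and `model` are assumed to exist; the metric, patience
# and epoch count are hypothetical.
def _example_tracker_callbacks(dls, model):
    "Hedged sketch: stop early and keep the best checkpoint while training."
    learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=accuracy,
                    cbs=[EarlyStoppingCallback(monitor='accuracy', patience=3),
                         SaveModelCallback(monitor='accuracy', fname='best')])
    learn.fit(20)        # stops early if accuracy does not improve for 3 epochs
    return learn         # SaveModelCallback.after_fit has reloaded the best weights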
# Cell
class ReduceLROnPlateau(TrackerCallback):
"A `TrackerCallback` that reduces learning rate when a metric has stopped improving."
def __init__(self, monitor='valid_loss', comp=None, min_delta=0., patience=1, factor=10., min_lr=0, reset_on_fit=True):
super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
self.patience,self.factor,self.min_lr = patience,factor,min_lr
def before_fit(self): self.wait = 0; super().before_fit()
def after_epoch(self):
"Compare the value monitored to its best score and reduce LR by `factor` if no improvement."
super().after_epoch()
if self.new_best: self.wait = 0
else:
self.wait += 1
if self.wait >= self.patience:
old_lr = self.opt.hypers[-1]['lr']
for h in self.opt.hypers: h['lr'] = max(h['lr'] / self.factor, self.min_lr)
self.wait = 0
if self.opt.hypers[-1]["lr"] < old_lr:
                    print(f'Epoch {self.epoch}: reducing lr to {self.opt.hypers[-1]["lr"]}')
# source file: zwyfastai-2.0.21/src/fastai/callback/tracker.py
__all__ = ['Hook', 'hook_output', 'Hooks', 'hook_outputs', 'dummy_eval', 'model_sizes', 'num_features_model',
'has_params', 'HookCallback', 'total_params', 'layer_info', 'module_summary', 'ActivationStats']
# Cell
from ..basics import *
# Cell
@docs
class Hook():
"Create a hook on `m` with `hook_func`."
def __init__(self, m, hook_func, is_forward=True, detach=True, cpu=False, gather=False):
store_attr('hook_func,detach,cpu,gather')
f = m.register_forward_hook if is_forward else m.register_backward_hook
self.hook = f(self.hook_fn)
self.stored,self.removed = None,False
def hook_fn(self, module, input, output):
"Applies `hook_func` to `module`, `input`, `output`."
if self.detach:
input,output = to_detach(input, cpu=self.cpu, gather=self.gather),to_detach(output, cpu=self.cpu, gather=self.gather)
self.stored = self.hook_func(module, input, output)
def remove(self):
"Remove the hook from the model."
if not self.removed:
self.hook.remove()
self.removed=True
def __enter__(self, *args): return self
def __exit__(self, *args): self.remove()
_docs = dict(__enter__="Register the hook",
__exit__="Remove the hook")
# Cell
def _hook_inner(m,i,o): return o if isinstance(o,Tensor) or is_listy(o) else list(o)
def hook_output(module, detach=True, cpu=False, grad=False):
"Return a `Hook` that stores activations of `module` in `self.stored`"
return Hook(module, _hook_inner, detach=detach, cpu=cpu, is_forward=not grad)
# Cell
@docs
class Hooks():
"Create several hooks on the modules in `ms` with `hook_func`."
def __init__(self, ms, hook_func, is_forward=True, detach=True, cpu=False):
self.hooks = [Hook(m, hook_func, is_forward, detach, cpu) for m in ms]
def __getitem__(self,i): return self.hooks[i]
def __len__(self): return len(self.hooks)
def __iter__(self): return iter(self.hooks)
@property
def stored(self): return L(o.stored for o in self)
def remove(self):
"Remove the hooks from the model."
for h in self.hooks: h.remove()
def __enter__(self, *args): return self
def __exit__ (self, *args): self.remove()
_docs = dict(stored = "The states saved in each hook.",
__enter__="Register the hooks",
__exit__="Remove the hooks")
# Cell
def hook_outputs(modules, detach=True, cpu=False, grad=False):
"Return `Hooks` that store activations of all `modules` in `self.stored`"
return Hooks(modules, _hook_inner, detach=detach, cpu=cpu, is_forward=not grad)
# Cell
def dummy_eval(m, size=(64,64)):
"Evaluate `m` on a dummy input of a certain `size`"
ch_in = in_channels(m)
x = one_param(m).new(1, ch_in, *size).requires_grad_(False).uniform_(-1.,1.)
with torch.no_grad(): return m.eval()(x)
# Cell
def model_sizes(m, size=(64,64)):
"Pass a dummy input through the model `m` to get the various sizes of activations."
with hook_outputs(m) as hooks:
_ = dummy_eval(m, size=size)
return [o.stored.shape for o in hooks]
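# Illustrative usage sketch (added for clarity, not part of the original module): capturing
# intermediate activation shapes with `hook_outputs`/`dummy_eval`. The tiny model and the
# input size are hypothetical.
def _example_activation_shapes():
    "Hedged sketch: inspect per-layer output shapes of a small convnet."
    m = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
                      nn.Conv2d(8, 16, 3, stride=2, padding=1), nn.ReLU())
    with hook_outputs(m) as hooks:          # one hook per child module, storing its output
        _ = dummy_eval(m, size=(32, 32))    # dummy forward pass on a (1, 3, 32, 32) input
        shapes = [o.stored.shape for o in hooks]
    return shapes                           # equivalent to model_sizes(m, size=(32, 32))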
# Cell
def num_features_model(m):
"Return the number of output features for `m`."
sz,ch_in = 32,in_channels(m)
while True:
#Trying for a few sizes in case the model requires a big input size.
try:
return model_sizes(m, (sz,sz))[-1][1]
except Exception as e:
sz *= 2
if sz > 2048: raise e
# Cell
def has_params(m):
"Check if `m` has at least one parameter"
return len(list(m.parameters())) > 0
# Cell
@funcs_kwargs
class HookCallback(Callback):
"`Callback` that can be used to register hooks on `modules`"
_methods = ["hook"]
hook = noops
def __init__(self, modules=None, every=None, remove_end=True, is_forward=True, detach=True, cpu=True, **kwargs):
store_attr('modules,every,remove_end,is_forward,detach,cpu')
assert not kwargs
def before_fit(self):
"Register the `Hooks` on `self.modules`."
if self.modules is None: self.modules = [m for m in flatten_model(self.model) if has_params(m)]
if self.every is None: self._register()
def before_batch(self):
if self.every is None: return
if self.training and self.train_iter%self.every==0: self._register()
def after_batch(self):
if self.every is None: return
if self.training and self.train_iter%self.every==0: self._remove()
def after_fit(self):
"Remove the `Hooks`."
if self.remove_end: self._remove()
def _register(self): self.hooks = Hooks(self.modules, self.hook, self.is_forward, self.detach, self.cpu)
def _remove(self):
if getattr(self, 'hooks', None): self.hooks.remove()
def __del__(self): self._remove()
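# Illustrative usage sketch (added for clarity, not part of the original module): a minimal
# `HookCallback` subclass. The class name and the `means` attribute are hypothetical.
class _ExampleMeanActivations(HookCallback):
    "Hedged sketch: record the mean activation of every hooked module on training batches."
    def before_fit(self):
        super().before_fit()                # registers the hooks when `every` is None
        self.means = []
    def hook(self, m, i, o): return o.float().mean().item()
    def after_batch(self):
        if self.training and (self.every is None or self.train_iter%self.every == 0):
            self.means.append(self.hooks.stored)
        super().after_batch()
# Usage (hypothetical): learn = Learner(dls, model, cbs=_ExampleMeanActivations())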
# Cell
def total_params(m):
"Give the number of parameters of a module and if it's trainable or not"
params = sum([p.numel() for p in m.parameters()])
trains = [p.requires_grad for p in m.parameters()]
return params, (False if len(trains)==0 else trains[0])
# Cell
def layer_info(learn, *xb):
"Return layer infos of `model` on `xb` (only support batch first inputs)"
def _track(m, i, o):
params, trainable, shape = '', '', ''
same = any((x[0].shape[1:] == x[1].shape for x in zip(i, o)))
if hasattr(m, 'weight'): # non activation layer
params, trainable = total_params(m)
shape = apply(lambda x: x.shape, o)
return (type(m).__name__, params, trainable, shape, same)
with Hooks(flatten_model(learn.model), _track) as h:
batch = apply(lambda o:o[:1], xb)
train_only_cbs = [cb for cb in learn.cbs if hasattr(cb, '_only_train_loop')]
with learn.removed_cbs(train_only_cbs), learn.no_logging(), learn as l:
r = l.get_preds(dl=[batch], inner=True, reorder=False)
return h.stored
# Cell
def _print_shapes(o, bs):
if isinstance(o, (torch.Size,tuple)): return ' x '.join([str(bs)] + [str(t) for t in o[1:]])
else: return str([_print_shapes(x, bs) for x in o])
# Cell
def module_summary(learn, *xb):
"Print a summary of `model` using `xb`"
#Individual parameters wrapped in ParameterModule aren't called through the hooks in `layer_info`,
# thus are not counted inside the summary
#TODO: find a way to have them counted in param number somehow
infos = layer_info(learn, *xb)
n,bs = 76,find_bs(xb)
inp_sz = _print_shapes(apply(lambda x:x.shape, xb), bs)
res = f"{type(learn.model).__name__} (Input shape: {inp_sz})\n"
res += "=" * n + "\n"
res += f"{'Layer (type)':<20} {'Output Shape':<20} {'Param #':<10} {'Trainable':<10}\n"
res += "=" * n
ps,trn_ps,j = 0,0,0
infos = [o for o in infos if o is not None] #see comment in previous cell
prev_sz = None
for typ,np,trn,sz,chnged in infos:
if sz is None: continue
if j == 0:
res += f'\n{"":<20} {_print_shapes(sz, bs)[:19]:<20}' # to avoid a double line at the top
if not chnged and not prev_sz == sz and j > 0: res += "\n" + "_" * n + "\n" + f'{"":<20} {_print_shapes(sz, bs)[:19]:<20}'
j = 1
res += f"\n{typ:<20} {'':<20} {np:<10} {str(trn):<10}"
if np != '':
ps += np
if trn: trn_ps += np
prev_sz = sz
res += "\n" + "_" * n + "\n"
res += f"\nTotal params: {ps:,}\n"
res += f"Total trainable params: {trn_ps:,}\n"
res += f"Total non-trainable params: {ps - trn_ps:,}\n\n"
return PrettyString(res)
# Cell
@patch
def summary(self:Learner):
"Print a summary of the model, optimizer and loss function."
xb = self.dls.train.one_batch()[:self.dls.train.n_inp]
res = module_summary(self, *xb)
res += f"Optimizer used: {self.opt_func}\nLoss function: {self.loss_func}\n\n"
if self.opt is not None:
res += f"Model " + ("unfrozen\n\n" if self.opt.frozen_idx==0 else f"frozen up to parameter group #{self.opt.frozen_idx}\n\n")
res += "Callbacks:\n" + '\n'.join(f" - {cb}" for cb in self.cbs.sorted('order'))
return PrettyString(res)
# Cell
@delegates()
class ActivationStats(HookCallback):
"Callback that record the mean and std of activations."
order=-20
def __init__(self, with_hist=False, **kwargs):
super().__init__(**kwargs)
self.with_hist = with_hist
def before_fit(self):
"Initialize stats."
super().before_fit()
self.stats = L()
def hook(self, m, i, o):
        if isinstance(o, tuple): return self.hook_multi_output(o)
o = o.float()
res = {'mean': o.mean().item(), 'std': o.std().item(),
'near_zero': (o<=0.05).long().sum().item()/o.numel()}
if self.with_hist: res['hist'] = o.histc(40,0,10)
return res
    def hook_multi_output(self,o_tuple):
"For outputs of RNN which are [nested] tuples of tensors"
res = []
for o in self._flatten_tuple(o_tuple):
if not(isinstance(o, Tensor)): continue
res.append(self.hook(None, None, o))
return res
def _flatten_tuple(self, o_tuple):
"Recursively flatten a [nested] tuple"
res = []
for it in o_tuple:
if isinstance(it, tuple): res += self._flatten_tuple(it)
else: res += [it]
return tuple(res)
def after_batch(self):
"Take the stored results and puts it in `self.stats`"
if self.training and (self.every is None or self.train_iter%self.every == 0):
self.stats.append(self.hooks.stored)
super().after_batch()
def layer_stats(self, idx):
lstats = self.stats.itemgot(idx)
return L(lstats.itemgot(o) for o in ('mean','std','near_zero'))
def hist(self, idx):
res = self.stats.itemgot(idx).itemgot('hist')
return torch.stack(tuple(res)).t().float().log1p()
def color_dim(self, idx, figsize=(10,5), ax=None):
"The 'colorful dimension' plot"
res = self.hist(idx)
if ax is None: ax = subplots(figsize=figsize)[1][0]
ax.imshow(res, origin='lower')
ax.axis('off')
def plot_layer_stats(self, idx):
_,axs = subplots(1, 3, figsize=(12,3))
for o,ax,title in zip(self.layer_stats(idx),axs,('mean','std','% near zero')):
ax.plot(o)
            ax.set_title(title)
# source file: zwyfastai-2.0.21/src/fastai/callback/hook.py
__all__ = ['NeptuneCallback']
# Cell
import tempfile
from ..basics import *
from ..learner import Callback
# Cell
import neptune
# Cell
class NeptuneCallback(Callback):
"Log losses, metrics, model weights, model architecture summary to neptune"
def __init__(self, log_model_weights=True, keep_experiment_running=False):
self.log_model_weights = log_model_weights
self.keep_experiment_running = keep_experiment_running
self.experiment = None
if neptune.project is None:
            raise ValueError('You did not initialize project in neptune.\n'
                             'Please invoke `neptune.init("USERNAME/PROJECT_NAME")` before this callback.')
def before_fit(self):
try:
self.experiment = neptune.get_experiment()
except ValueError:
print('No active experiment. Please invoke `neptune.create_experiment()` before this callback.')
try:
self.experiment.set_property('n_epoch', str(self.learn.n_epoch))
self.experiment.set_property('model_class', str(type(self.learn.model)))
except:
print(f'Did not log all properties. Check properties in the {neptune.get_experiment()}.')
try:
with tempfile.NamedTemporaryFile(mode='w') as f:
with open(f.name, 'w') as g:
g.write(repr(self.learn.model))
self.experiment.log_artifact(f.name, 'model_summary.txt')
except:
print('Did not log model summary. Check if your model is PyTorch model.')
if self.log_model_weights and not hasattr(self.learn, 'save_model'):
print('Unable to log model to Neptune.\n',
'Use "SaveModelCallback" to save model checkpoints that will be logged to Neptune.')
def after_batch(self):
# log loss and opt.hypers
if self.learn.training:
self.experiment.log_metric('batch__smooth_loss', self.learn.smooth_loss)
self.experiment.log_metric('batch__loss', self.learn.loss)
self.experiment.log_metric('batch__train_iter', self.learn.train_iter)
for i, h in enumerate(self.learn.opt.hypers):
for k, v in h.items():
self.experiment.log_metric(f'batch__opt.hypers.{k}', v)
def after_epoch(self):
# log metrics
for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
if n not in ['epoch', 'time']:
self.experiment.log_metric(f'epoch__{n}', v)
if n == 'time':
self.experiment.log_text(f'epoch__{n}', str(v))
# log model weights
if self.log_model_weights and hasattr(self.learn, 'save_model'):
if self.learn.save_model.every_epoch:
_file = join_path_file(f'{self.learn.save_model.fname}_{self.learn.save_model.epoch}',
self.learn.path / self.learn.model_dir,
ext='.pth')
else:
_file = join_path_file(self.learn.save_model.fname,
self.learn.path / self.learn.model_dir,
ext='.pth')
self.experiment.log_artifact(_file)
def after_fit(self):
if not self.keep_experiment_running:
try:
self.experiment.stop()
except:
print('No neptune experiment to stop.')
else:
print(f'Your experiment (id: {self.experiment.id}, name: {self.experiment.name}) is left in the running state.\n',
                  'You can log more data to it, like this: `neptune.log_metric()`')
# source file: zwyfastai-2.0.21/src/fastai/callback/neptune.py
__all__ = ['ProgressCallback', 'ShowGraphCallback', 'CSVLogger']
# Cell
from ..basics import *
# Cell
@docs
class ProgressCallback(Callback):
"A `Callback` to handle the display of progress bars"
order,_stateattrs = 60,('mbar','pbar')
def before_fit(self):
assert hasattr(self.learn, 'recorder')
if self.create_mbar: self.mbar = master_bar(list(range(self.n_epoch)))
if self.learn.logger != noop:
self.old_logger,self.learn.logger = self.logger,self._write_stats
self._write_stats(self.recorder.metric_names)
else: self.old_logger = noop
def before_epoch(self):
if getattr(self, 'mbar', False): self.mbar.update(self.epoch)
def before_train(self): self._launch_pbar()
def before_validate(self): self._launch_pbar()
def after_train(self): self.pbar.on_iter_end()
def after_validate(self): self.pbar.on_iter_end()
def after_batch(self):
self.pbar.update(self.iter+1)
if hasattr(self, 'smooth_loss'): self.pbar.comment = f'{self.smooth_loss:.4f}'
def _launch_pbar(self):
self.pbar = progress_bar(self.dl, parent=getattr(self, 'mbar', None), leave=False)
self.pbar.update(0)
def after_fit(self):
if getattr(self, 'mbar', False):
self.mbar.on_iter_end()
delattr(self, 'mbar')
if hasattr(self, 'old_logger'): self.learn.logger = self.old_logger
def _write_stats(self, log):
if getattr(self, 'mbar', False): self.mbar.write([f'{l:.6f}' if isinstance(l, float) else str(l) for l in log], table=True)
_docs = dict(before_fit="Setup the master bar over the epochs",
before_epoch="Update the master bar",
before_train="Launch a progress bar over the training dataloader",
before_validate="Launch a progress bar over the validation dataloader",
after_train="Close the progress bar over the training dataloader",
after_validate="Close the progress bar over the validation dataloader",
after_batch="Update the current progress bar",
after_fit="Close the master bar")
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback, Recorder, ProgressCallback]
elif ProgressCallback not in defaults.callbacks: defaults.callbacks.append(ProgressCallback)
# Cell
@patch
@contextmanager
def no_bar(self:Learner):
"Context manager that deactivates the use of progress bars"
has_progress = hasattr(self, 'progress')
if has_progress: self.remove_cb(self.progress)
try: yield self
finally:
if has_progress: self.add_cb(ProgressCallback())
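# Illustrative usage sketch (added for clarity, not part of the original module):
# `ProgressCallback` is registered in `defaults.callbacks` above, so every `Learner` shows
# progress bars by default; `no_bar` disables them for a block of code. `learn` is assumed
# to be an existing Learner.
def _example_silent_fit(learn, n_epoch=1):
    "Hedged sketch: train without progress bars (useful in scripts or tests)."
    with learn.no_bar():
        learn.fit(n_epoch)
    return learn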
# Cell
class ShowGraphCallback(Callback):
"Update a graph of training and validation loss"
order,run_valid=65,False
def before_fit(self):
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds")
if not(self.run): return
self.nb_batches = []
assert hasattr(self.learn, 'progress')
def after_train(self): self.nb_batches.append(self.train_iter)
def after_epoch(self):
"Plot validation loss in the pbar graph"
if not self.nb_batches: return
rec = self.learn.recorder
iters = range_of(rec.losses)
val_losses = [v[1] for v in rec.values]
x_bounds = (0, (self.n_epoch - len(self.nb_batches)) * self.nb_batches[0] + len(rec.losses))
y_bounds = (0, max((max(Tensor(rec.losses)), max(Tensor(val_losses)))))
self.progress.mbar.update_graph([(iters, rec.losses), (self.nb_batches, val_losses)], x_bounds, y_bounds)
# Cell
class CSVLogger(Callback):
"Log the results displayed in `learn.path/fname`"
order=60
def __init__(self, fname='history.csv', append=False):
self.fname,self.append = Path(fname),append
def read_log(self):
"Convenience method to quickly access the log."
return pd.read_csv(self.path/self.fname)
def before_fit(self):
"Prepare file with metric names."
if hasattr(self, "gather_preds"): return
self.path.parent.mkdir(parents=True, exist_ok=True)
self.file = (self.path/self.fname).open('a' if self.append else 'w')
self.file.write(','.join(self.recorder.metric_names) + '\n')
self.old_logger,self.learn.logger = self.logger,self._write_line
def _write_line(self, log):
"Write a line with `log` and call the old logger."
self.file.write(','.join([str(t) for t in log]) + '\n')
self.file.flush()
os.fsync(self.file.fileno())
self.old_logger(log)
def after_fit(self):
"Close the file and clean up."
if hasattr(self, "gather_preds"): return
self.file.close()
        self.learn.logger = self.old_logger
# source file: zwyfastai-2.0.21/src/fastai/callback/progress.py
__all__ = ['CancelStepException', 'CancelFitException', 'CancelEpochException', 'CancelTrainException',
'CancelValidException', 'CancelBatchException', 'event', 'Callback', 'TrainEvalCallback',
'GatherPredsCallback', 'FetchPredsCallback']
# Cell
from ..data.all import *
from ..optimizer import *
# Cell
#nbdev_comment _all_ = ['CancelStepException','CancelFitException','CancelEpochException','CancelTrainException','CancelValidException','CancelBatchException']
# Cell
_events = L.split('after_create before_fit before_epoch before_train before_batch after_pred after_loss \
before_backward before_step after_cancel_step after_step after_cancel_batch after_batch after_cancel_train \
after_train before_validate after_cancel_validate after_validate after_cancel_epoch \
after_epoch after_cancel_fit after_fit')
mk_class('event', **_events.map_dict(),
doc="All possible events as attributes to get tab-completion and typo-proofing")
# Cell
#nbdev_comment _all_ = ['event']
# Cell
_inner_loop = "before_batch after_pred after_loss before_backward before_step after_step after_cancel_batch after_batch".split()
# Cell
@funcs_kwargs(as_method=True)
class Callback(Stateful,GetAttr):
"Basic class handling tweaks of the training loop by changing a `Learner` in various events"
order,_default,learn,run,run_train,run_valid = 0,'learn',None,True,True,True
_methods = _events
def __init__(self, **kwargs): assert not kwargs, f'Passed unknown events: {kwargs}'
def __repr__(self): return type(self).__name__
def __call__(self, event_name):
"Call `self.{event_name}` if it's defined"
_run = (event_name not in _inner_loop or (self.run_train and getattr(self, 'training', True)) or
(self.run_valid and not getattr(self, 'training', False)))
res = None
if self.run and _run: res = getattr(self, event_name, noop)()
if event_name=='after_fit': self.run=True #Reset self.run to True at each end of fit
return res
def __setattr__(self, name, value):
if hasattr(self.learn,name):
warn(f"You are shadowing an attribute ({name}) that exists in the learner. Use `self.learn.{name}` to avoid this")
super().__setattr__(name, value)
@property
def name(self):
"Name of the `Callback`, camel-cased and with '*Callback*' removed"
return class2attr(self, 'Callback')
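# Illustrative usage sketch (added for clarity, not part of the original module): a minimal
# custom `Callback`. A method named after any attribute of `event` above is called at that
# point of the training loop; the class name below is hypothetical.
class _ExamplePrintLoss(Callback):
    "Hedged sketch: print the smoothed training loss at the end of every epoch."
    order = 60                        # run after the Recorder has updated its values
    def after_epoch(self): print(f'epoch {self.epoch}: smooth loss {self.smooth_loss:.4f}')
# Usage (hypothetical): Learner(dls, model, cbs=_ExamplePrintLoss())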
# Cell
class TrainEvalCallback(Callback):
"`Callback` that tracks the number of iterations done and properly sets training/eval mode"
order,run_valid = -10,False
def after_create(self): self.learn.n_epoch = 1
def before_fit(self):
"Set the iter and epoch counters to 0, put the model and the right device"
self.learn.epoch,self.learn.loss = 0,tensor(0.)
self.learn.train_iter,self.learn.pct_train = 0,0.
if hasattr(self.dls, 'device'): self.model.to(self.dls.device)
if hasattr(self.model, 'reset'): self.model.reset()
def after_batch(self):
"Update the iter counter (in training mode)"
self.learn.pct_train += 1./(self.n_iter*self.n_epoch)
self.learn.train_iter += 1
def before_train(self):
"Set the model in training mode"
self.learn.pct_train=self.epoch/self.n_epoch
self.model.train()
self.learn.training=True
def before_validate(self):
"Set the model in validation mode"
self.model.eval()
self.learn.training=False
# Cell
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback]
# Cell
_ex_docs = dict(
CancelBatchException="Skip the rest of this batch and go to `after_batch`",
CancelTrainException="Skip the rest of the training part of the epoch and go to `after_train`",
CancelValidException="Skip the rest of the validation part of the epoch and go to `after_validate`",
CancelEpochException="Skip the rest of this epoch and go to `after_epoch`",
CancelStepException ="Skip stepping the optimizer",
CancelFitException ="Interrupts training and go to `after_fit`")
for c,d in _ex_docs.items(): mk_class(c,sup=Exception,doc=d)
# Cell
#TODO: save_targs and save_preds only handle preds/targets that have one tensor, not tuples of tensors.
class GatherPredsCallback(Callback):
"`Callback` that saves the predictions and targets, optionally `with_loss`"
_stateattrs=('preds','targets','inputs','losses')
def __init__(self, with_input=False, with_loss=False, save_preds=None, save_targs=None, concat_dim=0):
store_attr("with_input,with_loss,save_preds,save_targs,concat_dim")
def before_batch(self):
if self.with_input: self.inputs.append((self.learn.to_detach(self.xb)))
def before_validate(self):
"Initialize containers"
self.preds,self.targets = [],[]
if self.with_input: self.inputs = []
if self.with_loss: self.losses = []
def after_batch(self):
"Save predictions, targets and potentially losses"
if not hasattr(self, 'pred'): return
preds,targs = self.learn.to_detach(self.pred),self.learn.to_detach(self.yb)
if self.save_preds is None: self.preds.append(preds)
else: (self.save_preds/str(self.iter)).save_array(preds)
if self.save_targs is None: self.targets.append(targs)
else: (self.save_targs/str(self.iter)).save_array(targs[0])
if self.with_loss:
bs = find_bs(self.yb)
loss = self.loss if self.loss.numel() == bs else self.loss.view(bs,-1).mean(1)
self.losses.append(self.learn.to_detach(loss))
def after_validate(self):
"Concatenate all recorded tensors"
if not hasattr(self, 'preds'): return
if self.with_input: self.inputs = detuplify(to_concat(self.inputs, dim=self.concat_dim))
if not self.save_preds: self.preds = detuplify(to_concat(self.preds, dim=self.concat_dim))
if not self.save_targs: self.targets = detuplify(to_concat(self.targets, dim=self.concat_dim))
if self.with_loss: self.losses = to_concat(self.losses)
def all_tensors(self):
res = [None if self.save_preds else self.preds, None if self.save_targs else self.targets]
if self.with_input: res = [self.inputs] + res
if self.with_loss: res.append(self.losses)
return res
# Cell
class FetchPredsCallback(Callback):
"A callback to fetch predictions during the training loop"
remove_on_fetch = True
def __init__(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, cbs=None, reorder=True):
self.cbs = L(cbs)
store_attr('ds_idx,dl,with_input,with_decoded,reorder')
def after_validate(self):
to_rm = L(cb for cb in self.learn.cbs if getattr(cb, 'remove_on_fetch', False))
with self.learn.removed_cbs(to_rm + self.cbs) as learn:
self.preds = learn.get_preds(ds_idx=self.ds_idx, dl=self.dl,
                                         with_input=self.with_input, with_decoded=self.with_decoded, inner=True, reorder=self.reorder)
# source file: zwyfastai-2.0.21/src/fastai/callback/core.py
__all__ = ['reduce_loss', 'MixHandler', 'MixUp', 'CutMix']
# Cell
from ..basics import *
from torch.distributions.beta import Beta
# Cell
def reduce_loss(loss, reduction='mean'):
"Reduce the loss based on `reduction`"
return loss.mean() if reduction == 'mean' else loss.sum() if reduction == 'sum' else loss
# Cell
class MixHandler(Callback):
"A handler class for implementing `MixUp` style scheduling"
run_valid = False
def __init__(self, alpha=0.5):
self.distrib = Beta(tensor(alpha), tensor(alpha))
def before_fit(self):
self.stack_y = getattr(self.learn.loss_func, 'y_int', False)
        if self.stack_y: self.old_lf,self.learn.loss_func = self.learn.loss_func,self.lf
def after_fit(self):
if self.stack_y: self.learn.loss_func = self.old_lf
def lf(self, pred, *yb):
if not self.training: return self.old_lf(pred, *yb)
with NoneReduce(self.old_lf) as lf:
loss = torch.lerp(lf(pred,*self.yb1), lf(pred,*yb), self.lam)
return reduce_loss(loss, getattr(self.old_lf, 'reduction', 'mean'))
# Cell
class MixUp(MixHandler):
"Implementation of https://arxiv.org/abs/1710.09412"
def __init__(self, alpha=.4): super().__init__(alpha)
def before_batch(self):
lam = self.distrib.sample((self.y.size(0),)).squeeze().to(self.x.device)
lam = torch.stack([lam, 1-lam], 1)
self.lam = lam.max(1)[0]
shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
xb1,self.yb1 = tuple(L(self.xb).itemgot(shuffle)),tuple(L(self.yb).itemgot(shuffle))
nx_dims = len(self.x.size())
self.learn.xb = tuple(L(xb1,self.xb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=nx_dims-1)))
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
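# Illustrative usage sketch (added for clarity, not part of the original module): MixUp is
# enabled by passing it as a callback; with a `y_int` loss such as `CrossEntropyLossFlat`
# the targets are mixed through `MixHandler.lf` above. `dls` and `model` are assumed to exist.
def _example_mixup_training(dls, model):
    "Hedged sketch: train a classifier with MixUp (alpha=0.4)."
    learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=accuracy, cbs=MixUp(0.4))
    learn.fit(1)
    return learn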
# Cell
class CutMix(MixHandler):
"Implementation of `https://arxiv.org/abs/1905.04899`"
def __init__(self, alpha=1.): super().__init__(alpha)
def before_batch(self):
bs, _, H, W = self.x.size()
self.lam = self.distrib.sample((1,))
shuffle = torch.randperm(bs)
xb1,self.yb1 = self.x[shuffle], tuple((self.y[shuffle],))
x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)
self.learn.xb[0][..., y1:y2, x1:x2] = xb1[..., y1:y2, x1:x2]
self.lam = (1 - ((x2-x1)*(y2-y1))/float(W*H)).item()
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
def rand_bbox(self, W, H, lam):
cut_rat = torch.sqrt(1. - lam)
cut_w = torch.round(W * cut_rat).type(torch.long)
cut_h = torch.round(H * cut_rat).type(torch.long)
# uniform
cx = torch.randint(0, W, (1,))
cy = torch.randint(0, H, (1,))
x1 = torch.clamp(cx - cut_w // 2, 0, W)
y1 = torch.clamp(cy - cut_h // 2, 0, H)
x2 = torch.clamp(cx + cut_w // 2, 0, W)
y2 = torch.clamp(cy + cut_h // 2, 0, H)
        return x1, y1, x2, y2
# source file: zwyfastai-2.0.21/src/fastai/callback/mixup.py
__all__ = ['MixedPrecision', 'FP16TestCallback', 'get_master', 'to_master_grads', 'to_model_params', 'test_overflow',
'grad_overflow', 'copy_clone', 'ModelToHalf', 'NonNativeMixedPrecision']
# Cell
from ..basics import *
from .progress import *
from torch.cuda.amp import GradScaler,autocast
from torch.cuda.amp.grad_scaler import OptState
# Cell
@delegates(GradScaler)
class MixedPrecision(Callback):
"Mixed precision training using Pytorch's `autocast` and `GradScaler`"
order = 10
def __init__(self, **kwargs): self.kwargs,self.autocast = kwargs,autocast()
def before_fit(self): self.learn.scaler,self.scales = GradScaler(**self.kwargs),L()
def before_batch(self): self.autocast.__enter__()
def after_pred(self):
if self.pred.dtype==torch.float16: self.learn.pred = to_float(self.pred)
def after_loss(self): self.autocast.__exit__()
def before_backward(self): self.learn.loss_grad = self.scaler.scale(self.loss_grad)
def before_step(self):
self.skipped=True
self.scaler.step(self)
if self.skipped: raise CancelStepException()
self.scales.append(self.scaler.get_scale())
def after_step(self): self.learn.scaler.update()
@property # pretend to be an optimizer for `GradScaler`
def param_groups(self): return self.opt.param_groups
def step(self, *args, **kwargs): self.skipped=False
# Cell
class FP16TestCallback(Callback):
"Asserts that predictions are `float16` values"
order = 9
def after_pred(self): assert self.pred.dtype==torch.float16
# Cell
@patch
@delegates(GradScaler)
def to_fp16(self:Learner, **kwargs): return self.add_cb(MixedPrecision(**kwargs))
# Cell
@patch
def to_fp32(self:Learner): return self.remove_cb(MixedPrecision)
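# Illustrative usage sketch (added for clarity, not part of the original module): `to_fp16`
# simply attaches the `MixedPrecision` callback above. `learn` is assumed to be an existing
# Learner whose data and model live on a CUDA device.
def _example_mixed_precision(learn):
    "Hedged sketch: run one epoch under native AMP, then return to full precision."
    learn.to_fp16()
    learn.fit(1)      # forward/backward run under autocast with dynamic gradient scaling
    learn.to_fp32()
    return learn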
# Cell
from ..fp16_utils import convert_network, model_grads_to_master_grads, master_params_to_model_params
# Cell
from torch.nn.utils import parameters_to_vector
# Cell
def get_master(opt, flat_master=False):
model_params = [[param for param in pg if getattr(param, 'requires_grad', False) and hasattr(param, 'data')] for pg in opt.param_lists]
if flat_master:
master_params = []
for pg in model_params:
mp = parameters_to_vector([param.data.float() for param in pg])
mp = nn.Parameter(mp, requires_grad=True)
if mp.grad is None: mp.grad = mp.new(*mp.size())
master_params.append([mp])
else:
master_params = [[nn.Parameter(param.data.clone().float().detach(), requires_grad=True) for param in pg] for pg in model_params]
return model_params, master_params
# Cell
def to_master_grads(model_pgs, master_pgs, flat_master=False):
for (model_params,master_params) in zip(model_pgs,master_pgs):
model_grads_to_master_grads(model_params, master_params, flat_master=flat_master)
# Cell
def to_model_params(model_pgs, master_pgs, flat_master=False)->None:
for (model_params,master_params) in zip(model_pgs,master_pgs):
master_params_to_model_params(model_params, master_params, flat_master=flat_master)
# Cell
def test_overflow(x):
s = float(x.float().sum())
return (s == float('inf') or s == float('-inf') or s != s)
# Cell
def grad_overflow(pgs):
for pg in pgs:
for p in pg:
if p.grad is not None and test_overflow(p.grad.data): return True
return False
# Cell
def copy_clone(d):
return {k:(v.detach().clone().float() if isinstance(v,Tensor) else v) for k,v in d.items()}
# Cell
def _copy_state(opt, pgs1, pgs2):
opt.param_lists = pgs2
for pg1,pg2 in zip(pgs1, pgs2):
for p1,p2 in zip(pg1, pg2): opt.state[p2] = copy_clone(opt.state.pop(p1, {}))
# Cell
class ModelToHalf(Callback):
"Use with NonNativeMixedPrecision callback (but it needs to run at the very beginning)"
order=-50
def before_fit(self): self.learn.model = convert_network(self.model, dtype=torch.float16)
def after_fit (self): self.learn.model = convert_network(self.model, dtype=torch.float32)
# Cell
@docs
class NonNativeMixedPrecision(Callback):
"Run training in mixed precision"
order=10
def __init__(self, loss_scale=512, flat_master=False, dynamic=True, max_loss_scale=2.**24,
div_factor=2., scale_wait=500, clip=None):
assert torch.backends.cudnn.enabled, "Mixed precision training requires cudnn."
self.flat_master,self.dynamic,self.max_loss_scale = flat_master,dynamic,max_loss_scale
self.div_factor,self.scale_wait,self.clip = div_factor,scale_wait,clip
self.loss_scale = max_loss_scale if dynamic else loss_scale
def before_fit(self):
assert self.dls.device.type == 'cuda', "Mixed-precision training requires a GPU, remove the call `to_fp16`"
if self.learn.opt is None: self.learn.create_opt()
self.model_pgs,self.master_pgs = get_master(self.opt, self.flat_master)
self.old_pgs = self.opt.param_lists
#Changes the optimizer so that the optimization step is done in FP32.
_copy_state(self.learn.opt, self.model_pgs, self.master_pgs)
if self.dynamic: self.count = 0
def before_batch(self): self.learn.xb = to_half(self.xb)
def after_pred(self): self.learn.pred = to_float(self.pred)
def before_backward(self): self.learn.loss_grad *= self.loss_scale
def before_step(self):
#First, check for an overflow
if self.dynamic and grad_overflow(self.model_pgs):
self.loss_scale /= self.div_factor
self.learn.loss_grad /= self.div_factor #to record correct loss
self.model.zero_grad()
raise CancelBatchException() #skip step and zero_grad
to_master_grads(self.model_pgs, self.master_pgs, self.flat_master)
for master_params in self.master_pgs:
for param in master_params:
if param.grad is not None: param.grad.div_(self.loss_scale)
if self.clip is not None:
for group in self.master_pgs: nn.utils.clip_grad_norm_(group, self.clip)
# Check if it's been long enough without overflow
if self.dynamic:
self.count += 1
if self.count == self.scale_wait:
self.count = 0
self.loss_scale *= self.div_factor
def after_step(self):
self.model.zero_grad() #Zero the gradients of the model manually (optimizer disconnected)
to_model_params(self.model_pgs, self.master_pgs, self.flat_master)
def after_batch(self):
if self.training: self.learn.loss_grad /= self.loss_scale #Log correct loss
def after_fit(self):
if not hasattr(self,'master_pgs'): return
_copy_state(self.learn.opt, self.master_pgs, self.model_pgs)
self.learn.opt.param_lists = self.old_pgs
delattr(self, "master_pgs")
delattr(self, "model_pgs")
delattr(self, "old_pgs")
_docs = dict(before_fit="Put the model in FP16 and prepare the two copies of the parameters",
before_batch="Put the input in FP16",
after_pred="Put the output back to FP32 so that the loss is computed in FP32",
before_backward="Apply loss scaling to avoid gradient underflow",
before_step="Copy the gradients to the master param and undo the loss scaling",
after_step="Copy the master params to the model params",
after_batch="Ensure loss is logged correctly",
after_fit="Put the model back in FP32")
# Cell
@patch
@delegates(NonNativeMixedPrecision.__init__)
def to_non_native_fp16(self:Learner, **kwargs): return self.add_cbs([ModelToHalf(), NonNativeMixedPrecision(**kwargs)])
# Cell
@patch
def to_non_native_fp32(self: Learner): return self.remove_cbs([ModelToHalf, NonNativeMixedPrecision])
# source file: zwyfastai-2.0.21/src/fastai/callback/fp16.py
__all__ = ['json_clean', 'CaptumInterpretation']
# Cell
import tempfile
from ..basics import *
# Cell
from ipykernel import jsonutil
# Cell
# Dirty hack as json_clean doesn't support CategoryMap type
_json_clean=jsonutil.json_clean
def json_clean(o):
o = list(o.items) if isinstance(o,CategoryMap) else o
return _json_clean(o)
jsonutil.json_clean = json_clean
# Cell
from captum.attr import IntegratedGradients,NoiseTunnel,GradientShap,Occlusion
from captum.attr import visualization as viz
from matplotlib.colors import LinearSegmentedColormap
from captum.insights import AttributionVisualizer, Batch
from captum.insights.attr_vis.features import ImageFeature
# Cell
class CaptumInterpretation():
"Captum Interpretation for Resnet"
def __init__(self,learn,cmap_name='custom blue',colors=None,N=256,methods=['original_image','heat_map'],signs=["all", "positive"],outlier_perc=1):
store_attr('learn,cmap_name,colors,N,methods,signs,outlier_perc')
self.colors = [(0, '#ffffff'),(0.25, '#000000'),(1, '#000000')] if self.colors is None else self.colors
self.dls=learn.dls
self.model=self.learn.model
self.supported_metrics=['IG','NT','Occl']
def get_baseline_img(self, img_tensor,baseline_type):
baseline_img=None
if baseline_type=='zeros': baseline_img= img_tensor*0
if baseline_type=='uniform': baseline_img= torch.rand(img_tensor.shape)
if baseline_type=='gauss':
baseline_img= (torch.rand(img_tensor.shape).to(self.dls.device)+img_tensor)/2
return baseline_img.to(self.dls.device)
def visualize(self,inp,metric='IG',n_steps=1000,baseline_type='zeros',nt_type='smoothgrad',strides = (3, 4, 4), sliding_window_shapes=(3,15, 15)):
if metric not in self.supported_metrics:
raise Exception(f"Metric {metric} is not supported. Currently {self.supported_metrics} are only supported")
tls = L([TfmdLists(inp, t) for t in L(ifnone(self.dls.tfms,[None]))])
inp_data=list(zip(*(tls[0],tls[1])))[0]
# Get Data
enc_data,dec_data=self._get_enc_dec_data(inp_data)
# Get Required Metrics
attributions=self._get_attributions(enc_data,metric,n_steps,nt_type,baseline_type,strides,sliding_window_shapes)
#Visualise the attributions
self._viz(attributions,dec_data,metric)
def _viz(self,attributions,dec_data,metric):
default_cmap = LinearSegmentedColormap.from_list(self.cmap_name,self.colors, N=self.N)
_ = viz.visualize_image_attr_multiple(np.transpose(attributions.squeeze().cpu().detach().numpy(), (1,2,0)),
np.transpose(dec_data[0].numpy(), (1,2,0)),
methods=self.methods,
cmap=default_cmap,
show_colorbar=True,
signs=self.signs,
outlier_perc=self.outlier_perc, titles=[f'Original Image - ({dec_data[1]})', metric])
def _get_enc_dec_data(self,inp_data):
dec_data=self.dls.after_item(inp_data)
enc_data=self.dls.after_batch(to_device(self.dls.before_batch(dec_data),self.dls.device))
return(enc_data,dec_data)
def _get_attributions(self,enc_data,metric,n_steps,nt_type,baseline_type,strides,sliding_window_shapes):
# Get Baseline
baseline=self.get_baseline_img(enc_data[0],baseline_type)
supported_metrics ={}
if metric == 'IG':
self._integrated_gradients = self._integrated_gradients if hasattr(self,'_integrated_gradients') else IntegratedGradients(self.model)
            return self._integrated_gradients.attribute(enc_data[0],baseline, target=enc_data[1], n_steps=n_steps)
elif metric == 'NT':
self._integrated_gradients = self._integrated_gradients if hasattr(self,'_integrated_gradients') else IntegratedGradients(self.model)
self._noise_tunnel= self._noise_tunnel if hasattr(self,'_noise_tunnel') else NoiseTunnel(self._integrated_gradients)
return self._noise_tunnel.attribute(enc_data[0].to(self.dls.device), n_samples=1, nt_type=nt_type, target=enc_data[1])
elif metric == 'Occl':
self._occlusion = self._occlusion if hasattr(self,'_occlusion') else Occlusion(self.model)
return self._occlusion.attribute(enc_data[0].to(self.dls.device),
strides = strides,
target=enc_data[1],
sliding_window_shapes=sliding_window_shapes,
baselines=baseline)
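# Illustrative usage sketch (added for clarity, not part of the original module): the image
# path is hypothetical; `learn` is assumed to be a trained vision Learner whose DataLoaders
# can decode a single image file.
def _example_captum(learn, img_path='images/cat.jpg'):
    "Hedged sketch: visualise Integrated Gradients and Occlusion attributions for one image."
    interp = CaptumInterpretation(learn)
    interp.visualize(img_path, metric='IG')       # integrated-gradients heat map
    interp.visualize(img_path, metric='Occl')     # occlusion-based attribution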
# Cell
@patch
def insights(x: CaptumInterpretation,inp_data,debug=True):
_baseline_func= lambda o: o*0
_get_vocab = lambda vocab: list(map(str,vocab)) if isinstance(vocab[0],bool) else vocab
dl = x.dls.test_dl(L(inp_data),with_labels=True, bs=4)
normalize_func= next((func for func in dl.after_batch if type(func)==Normalize),noop)
# captum v0.3 expects tensors without the batch dimension.
if hasattr(normalize_func, 'mean'):
if normalize_func.mean.ndim==4: normalize_func.mean.squeeze_(0)
if hasattr(normalize_func, 'std'):
if normalize_func.std.ndim==4: normalize_func.std.squeeze_(0)
visualizer = AttributionVisualizer(
models=[x.model],
score_func=lambda o: torch.nn.functional.softmax(o, 1),
classes=_get_vocab(dl.vocab),
features=[
ImageFeature(
"Image",
baseline_transforms=[_baseline_func],
input_transforms=[normalize_func],
)
],
dataset=x._formatted_data_iter(dl,normalize_func)
)
    visualizer.render(debug=debug)
# source file: zwyfastai-2.0.21/src/fastai/callback/captum.py
__all__ = ['CutMix']
# Cell
from torch.distributions.beta import Beta
from ..vision.all import *
# Cell
class CutMix(Callback):
"Implementation of `https://arxiv.org/abs/1905.04899`"
run_after,run_valid = [Normalize],False
def __init__(self, alpha=1.): self.distrib = Beta(tensor(alpha), tensor(alpha))
def before_fit(self):
self.stack_y = getattr(self.learn.loss_func, 'y_int', False)
if self.stack_y: self.old_lf,self.learn.loss_func = self.learn.loss_func,self.lf
def after_fit(self):
if self.stack_y: self.learn.loss_func = self.old_lf
def before_batch(self):
W, H = self.xb[0].size(3), self.xb[0].size(2)
lam = self.distrib.sample((1,)).squeeze().to(self.x.device)
lam = torch.stack([lam, 1-lam])
self.lam = lam.max()
shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
xb1,self.yb1 = tuple(L(self.xb).itemgot(shuffle)),tuple(L(self.yb).itemgot(shuffle))
nx_dims = len(self.x.size())
x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)
self.learn.xb[0][:, :, x1:x2, y1:y2] = xb1[0][:, :, x1:x2, y1:y2]
self.lam = (1 - ((x2-x1)*(y2-y1))/float(W*H)).item()
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
def lf(self, pred, *yb):
if not self.training: return self.old_lf(pred, *yb)
with NoneReduce(self.old_lf) as lf:
loss = torch.lerp(lf(pred,*self.yb1), lf(pred,*yb), self.lam)
return reduce_loss(loss, getattr(self.old_lf, 'reduction', 'mean'))
def rand_bbox(self, W, H, lam):
cut_rat = torch.sqrt(1. - lam)
cut_w = (W * cut_rat).type(torch.long)
cut_h = (H * cut_rat).type(torch.long)
# uniform
cx = torch.randint(0, W, (1,)).to(self.x.device)
cy = torch.randint(0, H, (1,)).to(self.x.device)
x1 = torch.clamp(cx - cut_w // 2, 0, W)
y1 = torch.clamp(cy - cut_h // 2, 0, H)
x2 = torch.clamp(cx + cut_w // 2, 0, W)
y2 = torch.clamp(cy + cut_h // 2, 0, H)
        return x1, y1, x2, y2
# source file: zwyfastai-2.0.21/src/fastai/callback/cutmix.py
__all__ = ['DcmDataset', 'DcmTag', 'DcmMultiValue', 'dcmread', 'get_dicom_files', 'DicomSegmentationDataLoaders',
'get_dicom_files', 'TensorDicom', 'PILDicom', 'pixels', 'scaled_px', 'array_freqhist_bins', 'dicom_windows',
'TensorCTScan', 'PILCTScan', 'uniform_blur2d', 'gauss_blur2d', 'mask2bbox', 'crop_resize', 'shape',
'DicomSegmentationDataLoaders']
# Cell
from ..basics import *
from ..vision.all import *
from ..data.transforms import *
import pydicom,kornia,skimage
from pydicom.dataset import Dataset as DcmDataset
from pydicom.tag import BaseTag as DcmTag
from pydicom.multival import MultiValue as DcmMultiValue
from PIL import Image
try:
import cv2
cv2.setNumThreads(0)
except: pass
# Cell
#nbdev_comment _all_ = ['DcmDataset', 'DcmTag', 'DcmMultiValue', 'dcmread', 'get_dicom_files', 'DicomSegmentationDataLoaders']
# Cell
def get_dicom_files(path, recurse=True, folders=None):
"Get dicom files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=[".dcm",".dicom"], recurse=recurse, folders=folders)
# Cell
@patch
def dcmread(fn:Path, force = False):
"Open a `DICOM` file"
return pydicom.dcmread(str(fn), force)
# Cell
class TensorDicom(TensorImage):
"Inherits from `TensorImage` and converts the `pixel_array` into a `TensorDicom`"
_show_args = {'cmap':'gray'}
# Cell
class PILDicom(PILBase):
_open_args,_tensor_cls,_show_args = {},TensorDicom,TensorDicom._show_args
@classmethod
def create(cls, fn:(Path,str,bytes), mode=None)->None:
"Open a `DICOM file` from path `fn` or bytes `fn` and load it as a `PIL Image`"
if isinstance(fn,bytes): im = Image.fromarray(pydicom.dcmread(pydicom.filebase.DicomBytesIO(fn)).pixel_array)
if isinstance(fn,(Path,str)): im = Image.fromarray(pydicom.dcmread(fn).pixel_array)
im.load()
im = im._new(im.im)
return cls(im.convert(mode) if mode else im)
PILDicom._tensor_cls = TensorDicom
# Cell
@patch
def png16read(self:Path): return array(Image.open(self), dtype=np.uint16)
# Cell
@patch(as_prop=True)
def pixels(self:DcmDataset):
"`pixel_array` as a tensor"
return tensor(self.pixel_array.astype(np.float32))
# Cell
@patch(as_prop=True)
def scaled_px(self:DcmDataset):
"`pixels` scaled by `RescaleSlope` and `RescaleIntercept`"
img = self.pixels
    if hasattr(self, 'RescaleSlope') and hasattr(self, 'RescaleIntercept'):
return img * self.RescaleSlope + self.RescaleIntercept
else: return img
# Cell
def array_freqhist_bins(self, n_bins=100):
"A numpy based function to split the range of pixel values into groups, such that each group has around the same number of pixels"
imsd = np.sort(self.flatten())
t = np.array([0.001])
t = np.append(t, np.arange(n_bins)/n_bins+(1/2/n_bins))
t = np.append(t, 0.999)
    t = (len(imsd)*t+0.5).astype(int)  # `np.int` was removed in recent NumPy versions
return np.unique(imsd[t])
# Cell
@patch
def freqhist_bins(self:Tensor, n_bins=100):
"A function to split the range of pixel values into groups, such that each group has around the same number of pixels"
imsd = self.view(-1).sort()[0]
t = torch.cat([tensor([0.001]),
torch.arange(n_bins).float()/n_bins+(1/2/n_bins),
tensor([0.999])])
t = (len(imsd)*t).long()
return imsd[t].unique()
# Cell
@patch
def hist_scaled_pt(self:Tensor, brks=None):
# Pytorch-only version - switch to this if/when interp_1d can be optimized
if brks is None: brks = self.freqhist_bins()
brks = brks.to(self.device)
ys = torch.linspace(0., 1., len(brks)).to(self.device)
return self.flatten().interp_1d(brks, ys).reshape(self.shape).clamp(0.,1.)
# Cell
@patch
def hist_scaled(self:Tensor, brks=None):
"Scales a tensor using `freqhist_bins` to values between 0 and 1"
if self.device.type=='cuda': return self.hist_scaled_pt(brks)
if brks is None: brks = self.freqhist_bins()
ys = np.linspace(0., 1., len(brks))
x = self.numpy().flatten()
x = np.interp(x, brks.numpy(), ys)
return tensor(x).reshape(self.shape).clamp(0.,1.)
# Cell
@patch
def hist_scaled(self:DcmDataset, brks=None, min_px=None, max_px=None):
"Pixels scaled to a `min_px` and `max_px` value"
px = self.scaled_px
if min_px is not None: px[px<min_px] = min_px
if max_px is not None: px[px>max_px] = max_px
return px.hist_scaled(brks=brks)
# Cell
@patch
def windowed(self:Tensor, w, l):
"Scale pixel intensity by window width and window level"
px = self.clone()
px_min = l - w//2
px_max = l + w//2
px[px<px_min] = px_min
px[px>px_max] = px_max
return (px-px_min) / (px_max-px_min)
# Cell
@patch
def windowed(self:DcmDataset, w, l):
return self.scaled_px.windowed(w,l)
# Cell
# From https://radiopaedia.org/articles/windowing-ct
dicom_windows = types.SimpleNamespace(
brain=(80,40),
subdural=(254,100),
stroke=(8,32),
brain_bone=(2800,600),
brain_soft=(375,40),
lungs=(1500,-600),
mediastinum=(350,50),
abdomen_soft=(400,50),
liver=(150,30),
spine_soft=(250,50),
spine_bone=(1800,400)
)
# Cell
class TensorCTScan(TensorImageBW):
"Inherits from `TensorImageBW` and converts the `pixel_array` into a `TensorCTScan`"
_show_args = {'cmap':'bone'}
# Cell
class PILCTScan(PILBase): _open_args,_tensor_cls,_show_args = {},TensorCTScan,TensorCTScan._show_args
# Cell
@patch
@delegates(show_image)
def show(self:DcmDataset, scale=True, cmap=plt.cm.bone, min_px=-1100, max_px=None, **kwargs):
"Display a normalized dicom image by default"
px = (self.windowed(*scale) if isinstance(scale,tuple)
else self.hist_scaled(min_px=min_px,max_px=max_px,brks=scale) if isinstance(scale,(ndarray,Tensor))
else self.hist_scaled(min_px=min_px,max_px=max_px) if scale
else self.scaled_px)
show_image(px, cmap=cmap, **kwargs)
# Cell
@patch
def show(self:DcmDataset, frames=1, scale=True, cmap=plt.cm.bone, min_px=-1100, max_px=None, **kwargs):
"Adds functionality to view dicom images where each file may have more than 1 frame"
px = (self.windowed(*scale) if isinstance(scale,tuple)
else self.hist_scaled(min_px=min_px,max_px=max_px,brks=scale) if isinstance(scale,(ndarray,Tensor))
else self.hist_scaled(min_px=min_px,max_px=max_px) if scale
else self.scaled_px)
if px.ndim > 2:
gh=[]
p = px.shape; print(f'{p[0]} frames per file')
for i in range(frames): u = px[i]; gh.append(u)
show_images(gh, **kwargs)
else: show_image(px, cmap=cmap, **kwargs)
# Cell
@patch
def pct_in_window(dcm:DcmDataset, w, l):
"% of pixels in the window `(w,l)`"
px = dcm.scaled_px
return ((px > l-w//2) & (px < l+w//2)).float().mean().item()
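# Illustrative usage sketch (added for clarity, not part of the original module): reading a
# DICOM file and displaying it with one of the preset windows above. The file path is
# hypothetical.
def _example_show_windowed(fname='sample.dcm'):
    "Hedged sketch: show a CT slice with the brain window and report pixel coverage."
    dcm = Path(fname).dcmread()                     # uses the `dcmread` patch above
    dcm.show(scale=dicom_windows.brain)             # window (width, level) = (80, 40)
    return dcm.pct_in_window(*dicom_windows.brain)  # fraction of pixels inside that window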
# Cell
def uniform_blur2d(x,s):
"Uniformly apply blurring"
w = x.new_ones(1,1,1,s)/s
# Factor 2d conv into 2 1d convs
x = unsqueeze(x, dim=0, n=4-x.dim())
r = (F.conv2d(x, w, padding=s//2))
r = (F.conv2d(r, w.transpose(-1,-2), padding=s//2)).cpu()[:,0]
return r.squeeze()
# Cell
def gauss_blur2d(x,s):
"Apply gaussian_blur2d kornia filter"
s2 = int(s/4)*2+1
x2 = unsqueeze(x, dim=0, n=4-x.dim())
res = kornia.filters.gaussian_blur2d(x2, (s2,s2), (s,s), 'replicate')
return res.squeeze()
# Cell
@patch
def mask_from_blur(x:Tensor, window, sigma=0.3, thresh=0.05, remove_max=True):
"Create a mask from the blurred image"
p = x.windowed(*window)
if remove_max: p[p==1] = 0
return gauss_blur2d(p, s=sigma*x.shape[-1])>thresh
# Cell
@patch
def mask_from_blur(x:DcmDataset, window, sigma=0.3, thresh=0.05, remove_max=True):
"Create a mask from the blurred image"
return to_device(x.scaled_px).mask_from_blur(window, sigma, thresh, remove_max=remove_max)
# Cell
def _px_bounds(x, dim):
c = x.sum(dim).nonzero().cpu()
idxs,vals = torch.unique(c[:,0],return_counts=True)
vs = torch.split_with_sizes(c[:,1],tuple(vals))
d = {k.item():v for k,v in zip(idxs,vs)}
default_u = tensor([0,x.shape[-1]-1])
b = [d.get(o,default_u) for o in range(x.shape[0])]
b = [tensor([o.min(),o.max()]) for o in b]
return torch.stack(b)
# Cell
def mask2bbox(mask):
no_batch = mask.dim()==2
if no_batch: mask = mask[None]
bb1 = _px_bounds(mask,-1).t()
bb2 = _px_bounds(mask,-2).t()
res = torch.stack([bb1,bb2],dim=1).to(mask.device)
return res[...,0] if no_batch else res
# Cell
def _bbs2sizes(crops, init_sz, use_square=True):
bb = crops.flip(1)
szs = (bb[1]-bb[0])
if use_square: szs = szs.max(0)[0][None].repeat((2,1))
overs = (szs+bb[0])>init_sz
bb[0][overs] = init_sz-szs[overs]
lows = (bb[0]/float(init_sz))
return lows,szs/float(init_sz)
# Cell
def crop_resize(x, crops, new_sz):
# NB assumes square inputs. Not tested for non-square anythings!
bs = x.shape[0]
lows,szs = _bbs2sizes(crops, x.shape[-1])
if not isinstance(new_sz,(list,tuple)): new_sz = (new_sz,new_sz)
id_mat = tensor([[1.,0,0],[0,1,0]])[None].repeat((bs,1,1)).to(x.device)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
sp = F.affine_grid(id_mat, (bs,1,*new_sz))+1.
grid = sp*unsqueeze(szs.t(),1,n=2)+unsqueeze(lows.t()*2.,1,n=2)
return F.grid_sample(x.unsqueeze(1), grid-1)
# Cell
@patch
def to_nchan(x:Tensor, wins, bins=None):
res = [x.windowed(*win) for win in wins]
if not isinstance(bins,int) or bins!=0: res.append(x.hist_scaled(bins).clamp(0,1))
dim = [0,1][x.dim()==3]
return TensorCTScan(torch.stack(res, dim=dim))
# Cell
@patch
def to_nchan(x:DcmDataset, wins, bins=None):
return x.scaled_px.to_nchan(wins, bins)
# Cell
@patch
def to_3chan(x:Tensor, win1, win2, bins=None):
return x.to_nchan([win1,win2],bins=bins)
# Cell
@patch
def to_3chan(x:DcmDataset, win1, win2, bins=None):
return x.scaled_px.to_3chan(win1, win2, bins)
# Cell
@patch
def save_jpg(x:(Tensor,DcmDataset), path, wins, bins=None, quality=90):
"Save tensor or dicom image into `jpg` format"
fn = Path(path).with_suffix('.jpg')
x = (x.to_nchan(wins, bins)*255).byte()
im = Image.fromarray(x.permute(1,2,0).numpy(), mode=['RGB','CMYK'][x.shape[0]==4])
im.save(fn, quality=quality)
# Cell
@patch
def to_uint16(x:(Tensor,DcmDataset), bins=None):
"Convert into a unit16 array"
d = x.hist_scaled(bins).clamp(0,1) * 2**16
return d.numpy().astype(np.uint16)
# Cell
@patch
def save_tif16(x:(Tensor,DcmDataset), path, bins=None, compress=True):
"Save tensor or dicom image into `tiff` format"
fn = Path(path).with_suffix('.tif')
Image.fromarray(x.to_uint16(bins)).save(str(fn), compression='tiff_deflate' if compress else None)
# Cell
@patch
def set_pixels(self:DcmDataset, px):
self.PixelData = px.tobytes()
self.Rows,self.Columns = px.shape
DcmDataset.pixel_array = property(DcmDataset.pixel_array.fget, set_pixels)
# Cell
@patch
def zoom(self:DcmDataset, ratio):
"Zoom image by specified ratio"
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
self.set_pixels(ndimage.zoom(self.pixel_array, ratio))
# Cell
@patch
def zoom_to(self:DcmDataset, sz):
"Change image size to specified pixel size"
if not isinstance(sz,(list,tuple)): sz=(sz,sz)
rows,cols = sz
self.zoom((rows/self.Rows,cols/self.Columns))
# Cell
@patch(as_prop=True)
def shape(self:DcmDataset):
"Returns the shape of a dicom image as rows and columns"
return self.Rows,self.Columns
# Cell
def _cast_dicom_special(x):
cls = type(x)
if not cls.__module__.startswith('pydicom'): return x
if cls.__base__ == object: return x
return cls.__base__(x)
def _split_elem(res,k,v):
if not isinstance(v,DcmMultiValue): return
res[f'Multi{k}'] = 1
for i,o in enumerate(v): res[f'{k}{"" if i==0 else i}']=o
# Cell
@patch
def as_dict(self:DcmDataset, px_summ=True, window=dicom_windows.brain):
"Convert the header of a dicom into a dictionary"
pxdata = (0x7fe0,0x0010)
vals = [self[o] for o in self.keys() if o != pxdata]
its = [(v.keyword,v.value) for v in vals]
res = dict(its)
res['fname'] = self.filename
for k,v in its: _split_elem(res,k,v)
if not px_summ: return res
stats = 'min','max','mean','std'
try:
pxs = self.pixel_array
for f in stats: res['img_'+f] = getattr(pxs,f)()
res['img_pct_window'] = self.pct_in_window(*window)
except Exception as e:
for f in stats: res['img_'+f] = 0
print(res,e)
for k in res: res[k] = _cast_dicom_special(res[k])
return res
# Cell
def _dcm2dict(fn, **kwargs): return fn.dcmread().as_dict(**kwargs)
# Cell
@delegates(parallel)
def _from_dicoms(cls, fns, n_workers=0, **kwargs):
return pd.DataFrame(parallel(_dcm2dict, fns, n_workers=n_workers, **kwargs))
pd.DataFrame.from_dicoms = classmethod(_from_dicoms)
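# Illustrative usage sketch (added for clarity, not part of the original module): building a
# metadata DataFrame from a folder of DICOM files. The folder path is hypothetical.
def _example_dicom_dataframe(path='dicoms/'):
    "Hedged sketch: collect DICOM headers (plus pixel summary statistics) into a DataFrame."
    fnames = get_dicom_files(path)
    return pd.DataFrame.from_dicoms(fnames, px_summ=True)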
# Cell
class DicomSegmentationDataLoaders(DataLoaders):
"Basic wrapper around DICOM `DataLoaders` with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock(cls=PILDicom), MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
        return res

# (end of zwyfastai-2.0.21/src/fastai/medical/imaging.py)
__all__ = ['get_grid', 'clip_remove_empty', 'bb_pad', 'ImageBlock', 'MaskBlock', 'PointBlock', 'BBoxBlock',
'BBoxLblBlock', 'ImageDataLoaders', 'SegmentationDataLoaders']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
# Cell
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
# Cell
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[~empty])
# Cell
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
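# Worked sketch (illustrative): `bb_pad` is the `before_batch` collate helper for bbox
# batches. With toy inputs, a degenerate box is dropped by `clip_remove_empty` and the
# shorter box/label lists are padded up to the longest sample in the batch.
#   img = torch.zeros(3, 8, 8)
#   s1 = (img, tensor([[-.5,-.5,.5,.5],[0.,0.,.3,.8]]), tensor([1,2]))
#   s2 = (img, tensor([[-1.,-1.,1.,1.],[.5,.5,.5,.9]]), tensor([3,2]))
#   bb_pad([s1, s2])     # s2 loses its zero-area box, then gets one padded box/label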
# Cell
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
# Cell
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
# Cell
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
# Cell
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
# Cell
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
# Cell
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
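# Usage sketch (illustrative): the bbox blocks are combined in a DataBlock for object
# detection; `get_bboxes` and `get_labels` are hypothetical per-image getters you supply,
# and n_inp=1 marks the image as the only input.
#   dblock = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock),
#                      get_items=get_image_files, get_y=[get_bboxes, get_labels],
#                      splitter=RandomSplitter(), item_tfms=Resize(256), n_inp=1)
#   dls = dblock.dataloaders('path/to/images', bs=16)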
# Cell
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
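# Usage sketch (illustrative): an imagenet-style folder with train/valid subfolders,
# resized per item and augmented per batch; the path is hypothetical.
#   dls = ImageDataLoaders.from_folder('path/to/imagenette', item_tfms=Resize(224),
#                                      batch_tfms=aug_transforms(), bs=64)
#   dls.show_batch(max_n=9)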
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
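# Usage sketch (illustrative): labels kept in a csv with filename and label columns;
# `folder` prefixes the filenames and `label_delim` turns it into a multi-label task.
# Column names and path are hypothetical.
#   dls = ImageDataLoaders.from_csv('path/to/data', 'labels.csv', folder='images',
#                                   fn_col='fname', label_col='tags', label_delim=' ',
#                                   item_tfms=Resize(224))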
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
# Cell
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
        return res

# (end of zwyfastai-2.0.21/src/fastai/vision/data.py)
__all__ = ['has_pool_type', 'create_body', 'create_head', 'default_split', 'model_meta', 'create_cnn_model',
'cnn_learner', 'create_unet_model', 'unet_learner', 'has_pool_type', 'create_body', 'create_head',
'default_split', 'model_meta', 'create_cnn_model', 'cnn_learner', 'create_unet_model', 'unet_learner']
# Cell
from ..basics import *
from .core import *
from .data import *
from .augment import *
from . import models
# Cell
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
# Cell
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
# Cell
def _get_first_layer(m):
"Access first layer of a model"
c,p,n = m,None,None # child, parent, name
for n in next(m.named_parameters())[0].split('.')[:-1]:
p,c=c,getattr(c,n)
return c,p,n
# Cell
def _load_pretrained_weights(new_layer, previous_layer):
"Load pretrained weights based on number of input channels"
n_in = getattr(new_layer, 'in_channels')
if n_in==1:
# we take the sum
new_layer.weight.data = previous_layer.weight.data.sum(dim=1, keepdim=True)
elif n_in==2:
# we take first 2 channels + 50%
new_layer.weight.data = previous_layer.weight.data[:,:2] * 1.5
else:
        # keep the 3-channel weights and zero out the extra channels
new_layer.weight.data[:,:3] = previous_layer.weight.data
new_layer.weight.data[:,3:].zero_()
# Cell
def _update_first_layer(model, n_in, pretrained):
"Change first layer based on number of input channels"
if n_in == 3: return
first_layer, parent, name = _get_first_layer(model)
assert isinstance(first_layer, nn.Conv2d), f'Change of input channels only supported with Conv2d, found {first_layer.__class__.__name__}'
assert getattr(first_layer, 'in_channels') == 3, f'Unexpected number of input channels, found {getattr(first_layer, "in_channels")} while expecting 3'
params = {attr:getattr(first_layer, attr) for attr in 'out_channels kernel_size stride padding dilation groups padding_mode'.split()}
params['bias'] = getattr(first_layer, 'bias') is not None
params['in_channels'] = n_in
new_layer = nn.Conv2d(**params)
if pretrained:
_load_pretrained_weights(new_layer, first_layer)
setattr(parent, name, new_layer)
# Cell
def create_body(arch, n_in=3, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained=pretrained)
_update_first_layer(model, n_in, pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise TypeError("cut must be either integer or a function")
# Cell
def create_head(nf, n_out, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False, lin_first=False, y_range=None):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `n_out` classes."
lin_ftrs = [nf, 512, n_out] if lin_ftrs is None else [nf] + lin_ftrs + [n_out]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
if lin_first: layers.append(nn.Dropout(ps.pop(0)))
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += LinBnDrop(ni, no, bn=True, p=p, act=actn, lin_first=lin_first)
if lin_first: layers.append(nn.Linear(lin_ftrs[-2], n_out))
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
if y_range is not None: layers.append(SigmoidRange(*y_range))
return nn.Sequential(*layers)
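# Usage sketch (illustrative): build a standalone head for 10 classes. In this version
# the caller passes the already-pooled feature count, doubled when `concat_pool=True`
# (e.g. 1024 for a resnet34 body), which is what `create_cnn_model` below does.
#   head = create_head(1024, 10, ps=0.5)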
# Cell
from ..callback.hook import num_features_model
# Cell
def default_split(m):
"Default split of a model between body and head"
return L(m[0], m[1:]).map(params)
# Cell
def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_xresnet_meta = {'cut':-4, 'split':_xresnet_split, 'stats':imagenet_stats}
_resnet_meta = {'cut':-2, 'split':_resnet_split, 'stats':imagenet_stats}
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split, 'stats':imagenet_stats}
_densenet_meta = {'cut':-1, 'split':_densenet_split, 'stats':imagenet_stats}
_vgg_meta = {'cut':-2, 'split':_vgg_split, 'stats':imagenet_stats}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split, 'stats':imagenet_stats}
# Cell
model_meta = {
models.xresnet.xresnet18 :{**_xresnet_meta}, models.xresnet.xresnet34: {**_xresnet_meta},
models.xresnet.xresnet50 :{**_xresnet_meta}, models.xresnet.xresnet101:{**_xresnet_meta},
models.xresnet.xresnet152:{**_xresnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
# Cell
@delegates(create_head)
def create_cnn_model(arch, n_out, pretrained=True, cut=None, n_in=3, init=nn.init.kaiming_normal_, custom_head=None,
concat_pool=True, **kwargs):
"Create custom convnet architecture"
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, n_in, pretrained, ifnone(cut, meta['cut']))
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, n_out, concat_pool=concat_pool, **kwargs)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
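# Usage sketch (illustrative): build the body+head model directly, e.g. to inspect or
# customize it before wrapping it in a Learner; pretrained=False avoids a download and
# n_in=1 demonstrates the first-layer adaptation above.
#   m = create_cnn_model(models.resnet34, n_out=10, pretrained=False, n_in=1)
#   m[0], m[1]   # body, head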
# Cell
def _add_norm(dls, meta, pretrained):
if not pretrained: return
after_batch = dls.after_batch
if first(o for o in after_batch.fs if isinstance(o,Normalize)): return
stats = meta.get('stats')
if stats is None: return
after_batch.add(Normalize.from_stats(*stats))
# Cell
@delegates(create_cnn_model)
def cnn_learner(dls, arch, normalize=True, n_out=None, pretrained=True, config=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
# other model args
**kwargs):
"Build a convnet style learner from `dls` and `arch`"
if config:
warnings.warn('config param is deprecated. Pass your args directly to cnn_learner.')
kwargs = {**config, **kwargs}
meta = model_meta.get(arch, _default_meta)
if normalize: _add_norm(dls, meta, pretrained)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
model = create_cnn_model(arch, n_out, pretrained=pretrained, **kwargs)
splitter=ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
return learn
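# Usage sketch (illustrative): the usual transfer-learning flow, with `dls` an
# ImageDataLoaders built as in data.py above.
#   learn = cnn_learner(dls, models.resnet34, metrics=error_rate)
#   learn.fine_tune(3)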
# Cell
@delegates(models.unet.DynamicUnet.__init__)
def create_unet_model(arch, n_out, img_size, pretrained=True, cut=None, n_in=3, **kwargs):
"Create custom unet architecture"
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, n_in, pretrained, ifnone(cut, meta['cut']))
model = models.unet.DynamicUnet(body, n_out, img_size, **kwargs)
return model
# Cell
@delegates(create_unet_model)
def unet_learner(dls, arch, normalize=True, n_out=None, pretrained=True, config=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
# other model args
**kwargs):
"Build a unet learner from `dls` and `arch`"
if config:
warnings.warn('config param is deprecated. Pass your args directly to unet_learner.')
kwargs = {**config, **kwargs}
meta = model_meta.get(arch, _default_meta)
if normalize: _add_norm(dls, meta, pretrained)
n_out = ifnone(n_out, get_c(dls))
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
img_size = dls.one_batch()[0].shape[-2:]
assert img_size, "image size could not be inferred from data"
model = create_unet_model(arch, n_out, img_size, pretrained=pretrained, **kwargs)
splitter=ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn)
if kwargs: store_attr(self=learn, **kwargs)
return learn
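# Usage sketch (illustrative): segmentation with a resnet encoder; `path`, `fnames`,
# `label_func` and `codes` are assumed to come from your dataset.
#   dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
#   learn = unet_learner(dls, models.resnet34)
#   learn.fine_tune(5)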
# Cell
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:(TensorMask, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=6,
nrows=None, ncols=1, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True,
title='Target/Prediction')
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))]
for o in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(o.itemgot(0),ctxs[1::2],range(2*max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:TensorImage, samples, outs, ctxs=None, max_n=10, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(3*min(len(samples), max_n), ncols=3, figsize=figsize, title='Input/Target/Prediction')
for i in range(2):
ctxs[i::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::3],range(max_n))]
ctxs[2::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs[2::3],range(max_n))]
return ctxs
# Cell
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
# Cell
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
# Cell
from ..basics import *
from .core import *
from .data import *
from .augment import *
from . import models
# Cell
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
# Cell
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
# Cell
def _get_first_layer(m):
"Access first layer of a model"
c,p,n = m,None,None # child, parent, name
for n in next(m.named_parameters())[0].split('.')[:-1]:
p,c=c,getattr(c,n)
return c,p,n
# Cell
def _load_pretrained_weights(new_layer, previous_layer):
"Load pretrained weights based on number of input channels"
n_in = getattr(new_layer, 'in_channels')
if n_in==1:
# we take the sum
new_layer.weight.data = previous_layer.weight.data.sum(dim=1, keepdim=True)
elif n_in==2:
# we take first 2 channels + 50%
new_layer.weight.data = previous_layer.weight.data[:,:2] * 1.5
else:
        # keep the 3-channel weights and zero out the extra channels
new_layer.weight.data[:,:3] = previous_layer.weight.data
new_layer.weight.data[:,3:].zero_()
# Cell
def _update_first_layer(model, n_in, pretrained):
"Change first layer based on number of input channels"
if n_in == 3: return
first_layer, parent, name = _get_first_layer(model)
assert isinstance(first_layer, nn.Conv2d), f'Change of input channels only supported with Conv2d, found {first_layer.__class__.__name__}'
assert getattr(first_layer, 'in_channels') == 3, f'Unexpected number of input channels, found {getattr(first_layer, "in_channels")} while expecting 3'
params = {attr:getattr(first_layer, attr) for attr in 'out_channels kernel_size stride padding dilation groups padding_mode'.split()}
params['bias'] = getattr(first_layer, 'bias') is not None
params['in_channels'] = n_in
new_layer = nn.Conv2d(**params)
if pretrained:
_load_pretrained_weights(new_layer, first_layer)
setattr(parent, name, new_layer)
# Cell
def create_body(arch, n_in=3, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained=pretrained)
_update_first_layer(model, n_in, pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise TypeError("cut must be either integer or a function")
# Cell
def create_head(nf, n_out, lin_ftrs=None, ps=0.5, concat_pool=True, first_bn=True, bn_final=False,
lin_first=False, y_range=None):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `n_out` classes."
if concat_pool: nf *= 2
lin_ftrs = [nf, 512, n_out] if lin_ftrs is None else [nf] + lin_ftrs + [n_out]
bns = [first_bn] + [True]*len(lin_ftrs[1:])
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
if lin_first: layers.append(nn.Dropout(ps.pop(0)))
for ni,no,bn,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], bns, ps, actns):
layers += LinBnDrop(ni, no, bn=bn, p=p, act=actn, lin_first=lin_first)
if lin_first: layers.append(nn.Linear(lin_ftrs[-2], n_out))
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
if y_range is not None: layers.append(SigmoidRange(*y_range))
return nn.Sequential(*layers)
# Cell
from ..callback.hook import num_features_model
# Cell
def default_split(m):
"Default split of a model between body and head"
return L(m[0], m[1:]).map(params)
# Cell
def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_xresnet_meta = {'cut':-4, 'split':_xresnet_split, 'stats':imagenet_stats}
_resnet_meta = {'cut':-2, 'split':_resnet_split, 'stats':imagenet_stats}
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split, 'stats':imagenet_stats}
_densenet_meta = {'cut':-1, 'split':_densenet_split, 'stats':imagenet_stats}
_vgg_meta = {'cut':-2, 'split':_vgg_split, 'stats':imagenet_stats}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split, 'stats':imagenet_stats}
# Cell
model_meta = {
models.xresnet.xresnet18 :{**_xresnet_meta}, models.xresnet.xresnet34: {**_xresnet_meta},
models.xresnet.xresnet50 :{**_xresnet_meta}, models.xresnet.xresnet101:{**_xresnet_meta},
models.xresnet.xresnet152:{**_xresnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
# Cell
@delegates(create_head)
def create_cnn_model(arch, n_out, pretrained=True, cut=None, n_in=3, init=nn.init.kaiming_normal_, custom_head=None,
concat_pool=True, **kwargs):
"Create custom convnet architecture"
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, n_in, pretrained, ifnone(cut, meta['cut']))
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children()))
head = create_head(nf, n_out, concat_pool=concat_pool, **kwargs)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
# Cell
def _add_norm(dls, meta, pretrained):
if not pretrained: return
after_batch = dls.after_batch
if first(o for o in after_batch.fs if isinstance(o,Normalize)): return
stats = meta.get('stats')
if stats is None: return
after_batch.add(Normalize.from_stats(*stats))
# Cell
@delegates(create_cnn_model)
def cnn_learner(dls, arch, normalize=True, n_out=None, pretrained=True, config=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
# other model args
**kwargs):
"Build a convnet style learner from `dls` and `arch`"
if config:
warnings.warn('config param is deprecated. Pass your args directly to cnn_learner.')
kwargs = {**config, **kwargs}
meta = model_meta.get(arch, _default_meta)
if normalize: _add_norm(dls, meta, pretrained)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
model = create_cnn_model(arch, n_out, pretrained=pretrained, **kwargs)
splitter=ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
return learn
# Cell
@delegates(models.unet.DynamicUnet.__init__)
def create_unet_model(arch, n_out, img_size, pretrained=True, cut=None, n_in=3, **kwargs):
"Create custom unet architecture"
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, n_in, pretrained, ifnone(cut, meta['cut']))
model = models.unet.DynamicUnet(body, n_out, img_size, **kwargs)
return model
# Cell
@delegates(create_unet_model)
def unet_learner(dls, arch, normalize=True, n_out=None, pretrained=True, config=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
# other model args
**kwargs):
"Build a unet learner from `dls` and `arch`"
if config:
warnings.warn('config param is deprecated. Pass your args directly to unet_learner.')
kwargs = {**config, **kwargs}
meta = model_meta.get(arch, _default_meta)
if normalize: _add_norm(dls, meta, pretrained)
n_out = ifnone(n_out, get_c(dls))
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
img_size = dls.one_batch()[0].shape[-2:]
assert img_size, "image size could not be inferred from data"
model = create_unet_model(arch, n_out, img_size, pretrained=pretrained, **kwargs)
splitter=ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
return learn
# Cell
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:(TensorMask, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=6,
nrows=None, ncols=1, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True,
title='Target/Prediction')
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))]
for o in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(o.itemgot(0),ctxs[1::2],range(2*max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:TensorImage, samples, outs, ctxs=None, max_n=10, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(3*min(len(samples), max_n), ncols=3, figsize=figsize, title='Input/Target/Prediction')
for i in range(2):
ctxs[i::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::3],range(max_n))]
ctxs[2::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs[2::3],range(max_n))]
return ctxs
# Cell
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
# Cell
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
    display_df(pd.DataFrame(rows))

# (end of zwyfastai-2.0.21/src/fastai/vision/learner.py)
__all__ = ['RandTransform', 'TensorTypes', 'FlipItem', 'DihedralItem', 'PadMode', 'CropPad', 'RandomCrop',
'OldRandomCrop', 'ResizeMethod', 'Resize', 'RandomResizedCrop', 'RatioResize', 'affine_grid',
'AffineCoordTfm', 'RandomResizedCropGPU', 'mask_tensor', 'affine_mat', 'flip_mat', 'Flip',
'DeterministicDraw', 'DeterministicFlip', 'dihedral_mat', 'Dihedral', 'DeterministicDihedral', 'rotate_mat',
'Rotate', 'zoom_mat', 'Zoom', 'find_coeffs', 'apply_perspective', 'Warp', 'SpaceTfm', 'LightingTfm',
'Brightness', 'Contrast', 'grayscale', 'Saturation', 'rgb2hsv', 'hsv2rgb', 'HSVTfm', 'Hue',
'cutout_gaussian', 'norm_apply_denorm', 'RandomErasing', 'setup_aug_tfms', 'aug_transforms']
# Cell
from ..data.all import *
from .core import *
from .data import *
# Cell
from torch import stack, zeros_like as t0, ones_like as t1
from torch.distributions.bernoulli import Bernoulli
# Cell
class RandTransform(DisplayedTransform):
"A transform that before_call its state at each `__call__`"
do,nm,supports,split_idx = True,None,[],0
def __init__(self, p=1., nm=None, before_call=None, **kwargs):
store_attr('p')
super().__init__(**kwargs)
self.before_call = ifnone(before_call,self.before_call)
def before_call(self, b, split_idx):
"Set `self.do` based on `self.p`"
self.do = self.p==1. or random.random() < self.p
def __call__(self, b, split_idx=None, **kwargs):
self.before_call(b, split_idx=split_idx)
return super().__call__(b, split_idx=split_idx, **kwargs) if self.do else b
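# Usage sketch (illustrative): a minimal custom item transform built on RandTransform.
# `before_call` decides (via self.do) whether this call applies and can sample any state;
# `encodes` only runs when self.do is True. RandomInvert is a hypothetical example.
#   class RandomInvert(RandTransform):
#       "Invert pixel values with probability `p`"
#       def __init__(self, p=0.3): super().__init__(p=p)
#       def encodes(self, x:TensorImage): return 255-x if x.dtype==torch.uint8 else 1.-x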
# Cell
def _neg_axis(x, axis):
x[...,axis] = -x[...,axis]
return x
TensorTypes = (TensorImage,TensorMask,TensorPoint,TensorBBox)
# Internal Cell
@patch
def flip_lr(x:Image.Image): return x.transpose(Image.FLIP_LEFT_RIGHT)
@patch
def flip_lr(x:TensorImageBase): return x.flip(-1)
@patch
def flip_lr(x:TensorPoint): return TensorPoint(_neg_axis(x.clone(), 0))
@patch
def flip_lr(x:TensorBBox): return TensorBBox(TensorPoint(x.view(-1,2)).flip_lr().view(-1,4))
# Cell
class FlipItem(RandTransform):
"Randomly flip with probability `p`"
def __init__(self, p=0.5): super().__init__(p=p)
def encodes(self, x:(Image.Image,*TensorTypes)): return x.flip_lr()
# Internal Cell
@patch
def dihedral(x:PILImage, k): return x if k==0 else x.transpose(k-1)
@patch
def dihedral(x:TensorImage, k):
if k in [1,3,4,7]: x = x.flip(-1)
if k in [2,4,5,7]: x = x.flip(-2)
if k in [3,5,6,7]: x = x.transpose(-1,-2)
return x
@patch
def dihedral(x:TensorPoint, k):
if k in [1,3,4,7]: x = _neg_axis(x, 0)
if k in [2,4,5,7]: x = _neg_axis(x, 1)
if k in [3,5,6,7]: x = x.flip(1)
return x
@patch
def dihedral(x:TensorBBox, k):
pnts = TensorPoint(x.view(-1,2)).dihedral(k).view(-1,2,2)
tl,br = pnts.min(dim=1)[0],pnts.max(dim=1)[0]
return TensorBBox(torch.cat([tl, br], dim=1), img_size=x.img_size)
# Cell
class DihedralItem(RandTransform):
"Randomly flip with probability `p`"
def before_call(self, b, split_idx):
super().before_call(b, split_idx)
self.k = random.randint(0,7)
def encodes(self, x:(Image.Image,*TensorTypes)): return x.dihedral(self.k)
# Cell
from torchvision.transforms.functional import pad as tvpad
# Cell
mk_class('PadMode', **{o:o.lower() for o in ['Zeros', 'Border', 'Reflection']},
doc="All possible padding mode as attributes to get tab-completion and typo-proofing")
# Cell
#nbdev_comment _all_ = ['PadMode']
# Internal Cell
_pad_modes = {'zeros': 'constant', 'border': 'edge', 'reflection': 'reflect'}
@patch
def _do_crop_pad(x:Image.Image, sz, tl, orig_sz,
pad_mode=PadMode.Zeros, resize_mode=Image.BILINEAR, resize_to=None):
if any(tl.ge(0)):
# At least one dim is inside the image, so needs to be cropped
c = tl.max(0)
x = x.crop((*c, *c.add(sz).min(orig_sz)))
if any(tl.lt(0)):
# At least one dim is outside the image, so needs to be padded
p = (-tl).max(0)
f = (sz-orig_sz-p).max(0)
x = tvpad(x, (*p, *f), padding_mode=_pad_modes[pad_mode])
if resize_to is not None: x = x.resize(resize_to, resize_mode)
return x
@patch
def _do_crop_pad(x:TensorPoint, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):
#assert pad_mode==PadMode.Zeros,"Only zero padding is supported for `TensorPoint` and `TensorBBox`"
orig_sz,sz,tl = map(FloatTensor, (orig_sz,sz,tl))
return TensorPoint((x+1)*orig_sz/sz - tl*2/sz - 1, sz=sz if resize_to is None else resize_to)
@patch
def _do_crop_pad(x:TensorBBox, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):
bbox = TensorPoint._do_crop_pad(x.view(-1,2), sz, tl, orig_sz, pad_mode, resize_to).view(-1,4)
return TensorBBox(bbox, img_size=x.img_size)
@patch
def crop_pad(x:(TensorBBox,TensorPoint,Image.Image),
sz, tl=None, orig_sz=None, pad_mode=PadMode.Zeros, resize_mode=Image.BILINEAR, resize_to=None):
if isinstance(sz,int): sz = (sz,sz)
orig_sz = fastuple(_get_sz(x) if orig_sz is None else orig_sz)
sz,tl = fastuple(sz),fastuple(((_get_sz(x)-sz)//2) if tl is None else tl)
return x._do_crop_pad(sz, tl, orig_sz=orig_sz, pad_mode=pad_mode, resize_mode=resize_mode, resize_to=resize_to)
# Cell
def _process_sz(size):
if isinstance(size,int): size=(size,size)
return fastuple(size[1],size[0])
def _get_sz(x):
if isinstance(x, tuple): x = x[0]
if not isinstance(x, Tensor): return fastuple(x.size)
return fastuple(getattr(x, 'img_size', getattr(x, 'sz', (x.shape[-1], x.shape[-2]))))
# Cell
@delegates()
class CropPad(DisplayedTransform):
"Center crop or pad an image to `size`"
order = 0
def __init__(self, size, pad_mode=PadMode.Zeros, **kwargs):
size = _process_sz(size)
store_attr()
super().__init__(**kwargs)
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
orig_sz = _get_sz(x)
tl = (orig_sz-self.size)//2
return x.crop_pad(self.size, tl, orig_sz=orig_sz, pad_mode=self.pad_mode)
# Cell
@delegates()
class RandomCrop(RandTransform):
"Randomly crop an image to `size`"
split_idx,order = None,1
def __init__(self, size, **kwargs):
size = _process_sz(size)
store_attr()
super().__init__(**kwargs)
def before_call(self, b, split_idx):
self.orig_sz = _get_sz(b)
if split_idx: self.tl = (self.orig_sz-self.size)//2
else:
wd = self.orig_sz[0] - self.size[0]
hd = self.orig_sz[1] - self.size[1]
w_rand = (wd, -1) if wd < 0 else (0, wd)
h_rand = (hd, -1) if hd < 0 else (0, hd)
self.tl = fastuple(random.randint(*w_rand), random.randint(*h_rand))
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
return x.crop_pad(self.size, self.tl, orig_sz=self.orig_sz)
# Cell
class OldRandomCrop(CropPad):
"Randomly crop an image to `size`"
def before_call(self, b, split_idx):
super().before_call(b, split_idx)
w,h = self.orig_sz
if not split_idx: self.tl = (random.randint(0,w-self.cp_size[0]), random.randint(0,h-self.cp_size[1]))
# Cell
mk_class('ResizeMethod', **{o:o.lower() for o in ['Squish', 'Crop', 'Pad']},
doc="All possible resize method as attributes to get tab-completion and typo-proofing")
# Cell
#nbdev_comment _all_ = ['ResizeMethod']
# Cell
@delegates()
class Resize(RandTransform):
    "Resize image to `size` using `method`"
    split_idx,mode,mode_mask,order = None,Image.BILINEAR,Image.NEAREST,1
def __init__(self, size, method=ResizeMethod.Crop, pad_mode=PadMode.Reflection,
resamples=(Image.BILINEAR, Image.NEAREST), **kwargs):
size = _process_sz(size)
store_attr()
super().__init__(**kwargs)
self.mode,self.mode_mask = resamples
def before_call(self, b, split_idx):
if self.method==ResizeMethod.Squish: return
self.pcts = (0.5,0.5) if split_idx else (random.random(),random.random())
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
orig_sz = _get_sz(x)
if self.method==ResizeMethod.Squish:
return x.crop_pad(orig_sz, fastuple(0,0), orig_sz=orig_sz, pad_mode=self.pad_mode,
resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.size)
w,h = orig_sz
op = (operator.lt,operator.gt)[self.method==ResizeMethod.Pad]
m = w/self.size[0] if op(w/self.size[0],h/self.size[1]) else h/self.size[1]
cp_sz = (int(m*self.size[0]),int(m*self.size[1]))
tl = fastuple(int(self.pcts[0]*(w-cp_sz[0])), int(self.pcts[1]*(h-cp_sz[1])))
return x.crop_pad(cp_sz, tl, orig_sz=orig_sz, pad_mode=self.pad_mode,
resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.size)
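# Usage sketch (illustrative): Resize is an item transform; `method` picks the strategy
# (crop by default, with a random crop at train time and a center crop at valid time).
#   item_tfms = Resize(224)                                     # crop
#   item_tfms = Resize(224, method=ResizeMethod.Squish)         # squish
#   item_tfms = Resize(224, method=ResizeMethod.Pad, pad_mode=PadMode.Zeros)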
# Cell
@delegates()
class RandomResizedCrop(RandTransform):
"Picks a random scaled crop of an image and resize it to `size`"
split_idx,order = None,1
def __init__(self, size, min_scale=0.08, ratio=(3/4, 4/3), resamples=(Image.BILINEAR, Image.NEAREST),
val_xtra=0.14, **kwargs):
size = _process_sz(size)
store_attr()
super().__init__(**kwargs)
self.mode,self.mode_mask = resamples
def before_call(self, b, split_idx):
w,h = self.orig_sz = _get_sz(b)
if split_idx:
xtra = math.ceil(max(*self.size[:2])*self.val_xtra/8)*8
self.final_size = (self.size[0]+xtra, self.size[1]+xtra)
self.tl,self.cp_size = (0,0),self.orig_sz
return
self.final_size = self.size
for attempt in range(10):
area = random.uniform(self.min_scale,1.) * w * h
ratio = math.exp(random.uniform(math.log(self.ratio[0]), math.log(self.ratio[1])))
nw = int(round(math.sqrt(area * ratio)))
nh = int(round(math.sqrt(area / ratio)))
if nw <= w and nh <= h:
self.cp_size = (nw,nh)
self.tl = random.randint(0,w-nw), random.randint(0,h - nh)
return
if w/h < self.ratio[0]: self.cp_size = (w, int(w/self.ratio[0]))
elif w/h > self.ratio[1]: self.cp_size = (int(h*self.ratio[1]), h)
else: self.cp_size = (w, h)
self.tl = ((w-self.cp_size[0])//2, (h-self.cp_size[1])//2)
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
res = x.crop_pad(self.cp_size, self.tl, orig_sz=self.orig_sz,
resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.final_size)
if self.final_size != self.size: res = res.crop_pad(self.size) #Validation set: one final center crop
return res
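# Usage sketch (illustrative): the ImageNet-style training crop as an item transform;
# at validation time it falls back to a slightly padded center crop (val_xtra).
#   dblock = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files,
#                      get_y=parent_label, item_tfms=RandomResizedCrop(224, min_scale=0.35))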
# Cell
class RatioResize(DisplayedTransform):
'Resizes the biggest dimension of an image to `max_sz` maintaining the aspect ratio'
order = 1
def __init__(self, max_sz, resamples=(Image.BILINEAR, Image.NEAREST), **kwargs):
store_attr()
super().__init__(**kwargs)
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
w,h = _get_sz(x)
if w >= h: nw,nh = self.max_sz,h*self.max_sz/w
else: nw,nh = w*self.max_sz/h,self.max_sz
return Resize(size=(int(nh),int(nw)), resamples=self.resamples)(x)
# Cell
def _init_mat(x):
mat = torch.eye(3, device=x.device).float()
return mat.unsqueeze(0).expand(x.size(0), 3, 3).contiguous()
# Cell
def _grid_sample(x, coords, mode='bilinear', padding_mode='reflection', align_corners=None):
"Resample pixels in `coords` from `x` by `mode`, with `padding_mode` in ('reflection','border','zeros')."
#coords = coords.permute(0, 3, 1, 2).contiguous().permute(0, 2, 3, 1) # optimize layout for grid_sample
if mode=='bilinear': # hack to get smoother downwards resampling
mn,mx = coords.min(),coords.max()
# max amount we're affine zooming by (>1 means zooming in)
z = 1/(mx-mn).item()*2
# amount we're resizing by, with 100% extra margin
d = min(x.shape[-2]/coords.shape[-2], x.shape[-1]/coords.shape[-1])/2
# If we're resizing up by >200%, and we're zooming less than that, interpolate first
if d>1 and d>z:
# Pytorch > v1.4.x needs an extra argument when calling nn.functional.interpolate to preserve previous behaviour
if (int(torch.__version__[0:4].replace(".", "")) > 14):
x = F.interpolate(x, scale_factor=1/d, mode='area', recompute_scale_factor=True)
else:
x = F.interpolate(x, scale_factor=1/d, mode='area')
return F.grid_sample(x, coords, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
# Cell
def affine_grid(theta, size, align_corners=None):
return TensorFlowField(F.affine_grid(theta, size, align_corners=align_corners))
# Internal Cell
@patch
def affine_coord(x: TensorImage, mat=None, coord_tfm=None, sz=None, mode='bilinear',
pad_mode=PadMode.Reflection, align_corners=True):
if mat is None and coord_tfm is None and sz is None: return x
size = tuple(x.shape[-2:]) if sz is None else (sz,sz) if isinstance(sz,int) else tuple(sz)
if mat is None: mat = _init_mat(x)[:,:2]
coords = affine_grid(mat, x.shape[:2] + size, align_corners=align_corners)
if coord_tfm is not None: coords = coord_tfm(coords)
return TensorImage(_grid_sample(x, coords, mode=mode, padding_mode=pad_mode, align_corners=align_corners))
@patch
def affine_coord(x: TensorMask, mat=None, coord_tfm=None, sz=None, mode='nearest',
pad_mode=PadMode.Reflection, align_corners=True):
add_dim = (x.ndim==3)
if add_dim: x = x[:,None]
res = TensorImage.affine_coord(x.float(), mat, coord_tfm, sz, mode, pad_mode, align_corners).long()
if add_dim: res = res[:,0]
return TensorMask(res)
@patch
def affine_coord(x: TensorPoint, mat=None, coord_tfm=None, sz=None, mode='nearest',
pad_mode=PadMode.Zeros, align_corners=True):
#assert pad_mode==PadMode.Zeros, "Only zero padding is supported for `TensorPoint` and `TensorBBox`"
if sz is None: sz = getattr(x, "img_size", None)
if coord_tfm is not None: x = coord_tfm(x, invert=True)
if mat is not None:
mat = TensorPoint(mat)
x = (x - mat[:,:,2].unsqueeze(1)) @ torch.inverse(mat[:,:,:2].transpose(1,2))
return TensorPoint(x, sz=sz)
@patch
def affine_coord(x: TensorBBox, mat=None, coord_tfm=None, sz=None, mode='nearest',
pad_mode=PadMode.Zeros, align_corners=True):
if mat is None and coord_tfm is None: return x
if sz is None: sz = getattr(x, "img_size", None)
bs,n = x.shape[:2]
pnts = stack([x[...,:2], stack([x[...,0],x[...,3]],dim=2),
stack([x[...,2],x[...,1]],dim=2), x[...,2:]], dim=2)
pnts = TensorPoint(pnts.view(bs, 4*n, 2), img_size=sz).affine_coord(mat, coord_tfm, sz, mode, pad_mode)
pnts = pnts.view(bs, n, 4, 2)
tl,dr = pnts.min(dim=2)[0],pnts.max(dim=2)[0]
return TensorBBox(torch.cat([tl, dr], dim=2), img_size=sz)
# Cell
def _prepare_mat(x, mat):
h,w = getattr(x, 'img_size', x.shape[-2:])
mat[:,0,1] *= h/w
mat[:,1,0] *= w/h
return mat[:,:2]
# Cell
class AffineCoordTfm(RandTransform):
"Combine and apply affine and coord transforms"
order,split_idx = 30,None
def __init__(self, aff_fs=None, coord_fs=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection,
mode_mask='nearest', align_corners=None, **kwargs):
store_attr(but=['aff_fs','coord_fs'])
super().__init__(**kwargs)
self.aff_fs,self.coord_fs = L(aff_fs),L(coord_fs)
self.cp_size = None if size is None else (size,size) if isinstance(size, int) else tuple(size)
def before_call(self, b, split_idx):
while isinstance(b, tuple): b = b[0]
self.split_idx = split_idx
self.do,self.mat = True,self._get_affine_mat(b)
for t in self.coord_fs: t.before_call(b)
def compose(self, tfm):
"Compose `self` with another `AffineCoordTfm` to only do the interpolation step once"
# TODO: keep `name` up to date with the combination
# TODO: have option to only show a subset of the attrs, e.g. for `Flip`
self.aff_fs += tfm.aff_fs
self.coord_fs += tfm.coord_fs
def _get_affine_mat(self, x):
aff_m = _init_mat(x)
if self.split_idx: return _prepare_mat(x, aff_m)
ms = [f(x) for f in self.aff_fs]
ms = [m for m in ms if m is not None]
for m in ms: aff_m = aff_m @ m
return _prepare_mat(x, aff_m)
def _encode(self, x, mode, reverse=False):
coord_func = None if len(self.coord_fs)==0 or self.split_idx else partial(compose_tfms, tfms=self.coord_fs, reverse=reverse)
return x.affine_coord(self.mat, coord_func, sz=self.size, mode=mode, pad_mode=self.pad_mode, align_corners=self.align_corners)
def encodes(self, x:TensorImage): return self._encode(x, self.mode)
def encodes(self, x:TensorMask): return self._encode(x, self.mode_mask)
def encodes(self, x:(TensorPoint, TensorBBox)): return self._encode(x, self.mode, reverse=True)
# Cell
class RandomResizedCropGPU(RandTransform):
"Picks a random scaled crop of an image and resize it to `size`"
split_idx,order = None,30
def __init__(self, size, min_scale=0.08, ratio=(3/4, 4/3), mode='bilinear', valid_scale=1., **kwargs):
if isinstance(size, int): size = (size,size)
store_attr()
super().__init__(**kwargs)
def before_call(self, b, split_idx):
self.do = True
h,w = fastuple((b[0] if isinstance(b, tuple) else b).shape[-2:])
for attempt in range(10):
if split_idx: break
area = random.uniform(self.min_scale,1.) * w * h
ratio = math.exp(random.uniform(math.log(self.ratio[0]), math.log(self.ratio[1])))
nw = int(round(math.sqrt(area * ratio)))
nh = int(round(math.sqrt(area / ratio)))
if nw <= w and nh <= h:
self.cp_size = (nh,nw)
self.tl = random.randint(0,h - nh),random.randint(0,w-nw)
return
if w/h < self.ratio[0]: self.cp_size = (int(w/self.ratio[0]), w)
elif w/h > self.ratio[1]: self.cp_size = (h, int(h*self.ratio[1]))
else: self.cp_size = (h, w)
if split_idx: self.cp_size = (int(self.cp_size[0]*self.valid_scale), int(self.cp_size[1]*self.valid_scale))
self.tl = ((h-self.cp_size[0])//2,(w-self.cp_size[1])//2)
def encodes(self, x:TensorImage):
x = x[...,self.tl[0]:self.tl[0]+self.cp_size[0], self.tl[1]:self.tl[1]+self.cp_size[1]]
return TensorImage(x).affine_coord(sz=self.size, mode=self.mode)
# Cell
def mask_tensor(x, p=0.5, neutral=0., batch=False):
"Mask elements of `x` with `neutral` with probability `1-p`"
if p==1.: return x
if batch: return x if random.random() < p else x.new_zeros(*x.size()) + neutral
if neutral != 0: x.add_(-neutral)
mask = x.new_empty(*x.size()).bernoulli_(p)
x.mul_(mask)
return x.add_(neutral) if neutral != 0 else x
# Cell
def _draw_mask(x, def_draw, draw=None, p=0.5, neutral=0., batch=False):
"Creates mask_tensor based on `x` with `neutral` with probability `1-p`. "
if draw is None: draw=def_draw
if callable(draw): res=draw(x)
elif is_listy(draw):
assert len(draw)>=x.size(0)
res = tensor(draw[:x.size(0)], dtype=x.dtype, device=x.device)
else: res = x.new_zeros(x.size(0)) + draw
return TensorBase(mask_tensor(res, p=p, neutral=neutral, batch=batch))
# Cell
def affine_mat(*ms):
"Restructure length-6 vector `ms` into an affine matrix with 0,0,1 in the last line"
return stack([stack([ms[0], ms[1], ms[2]], dim=1),
stack([ms[3], ms[4], ms[5]], dim=1),
stack([t0(ms[0]), t0(ms[0]), t1(ms[0])], dim=1)], dim=1)
# Cell
def flip_mat(x, p=0.5, draw=None, batch=False):
"Return a random flip matrix"
def _def_draw(x): return x.new_ones(x.size(0))
mask = x.new_ones(x.size(0)) - 2*_draw_mask(x, _def_draw, draw=draw, p=p, batch=batch)
return affine_mat(mask, t0(mask), t0(mask),
t0(mask), t1(mask), t0(mask))
# Cell
def _get_default(x, mode=None, pad_mode=None):
    if mode is None: mode='nearest' if isinstance(x, TensorMask) else 'bilinear'
if pad_mode is None: pad_mode=PadMode.Zeros if isinstance(x, (TensorPoint, TensorBBox)) else PadMode.Reflection
x0 = x[0] if isinstance(x, tuple) else x
return x0,mode,pad_mode
# Internal Cell
@patch
def flip_batch(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), p=0.5, draw=None, size=None,
mode=None, pad_mode=None, align_corners=True, batch=False):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
mat=flip_mat(x0, p=p, draw=draw, batch=batch)
return x.affine_coord(mat=mat[:,:2], sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Flip(AffineCoordTfm):
"Randomly flip a batch of images with a probability `p`"
def __init__(self, p=0.5, draw=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection, align_corners=True, batch=False):
aff_fs = partial(flip_mat, p=p, draw=draw, batch=batch)
super().__init__(aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners, p=p)
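# Usage sketch (illustrative): Flip works on whole batches (typically on the GPU), so it
# is passed through `batch_tfms` rather than `item_tfms`; the path is hypothetical.
#   dls = ImageDataLoaders.from_folder('path', item_tfms=Resize(256),
#                                      batch_tfms=[Flip(p=0.5), Normalize.from_stats(*imagenet_stats)])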
# Cell
class DeterministicDraw():
def __init__(self, vals): self.vals,self.count = vals,-1
def __call__(self, x):
self.count += 1
return x.new_zeros(x.size(0)) + self.vals[self.count%len(self.vals)]
# Cell
class DeterministicFlip(Flip):
"Flip the batch every other call"
def __init__(self, size=None, mode='bilinear', pad_mode=PadMode.Reflection, align_corners=True, **kwargs):
super().__init__(p=1., draw=DeterministicDraw([0,1]), mode=mode, pad_mode=pad_mode, align_corners=align_corners, **kwargs)
# Cell
def dihedral_mat(x, p=0.5, draw=None, batch=False):
"Return a random dihedral matrix"
def _def_draw(x): return torch.randint(0,8, (x.size(0),), device=x.device)
def _def_draw_b(x): return random.randint(0,7) + x.new_zeros((x.size(0),)).long()
idx = _draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, batch=batch).long()
xs = tensor([1,-1,1,-1,-1,1,1,-1], device=x.device).gather(0, idx)
ys = tensor([1,1,-1,1,-1,-1,1,-1], device=x.device).gather(0, idx)
m0 = tensor([1,1,1,0,1,0,0,0], device=x.device).gather(0, idx)
m1 = tensor([0,0,0,1,0,1,1,1], device=x.device).gather(0, idx)
return affine_mat(xs*m0, xs*m1, t0(xs),
ys*m1, ys*m0, t0(xs)).float()
# Internal Cell
@patch
def dihedral_batch(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), p=0.5, draw=None, size=None,
mode=None, pad_mode=None, batch=False, align_corners=True):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
mat = _prepare_mat(x, dihedral_mat(x0, p=p, draw=draw, batch=batch))
return x.affine_coord(mat=mat, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Dihedral(AffineCoordTfm):
"Apply a random dihedral transformation to a batch of images with a probability `p`"
def __init__(self, p=0.5, draw=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection, align_corners=None, batch=False):
f = partial(dihedral_mat, p=p, draw=draw, batch=batch)
super().__init__(aff_fs=f, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class DeterministicDihedral(Dihedral):
    "Cycle deterministically through the 8 dihedral transformations, one per call"
    def __init__(self, size=None, mode='bilinear', pad_mode=PadMode.Reflection, align_corners=None):
        super().__init__(p=1., draw=DeterministicDraw(list(range(8))), size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
def rotate_mat(x, max_deg=10, p=0.5, draw=None, batch=False):
"Return a random rotation matrix with `max_deg` and `p`"
def _def_draw(x): return x.new_empty(x.size(0)).uniform_(-max_deg, max_deg)
def _def_draw_b(x): return x.new_zeros(x.size(0)) + random.uniform(-max_deg, max_deg)
thetas = _draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, batch=batch) * math.pi/180
return affine_mat(thetas.cos(), thetas.sin(), t0(thetas),
-thetas.sin(), thetas.cos(), t0(thetas))
# Internal Cell
@patch
@delegates(rotate_mat)
def rotate(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), size=None, mode=None, pad_mode=None, align_corners=True, **kwargs):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
mat = _prepare_mat(x, rotate_mat(x0, **kwargs))
return x.affine_coord(mat=mat, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Rotate(AffineCoordTfm):
"Apply a random rotation of at most `max_deg` with probability `p` to a batch of images"
def __init__(self, max_deg=10, p=0.5, draw=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection,
align_corners=True, batch=False):
aff_fs = partial(rotate_mat, max_deg=max_deg, p=p, draw=draw, batch=batch)
super().__init__(aff_fs=aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
def zoom_mat(x, min_zoom=1., max_zoom=1.1, p=0.5, draw=None, draw_x=None, draw_y=None, batch=False):
"Return a random zoom matrix with `max_zoom` and `p`"
def _def_draw(x): return x.new_empty(x.size(0)).uniform_(min_zoom, max_zoom)
def _def_draw_b(x): return x.new_zeros(x.size(0)) + random.uniform(min_zoom, max_zoom)
def _def_draw_ctr(x): return x.new_empty(x.size(0)).uniform_(0,1)
def _def_draw_ctr_b(x): return x.new_zeros(x.size(0)) + random.uniform(0,1)
assert(min_zoom<=max_zoom)
s = 1/_draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, neutral=1., batch=batch)
def_draw_c = _def_draw_ctr_b if batch else _def_draw_ctr
col_pct = _draw_mask(x, def_draw_c, draw=draw_x, p=1., batch=batch)
row_pct = _draw_mask(x, def_draw_c, draw=draw_y, p=1., batch=batch)
col_c = (1-s) * (2*col_pct - 1)
row_c = (1-s) * (2*row_pct - 1)
return affine_mat(s, t0(s), col_c,
t0(s), s, row_c)
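# Added commentary: with a zoom factor z drawn in [min_zoom, max_zoom], `s = 1/z` scales the
# sampling grid, so the matrix [[s, 0, col_c], [0, s, row_c]] zooms in by z. The translations
# `col_c`/`row_c` are derived from `col_pct`/`row_pct` so the zoom can be centred anywhere
# from one corner (pct=0) to the opposite corner (pct=1) of the image.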
# Internal Cell
@patch
@delegates(zoom_mat)
def zoom(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), size=None, mode='bilinear', pad_mode=PadMode.Reflection,
align_corners=True, **kwargs):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
return x.affine_coord(mat=zoom_mat(x0, **kwargs)[:,:2], sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Zoom(AffineCoordTfm):
"Apply a random zoom of at most `max_zoom` with probability `p` to a batch of images"
def __init__(self,min_zoom=1., max_zoom=1.1, p=0.5, draw=None, draw_x=None, draw_y=None, size=None, mode='bilinear',
pad_mode=PadMode.Reflection, batch=False, align_corners=True):
aff_fs = partial(zoom_mat, min_zoom=min_zoom, max_zoom=max_zoom, p=p, draw=draw, draw_x=draw_x, draw_y=draw_y, batch=batch)
super().__init__(aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
def find_coeffs(p1, p2):
"Find coefficients for warp tfm from `p1` to `p2`"
m = []
p = p1[:,0,0]
#The equations we'll need to solve.
for i in range(p1.shape[1]):
m.append(stack([p2[:,i,0], p2[:,i,1], t1(p), t0(p), t0(p), t0(p), -p1[:,i,0]*p2[:,i,0], -p1[:,i,0]*p2[:,i,1]]))
m.append(stack([t0(p), t0(p), t0(p), p2[:,i,0], p2[:,i,1], t1(p), -p1[:,i,1]*p2[:,i,0], -p1[:,i,1]*p2[:,i,1]]))
#The 8 scalars we seek are solution of AX = B
A = stack(m).permute(2, 0, 1)
B = p1.view(p1.shape[0], 8, 1)
return torch.solve(B,A)[0]
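# Quick sanity-check sketch (added; illustrative only): mapping the four corner points onto
# themselves should recover the identity homography, i.e. coefficients ~ (1,0,0,0,1,0,0,0).
def _example_find_coeffs_identity():
    "Illustrative only: `find_coeffs` with p1 == p2 returns ~[1,0,0,0,1,0,0,0] per batch item."
    pts = torch.tensor([[[-1.,-1.], [-1.,1.], [1.,-1.], [1.,1.]]])  # shape (1, 4, 2)
    return find_coeffs(pts, pts)                                    # shape (1, 8, 1)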
# Cell
def apply_perspective(coords, coeffs):
"Apply perspective tranfom on `coords` with `coeffs`"
sz = coords.shape
coords = coords.view(sz[0], -1, 2)
coeffs = torch.cat([coeffs, t1(coeffs[:,:1])], dim=1).view(coeffs.shape[0], 3,3)
coords1 = coords @ coeffs[...,:2].transpose(1,2) + coeffs[...,2].unsqueeze(1)
if (coords1[...,2]==0.).any(): return coords[...,:2].view(*sz)
coords = coords1/coords1[...,2].unsqueeze(-1)
return coords[...,:2].view(*sz)
# Cell
class _WarpCoord():
def __init__(self, magnitude=0.2, p=0.5, draw_x=None, draw_y=None, batch=False):
store_attr()
self.coeffs = None
def _def_draw(self, x):
if not self.batch: return x.new_empty(x.size(0)).uniform_(-self.magnitude, self.magnitude)
return x.new_zeros(x.size(0)) + random.uniform(-self.magnitude, self.magnitude)
def before_call(self, x):
x_t = _draw_mask(x, self._def_draw, self.draw_x, p=self.p, batch=self.batch)
y_t = _draw_mask(x, self._def_draw, self.draw_y, p=self.p, batch=self.batch)
orig_pts = torch.tensor([[-1,-1], [-1,1], [1,-1], [1,1]], dtype=x.dtype, device=x.device)
self.orig_pts = orig_pts.unsqueeze(0).expand(x.size(0),4,2)
targ_pts = stack([stack([-1-y_t, -1-x_t]), stack([-1+y_t, 1+x_t]),
stack([ 1+y_t, -1+x_t]), stack([ 1-y_t, 1-x_t])])
self.targ_pts = targ_pts.permute(2,0,1)
def __call__(self, x, invert=False):
coeffs = find_coeffs(self.targ_pts, self.orig_pts) if invert else find_coeffs(self.orig_pts, self.targ_pts)
return apply_perspective(x, coeffs)
# Internal Cell
@patch
@delegates(_WarpCoord.__init__)
def warp(x:(TensorImage,TensorMask,TensorPoint,TensorBBox), size=None, mode='bilinear',
pad_mode=PadMode.Reflection, align_corners=True, **kwargs):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
coord_tfm = _WarpCoord(**kwargs)
coord_tfm.before_call(x0)
return x.affine_coord(coord_tfm=coord_tfm, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Warp(AffineCoordTfm):
"Apply perspective warping with `magnitude` and `p` on a batch of matrices"
def __init__(self, magnitude=0.2, p=0.5, draw_x=None, draw_y=None,size=None, mode='bilinear',
pad_mode=PadMode.Reflection, batch=False, align_corners=True):
store_attr()
coord_fs = _WarpCoord(magnitude=magnitude, p=p, draw_x=draw_x, draw_y=draw_y, batch=batch)
super().__init__(coord_fs=coord_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners )
# Cell
@patch
def lighting(x: TensorImage, func): return TensorImage(torch.sigmoid(func(logit(x))))
# Cell
class SpaceTfm(RandTransform):
"Apply `fs` to the logits"
order = 40
def __init__(self, fs, space_fn, **kwargs):
super().__init__(**kwargs)
self.space_fn=space_fn
self.fs=L(fs)
def before_call(self, b, split_idx):
self.do = True
while isinstance(b, tuple): b = b[0]
for t in self.fs: t.before_call(b)
def compose(self, tfm):
"Compose `self` with another `LightingTransform`"
self.fs += tfm.fs
def encodes(self,x:TensorImage): return self.space_fn(x,partial(compose_tfms, tfms=self.fs))
# Cell
class LightingTfm(SpaceTfm):
"Apply `fs` to the logits"
order = 40
def __init__(self, fs, **kwargs):
super().__init__(fs, TensorImage.lighting, **kwargs)
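# Added commentary: `TensorImage.lighting` (patched above) maps pixels into logit space, applies
# `fs`, then maps back through a sigmoid, so adjusted values stay in [0,1] without clipping.
# The transforms below rely on this: brightness adds `logit(change)`, contrast multiplies by
# `change`, and saturation blends towards the grayscale image in that space.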
# Cell
class _BrightnessLogit():
def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: return x.new_empty(x.size(0)).uniform_(0.5*(1-self.max_lighting), 0.5*(1+self.max_lighting))
return x.new_zeros(x.size(0)) + random.uniform(0.5*(1-self.max_lighting), 0.5*(1+self.max_lighting))
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=0.5, batch=self.batch)
def __call__(self, x): return x.add_(logit(self.change[:,None,None,None]))
# Internal Cell
@patch
@delegates(_BrightnessLogit.__init__)
def brightness(x: TensorImage, **kwargs):
func = _BrightnessLogit(**kwargs)
func.before_call(x)
return x.lighting(func)
# Cell
class Brightness(LightingTfm):
def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False):
"Apply change in brightness of `max_lighting` to batch of images with probability `p`."
store_attr()
super().__init__(_BrightnessLogit(max_lighting, p, draw, batch))
# Cell
class _ContrastLogit():
def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
return torch.exp(res)
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=1., batch=self.batch)
def __call__(self, x): return x.mul_(self.change[:,None,None,None])
# Internal Cell
@patch
@delegates(_ContrastLogit.__init__)
def contrast(x: TensorImage, **kwargs):
func = _ContrastLogit(**kwargs)
func.before_call(x)
return x.lighting(func)
# Cell
class Contrast(LightingTfm):
"Apply change in contrast of `max_lighting` to batch of images with probability `p`."
def __init__(self,max_lighting=0.2, p=0.75, draw=None, batch=False):
store_attr()
super().__init__(_ContrastLogit(max_lighting, p, draw, batch))
# Cell
def grayscale(x):
"Tensor to grayscale tensor. Uses the ITU-R 601-2 luma transform. "
return (x*torch.tensor([0.2989,0.5870,0.1140],device=x.device)[...,None,None]).sum(1)[:,None]
# Cell
class _SaturationLogit():
def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
return torch.exp(res)
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=1., batch=self.batch)
def __call__(self, x):
#interpolate between grayscale and original in-place
gs = grayscale(x)
gs.mul_(1-self.change[:,None,None,None])
x.mul_(self.change[:,None,None,None])
return x.add_(gs)
# Internal Cell
@patch
@delegates(_SaturationLogit.__init__)
def saturation(x: TensorImage, **kwargs):
func = _SaturationLogit(**kwargs)
func.before_call(x)
return x.lighting(func)
# Cell
class Saturation(LightingTfm):
"Apply change in saturation of `max_lighting` to batch of images with probability `p`."
# Ref: https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.functional.adjust_saturation
def __init__(self,max_lighting=0.2, p=0.75, draw=None, batch=False):
store_attr()
super().__init__(_SaturationLogit(max_lighting, p, draw, batch))
# Cell
def rgb2hsv(img):
"Converts a RGB image to an HSV image. Note: Will not work on logit space images."
r, g, b = img.unbind(1)
# temp commented out due to https://github.com/pytorch/pytorch/issues/47069
# maxc = torch.max(img, dim=1).values
# minc = torch.min(img, dim=1).values
maxc = torch.max(img, dim=1)[0]
minc = torch.min(img, dim=1)[0]
eqc = maxc == minc
cr = maxc - minc
s = cr / torch.where(eqc, maxc.new_ones(()), maxc)
cr_divisor = torch.where(eqc, maxc.new_ones(()), cr)
rc = (maxc - r) / cr_divisor
gc = (maxc - g) / cr_divisor
bc = (maxc - b) / cr_divisor
hr = (maxc == r) * (bc - gc)
hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
h = (hr + hg + hb)
h = torch.fmod((h / 6.0 + 1.0), 1.0)
return torch.stack((h, s, maxc),dim=1)
# Cell
def hsv2rgb(img):
"Converts a HSV image to an RGB image."
h, s, v = img.unbind(1)
i = torch.floor(h * 6.0)
f = (h * 6.0) - i
i = i.to(dtype=torch.int32)
p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
i = i % 6
mask = i[:,None] == torch.arange(6,device=i.device)[:, None, None][None]
a1 = torch.stack((v, q, p, p, t, v),dim=1)
a2 = torch.stack((t, v, v, q, p, p),dim=1)
a3 = torch.stack((p, p, t, v, v, q),dim=1)
a4 = torch.stack((a1, a2, a3),dim=1)
return torch.einsum("nijk, nxijk -> nxjk", mask.to(dtype=img.dtype), a4)
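# Sanity-check sketch (added; illustrative only): for pixel values in [0,1] the two conversions
# above are inverses of each other up to floating-point error.
def _example_hsv_roundtrip():
    "Illustrative only: hsv2rgb(rgb2hsv(x)) reconstructs x to within float tolerance."
    x = torch.rand(2, 3, 8, 8)
    return (hsv2rgb(rgb2hsv(x)) - x).abs().max()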
# Internal Cell
@patch
def hsv(x: TensorImage, func): return TensorImage(hsv2rgb(func(rgb2hsv(x))))
# Cell
class HSVTfm(SpaceTfm):
"Apply `fs` to the images in HSV space"
def __init__(self, fs, **kwargs):
super().__init__(fs, TensorImage.hsv, **kwargs)
# Cell
class _Hue():
def __init__(self, max_hue=0.1, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_hue), -math.log(1-self.max_hue))
else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_hue), -math.log(1-self.max_hue))
return torch.exp(res)
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=0., batch=self.batch)
def __call__(self, x):
h,s,v = x.unbind(1)
h += self.change[:,None,None]
h = h % 1.0
return x.set_(torch.stack((h, s, v),dim=1))
# Internal Cell
@patch
@delegates(_Hue.__init__)
def hue(x: TensorImage, **kwargs):
func = _Hue(**kwargs)
func.before_call(x)
return TensorImage(x.hsv(func))
# Cell
class Hue(HSVTfm):
"Apply change in hue of `max_hue` to batch of images with probability `p`."
# Ref: https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.functional.adjust_hue
def __init__(self,max_hue=0.1, p=0.75, draw=None, batch=False):
super().__init__(_Hue(max_hue, p, draw, batch))
# Cell
def cutout_gaussian(x, areas):
"Replace all `areas` in `x` with N(0,1) noise"
chan,img_h,img_w = x.shape[-3:]
for rl,rh,cl,ch in areas: x[..., rl:rh, cl:ch].normal_()
return x
# Cell
def norm_apply_denorm(x, f, nrm):
"Normalize `x` with `nrm`, then apply `f`, then denormalize"
y = f(nrm(x.clone()))
return nrm.decode(y).clamp(0,1)
# Cell
def _slice(area, sz):
bound = int(round(math.sqrt(area)))
loc = random.randint(0, max(sz-bound, 0))
return loc,loc+bound
# Cell
class RandomErasing(RandTransform):
"Randomly selects a rectangle region in an image and randomizes its pixels."
order = 100 # After Normalize
def __init__(self, p=0.5, sl=0., sh=0.3, min_aspect=0.3, max_count=1):
store_attr()
super().__init__(p=p)
self.log_ratio = (math.log(min_aspect), math.log(1/min_aspect))
def _bounds(self, area, img_h, img_w):
r_area = random.uniform(self.sl,self.sh) * area
aspect = math.exp(random.uniform(*self.log_ratio))
return _slice(r_area*aspect, img_h) + _slice(r_area/aspect, img_w)
def encodes(self,x:TensorImage):
count = random.randint(1, self.max_count)
_,img_h,img_w = x.shape[-3:]
area = img_h*img_w/count
areas = [self._bounds(area, img_h, img_w) for _ in range(count)]
return cutout_gaussian(x, areas)
# Cell
def _compose_same_tfms(tfms):
tfms = L(tfms)
if len(tfms) == 0: return None
res = tfms[0]
for tfm in tfms[1:]: res.compose(tfm)
return res
# Cell
def setup_aug_tfms(tfms):
"Go through `tfms` and combines together affine/coord or lighting transforms"
aff_tfms = [tfm for tfm in tfms if isinstance(tfm, AffineCoordTfm)]
lig_tfms = [tfm for tfm in tfms if isinstance(tfm, LightingTfm)]
others = [tfm for tfm in tfms if tfm not in aff_tfms+lig_tfms]
lig_tfm = _compose_same_tfms(lig_tfms)
aff_tfm = _compose_same_tfms(aff_tfms)
res = [aff_tfm] if aff_tfm is not None else []
if lig_tfm is not None: res.append(lig_tfm)
return res + others
# Cell
def aug_transforms(mult=1.0, do_flip=True, flip_vert=False, max_rotate=10., min_zoom=1., max_zoom=1.1,
max_lighting=0.2, max_warp=0.2, p_affine=0.75, p_lighting=0.75, xtra_tfms=None, size=None,
mode='bilinear', pad_mode=PadMode.Reflection, align_corners=True, batch=False, min_scale=1.):
"Utility func to easily create a list of flip, rotate, zoom, warp, lighting transforms."
res,tkw = [],dict(size=size if min_scale==1. else None, mode=mode, pad_mode=pad_mode, batch=batch, align_corners=align_corners)
max_rotate,max_lighting,max_warp = array([max_rotate,max_lighting,max_warp])*mult
if do_flip: res.append(Dihedral(p=0.5, **tkw) if flip_vert else Flip(p=0.5, **tkw))
if max_warp: res.append(Warp(magnitude=max_warp, p=p_affine, **tkw))
if max_rotate: res.append(Rotate(max_deg=max_rotate, p=p_affine, **tkw))
if min_zoom<1 or max_zoom>1: res.append(Zoom(min_zoom=min_zoom, max_zoom=max_zoom, p=p_affine, **tkw))
if max_lighting:
res.append(Brightness(max_lighting=max_lighting, p=p_lighting, batch=batch))
res.append(Contrast(max_lighting=max_lighting, p=p_lighting, batch=batch))
if min_scale!=1.: xtra_tfms = RandomResizedCropGPU(size, min_scale=min_scale, ratio=(1,1)) + L(xtra_tfms)
    return setup_aug_tfms(res + L(xtra_tfms))
# source: zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/augment.py
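# Usage sketch for `aug_transforms` above (added; illustrative only): the returned list is
# typically passed to a DataBlock/DataLoaders as `batch_tfms`.
def _example_aug_transforms():
    "Illustrative only: standard flip/warp/rotate/zoom plus brightness/contrast pipeline."
    return aug_transforms(mult=1.0, do_flip=True, max_rotate=10., max_zoom=1.1,
                          max_lighting=0.2, max_warp=0.2, size=224)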
__all__ = ['Image', 'ToTensor', 'imagenet_stats', 'cifar_stats', 'mnist_stats', 'n_px', 'shape', 'aspect', 'to_image',
'load_image', 'image2tensor', 'PILBase', 'PILImage', 'PILImageBW', 'PILMask', 'OpenMask', 'AddMaskCodes',
'TensorPoint', 'TensorPointCreate', 'get_annotations', 'TensorBBox', 'LabeledBBox', 'encodes', 'encodes',
'PointScaler', 'BBoxLabeler', 'decodes', 'encodes', 'decodes']
# Cell
from ..torch_basics import *
from ..data.all import *
from PIL import Image
# Cell
#nbdev_comment _all_ = ['Image','ToTensor']
# Cell
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
cifar_stats = ([0.491, 0.482, 0.447], [0.247, 0.243, 0.261])
mnist_stats = ([0.131], [0.308])
# Cell
if not hasattr(Image,'_patched'):
_old_sz = Image.Image.size.fget
@patch(as_prop=True)
def size(x:Image.Image): return fastuple(_old_sz(x))
Image._patched = True
# Cell
@patch(as_prop=True)
def n_px(x: Image.Image): return x.size[0] * x.size[1]
# Cell
@patch(as_prop=True)
def shape(x: Image.Image): return x.size[1],x.size[0]
# Cell
@patch(as_prop=True)
def aspect(x: Image.Image): return x.size[0]/x.size[1]
# Cell
@patch
def reshape(x: Image.Image, h, w, resample=0):
"`resize` `x` to `(w,h)`"
return x.resize((w,h), resample=resample)
# Cell
@patch
def to_bytes_format(im:Image.Image, format='png'):
"Convert to bytes, default to PNG format"
arr = io.BytesIO()
im.save(arr, format=format)
return arr.getvalue()
# Cell
@patch
def to_thumb(self:Image.Image, h, w=None):
"Same as `thumbnail`, but uses a copy"
if w is None: w=h
im = self.copy()
im.thumbnail((w,h))
return im
# Cell
@patch
def resize_max(x: Image.Image, resample=0, max_px=None, max_h=None, max_w=None):
"`resize` `x` to `max_px`, or `max_h`, or `max_w`"
h,w = x.shape
if max_px and x.n_px>max_px: h,w = fastuple(h,w).mul(math.sqrt(max_px/x.n_px))
if max_h and h>max_h: h,w = (max_h ,max_h*w/h)
if max_w and w>max_w: h,w = (max_w*h/w,max_w )
return x.reshape(round(h), round(w), resample=resample)
# Cell
def to_image(x):
"Convert a tensor or array to a PIL int8 Image"
if isinstance(x,Image.Image): return x
if isinstance(x,Tensor): x = to_np(x.permute((1,2,0)))
if x.dtype==np.float32: x = (x*255).astype(np.uint8)
    return Image.fromarray(x, mode=['RGB','CMYK'][x.shape[-1]==4])
# Cell
def load_image(fn, mode=None):
"Open and load a `PIL.Image` and convert to `mode`"
im = Image.open(fn)
im.load()
im = im._new(im.im)
return im.convert(mode) if mode else im
# Cell
def image2tensor(img):
"Transform image to byte tensor in `c*h*w` dim order."
res = tensor(img)
if res.dim()==2: res = res.unsqueeze(-1)
return res.permute(2,0,1)
# Cell
class PILBase(Image.Image, metaclass=BypassNewMeta):
_bypass_type=Image.Image
_show_args = {'cmap':'viridis'}
_open_args = {'mode': 'RGB'}
@classmethod
def create(cls, fn:(Path,str,Tensor,ndarray,bytes), **kwargs)->None:
"Open an `Image` from path `fn`"
if isinstance(fn,TensorImage): fn = fn.permute(1,2,0).type(torch.uint8)
if isinstance(fn, TensorMask): fn = fn.type(torch.uint8)
if isinstance(fn,Tensor): fn = fn.numpy()
if isinstance(fn,ndarray): return cls(Image.fromarray(fn))
if isinstance(fn,bytes): fn = io.BytesIO(fn)
return cls(load_image(fn, **merge(cls._open_args, kwargs)))
def show(self, ctx=None, **kwargs):
"Show image using `merge(self._show_args, kwargs)`"
return show_image(self, ctx=ctx, **merge(self._show_args, kwargs))
def __repr__(self): return f'{self.__class__.__name__} mode={self.mode} size={"x".join([str(d) for d in self.size])}'
# Cell
class PILImage(PILBase): pass
# Cell
class PILImageBW(PILImage): _show_args,_open_args = {'cmap':'Greys'},{'mode': 'L'}
# Cell
class PILMask(PILBase): _open_args,_show_args = {'mode':'L'},{'alpha':0.5, 'cmap':'tab20'}
# Cell
OpenMask = Transform(PILMask.create)
OpenMask.loss_func = CrossEntropyLossFlat(axis=1)
PILMask.create = OpenMask
# Cell
class AddMaskCodes(Transform):
"Add the code metadata to a `TensorMask`"
def __init__(self, codes=None):
self.codes = codes
if codes is not None: self.vocab,self.c = codes,len(codes)
def decodes(self, o:TensorMask):
if self.codes is not None: o.codes=self.codes
return o
# Cell
class TensorPoint(TensorBase):
"Basic type for points in an image"
_show_args = dict(s=10, marker='.', c='r')
@classmethod
def create(cls, t, img_size=None)->None:
"Convert an array or a list of points `t` to a `Tensor`"
return cls(tensor(t).view(-1, 2).float(), img_size=img_size)
def show(self, ctx=None, **kwargs):
if 'figsize' in kwargs: del kwargs['figsize']
x = self.view(-1,2)
ctx.scatter(x[:, 0], x[:, 1], **{**self._show_args, **kwargs})
return ctx
# Cell
TensorPointCreate = Transform(TensorPoint.create)
TensorPointCreate.loss_func = MSELossFlat()
TensorPoint.create = TensorPointCreate
# Cell
def get_annotations(fname, prefix=None):
"Open a COCO style json in `fname` and returns the lists of filenames (with maybe `prefix`) and labelled bboxes."
annot_dict = json.load(open(fname))
id2images, id2bboxes, id2cats = {}, collections.defaultdict(list), collections.defaultdict(list)
classes = {o['id']:o['name'] for o in annot_dict['categories']}
for o in annot_dict['annotations']:
bb = o['bbox']
id2bboxes[o['image_id']].append([bb[0],bb[1], bb[0]+bb[2], bb[1]+bb[3]])
id2cats[o['image_id']].append(classes[o['category_id']])
id2images = {o['id']:ifnone(prefix, '') + o['file_name'] for o in annot_dict['images'] if o['id'] in id2bboxes}
ids = list(id2images.keys())
return [id2images[k] for k in ids], [(id2bboxes[k], id2cats[k]) for k in ids]
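# Added commentary: the only COCO fields read by `get_annotations` are, schematically,
#   {"images":      [{"id": 1, "file_name": "img0.jpg"}],
#    "annotations": [{"image_id": 1, "bbox": [x, y, w, h], "category_id": 7}],
#    "categories":  [{"id": 7, "name": "cat"}]}
# and each xywh `bbox` is converted to xyxy as [x, y, x+w, y+h].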
# Cell
from matplotlib import patches, patheffects
# Cell
def _draw_outline(o, lw):
o.set_path_effects([patheffects.Stroke(linewidth=lw, foreground='black'), patheffects.Normal()])
def _draw_rect(ax, b, color='white', text=None, text_size=14, hw=True, rev=False):
lx,ly,w,h = b
if rev: lx,ly,w,h = ly,lx,h,w
if not hw: w,h = w-lx,h-ly
patch = ax.add_patch(patches.Rectangle((lx,ly), w, h, fill=False, edgecolor=color, lw=2))
_draw_outline(patch, 4)
if text is not None:
patch = ax.text(lx,ly, text, verticalalignment='top', color=color, fontsize=text_size, weight='bold')
_draw_outline(patch,1)
# Cell
class TensorBBox(TensorPoint):
"Basic type for a tensor of bounding boxes in an image"
@classmethod
def create(cls, x, img_size=None)->None: return cls(tensor(x).view(-1, 4).float(), img_size=img_size)
def show(self, ctx=None, **kwargs):
x = self.view(-1,4)
for b in x: _draw_rect(ctx, b, hw=False, **kwargs)
return ctx
# Cell
class LabeledBBox(L):
"Basic type for a list of bounding boxes in an image"
def show(self, ctx=None, **kwargs):
for b,l in zip(self.bbox, self.lbl):
if l != '#na#': ctx = retain_type(b, self.bbox).show(ctx=ctx, text=l)
return ctx
bbox,lbl = add_props(lambda i,self: self[i])
# Cell
PILImage ._tensor_cls = TensorImage
PILImageBW._tensor_cls = TensorImageBW
PILMask ._tensor_cls = TensorMask
# Cell
@ToTensor
def encodes(self, o:PILBase): return o._tensor_cls(image2tensor(o))
@ToTensor
def encodes(self, o:PILMask): return o._tensor_cls(image2tensor(o)[0])
# Cell
def _scale_pnts(y, sz, do_scale=True, y_first=False):
if y_first: y = y.flip(1)
res = y * 2/tensor(sz).float() - 1 if do_scale else y
return TensorPoint(res, img_size=sz)
def _unscale_pnts(y, sz): return TensorPoint((y+1) * tensor(sz).float()/2, img_size=sz)
# Cell
class PointScaler(Transform):
"Scale a tensor representing points"
order = 1
def __init__(self, do_scale=True, y_first=False): self.do_scale,self.y_first = do_scale,y_first
def _grab_sz(self, x):
self.sz = [x.shape[-1], x.shape[-2]] if isinstance(x, Tensor) else x.size
return x
def _get_sz(self, x): return getattr(x, 'img_size') if self.sz is None else self.sz
def setups(self, dl):
res = first(dl.do_item(0), risinstance(TensorPoint))
if res is not None: self.c = res.numel()
def encodes(self, x:(PILBase,TensorImageBase)): return self._grab_sz(x)
def decodes(self, x:(PILBase,TensorImageBase)): return self._grab_sz(x)
def encodes(self, x:TensorPoint): return _scale_pnts(x, self._get_sz(x), self.do_scale, self.y_first)
def decodes(self, x:TensorPoint): return _unscale_pnts(x.view(-1, 2), self._get_sz(x))
# Cell
class BBoxLabeler(Transform):
def setups(self, dl): self.vocab = dl.vocab
def decode (self, x, **kwargs):
self.bbox,self.lbls = None,None
return self._call('decodes', x, **kwargs)
def decodes(self, x:TensorMultiCategory):
self.lbls = [self.vocab[a] for a in x]
return x if self.bbox is None else LabeledBBox(self.bbox, self.lbls)
def decodes(self, x:TensorBBox):
self.bbox = x
return self.bbox if self.lbls is None else LabeledBBox(self.bbox, self.lbls)
# Cell
#LabeledBBox can be sent in a tl with MultiCategorize (depending on the order of the tls) but it is already decoded.
@MultiCategorize
def decodes(self, x:LabeledBBox): return x
# Cell
@PointScaler
def encodes(self, x:TensorBBox):
pnts = self.encodes(cast(x.view(-1,2), TensorPoint))
return cast(pnts.view(-1, 4), TensorBBox)
@PointScaler
def decodes(self, x:TensorBBox):
pnts = self.decodes(cast(x.view(-1,2), TensorPoint))
    return cast(pnts.view(-1, 4), TensorBBox)
# source: zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/core.py
__all__ = ['download_images', 'resize_to', 'verify_image', 'verify_images', 'resize_image', 'resize_images']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
from pathlib import Path
# Cell
def _get_downloaded_image_filename(dest, name, suffix):
start_index = 1
candidate_name = name
while (dest/f"{candidate_name}{suffix}").is_file():
candidate_name = f"{candidate_name}{start_index}"
start_index += 1
return candidate_name
# Cell
def _download_image_inner(dest, inp, timeout=4, preserve_filename=False):
i,url = inp
url_path = Path(url)
suffix = url_path.suffix if url_path.suffix else '.jpg'
name = _get_downloaded_image_filename(dest, url_path.stem, suffix) if preserve_filename else f"{i:08d}"
try: download_url(url, dest/f"{name}{suffix}", overwrite=True, show_progress=False, timeout=timeout)
except Exception as e: f"Couldn't download {url}."
# Cell
def download_images(dest, url_file=None, urls=None, max_pics=1000, n_workers=8, timeout=4, preserve_filename=False):
"Download images listed in text file `url_file` to path `dest`, at most `max_pics`"
if urls is None: urls = url_file.read_text().strip().split("\n")[:max_pics]
dest = Path(dest)
dest.mkdir(exist_ok=True)
parallel(partial(_download_image_inner, dest, timeout=timeout, preserve_filename=preserve_filename),
list(enumerate(urls)), n_workers=n_workers)
# Cell
def resize_to(img, targ_sz, use_min=False):
"Size to resize to, to hit `targ_sz` at same aspect ratio, in PIL coords (i.e w*h)"
w,h = img.size
min_sz = (min if use_min else max)(w,h)
ratio = targ_sz/min_sz
return int(w*ratio),int(h*ratio)
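# Worked example (added; illustrative only): for a 1200x800 image,
#   resize_to(img, 400)               -> (400, 266)   # the longer side is scaled down to 400
#   resize_to(img, 400, use_min=True) -> (600, 400)   # the shorter side is scaled down to 400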
# Cell
def verify_image(fn):
"Confirm that `fn` can be opened"
try:
im = Image.open(fn)
im.draft(im.mode, (32,32))
im.load()
return True
except: return False
# Cell
def verify_images(fns):
"Find images in `fns` that can't be opened"
return L(fns[i] for i,o in enumerate(parallel(verify_image, fns)) if not o)
# Cell
def resize_image(file, dest, max_size=None, n_channels=3, ext=None,
img_format=None, resample=Image.BILINEAR, resume=False, **kwargs ):
"Resize file to dest to max_size"
dest = Path(dest)
dest_fname = dest/file.name
if resume and dest_fname.exists(): return
if verify_image(file):
img = Image.open(file)
imgarr = np.array(img)
img_channels = 1 if len(imgarr.shape) == 2 else imgarr.shape[2]
if (max_size is not None and (img.height > max_size or img.width > max_size)) or img_channels != n_channels:
if ext is not None: dest_fname=dest_fname.with_suffix(ext)
if max_size is not None:
new_sz = resize_to(img, max_size)
img = img.resize(new_sz, resample=resample)
if n_channels == 3: img = img.convert("RGB")
img.save(dest_fname, img_format, **kwargs)
# Cell
def resize_images(path, max_workers=defaults.cpus, max_size=None, recurse=False,
dest=Path('.'), n_channels=3, ext=None, img_format=None, resample=Image.BILINEAR,
resume=None, **kwargs):
"Resize files on path recursively to dest to max_size"
path = Path(path)
if resume is None and dest != Path('.'): resume=False
os.makedirs(dest, exist_ok=True)
files = get_image_files(path, recurse=recurse)
parallel(resize_image, files, max_workers=max_workers, max_size=max_size, dest=dest, n_channels=n_channels, ext=ext,
             img_format=img_format, resample=resample, resume=resume, **kwargs)
# source: zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/utils.py
__all__ = ['HBox', 'VBox', 'widgets', 'Button', 'Checkbox', 'Dropdown', 'Layout', 'Box', 'Output', 'Label',
'FileUpload', 'widget', 'carousel', 'ImagesCleaner', 'ImageClassifierCleaner']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
from ipywidgets import HBox,VBox,widgets,Button,Checkbox,Dropdown,Layout,Box,Output,Label,FileUpload
# Cell
#nbdev_comment _all_ = ['HBox','VBox','widgets','Button','Checkbox','Dropdown','Layout','Box','Output','Label','FileUpload']
# Cell
@patch
def __getitem__(self:Box, i): return self.children[i]
# Cell
def widget(im, *args, **layout):
"Convert anything that can be `display`ed by IPython into a widget"
o = Output(layout=merge(*args, layout))
with o: display(im)
return o
# Cell
def _update_children(change):
for o in change['owner'].children:
if not o.layout.flex: o.layout.flex = '0 0 auto'
# Cell
def carousel(children=(), **layout):
"A horizontally scrolling carousel"
def_layout = dict(overflow='scroll hidden', flex_flow='row', display='flex')
res = Box([], layout=merge(def_layout, layout))
res.observe(_update_children, names='children')
res.children = children
return res
# Cell
def _open_thumb(fn, h, w): return Image.open(fn).to_thumb(h, w).convert('RGBA')
# Cell
class ImagesCleaner:
"A widget that displays all images in `fns` along with a `Dropdown`"
def __init__(self, opts=(), height=128, width=256, max_n=30):
opts = ('<Keep>', '<Delete>')+tuple(opts)
store_attr('opts,height,width,max_n')
self.widget = carousel(width='100%')
def set_fns(self, fns):
self.fns = L(fns)[:self.max_n]
ims = parallel(_open_thumb, self.fns, h=self.height, w=self.width, progress=False,
n_workers=min(len(self.fns)//10,defaults.cpus))
self.widget.children = [VBox([widget(im, height=f'{self.height}px'), Dropdown(
options=self.opts, layout={'width': 'max-content'})]) for im in ims]
def _ipython_display_(self): display(self.widget)
def values(self): return L(self.widget.children).itemgot(1).attrgot('value')
def delete(self): return self.values().argwhere(eq('<Delete>'))
def change(self):
idxs = self.values().argwhere(not_(in_(['<Delete>','<Keep>'])))
return idxs.zipwith(self.values()[idxs])
# Cell
def _get_iw_info(learn, ds_idx=0):
dl = learn.dls[ds_idx].new(shuffle=False, drop_last=False)
inp,probs,targs,preds,losses = learn.get_preds(dl=dl, with_input=True, with_loss=True, with_decoded=True)
inp,targs = L(zip(*dl.decode_batch((inp,targs), max_n=9999)))
return L([dl.dataset.items,targs,losses]).zip()
# Cell
@delegates(ImagesCleaner)
class ImageClassifierCleaner(GetAttr):
"A widget that provides an `ImagesCleaner` with a CNN `Learner`"
def __init__(self, learn, **kwargs):
vocab = learn.dls.vocab
self.default = self.iw = ImagesCleaner(vocab, **kwargs)
self.dd_cats = Dropdown(options=vocab)
self.dd_ds = Dropdown(options=('Train','Valid'))
self.iwis = _get_iw_info(learn,0),_get_iw_info(learn,1)
self.dd_ds.observe(self.on_change_ds, 'value')
self.dd_cats.observe(self.on_change_ds, 'value')
self.on_change_ds()
self.widget = VBox([self.dd_cats, self.dd_ds, self.iw.widget])
def _ipython_display_(self): display(self.widget)
def on_change_ds(self, change=None):
info = L(o for o in self.iwis[self.dd_ds.index] if o[1]==self.dd_cats.value)
        self.iw.set_fns(info.sorted(2, reverse=True).itemgot(0))
# source: zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/widgets.py
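# Usage sketch for `ImageClassifierCleaner` above (added; illustrative only; `learn` is a
# hypothetical trained Learner, and this is meant to run in a notebook).
def _example_cleaner(learn):
    "Illustrative only: display the widget, then act on `cleaner.delete()` / `cleaner.change()`."
    cleaner = ImageClassifierCleaner(learn, max_n=50)
    return cleaner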
__all__ = ['GANModule', 'basic_critic', 'AddChannels', 'basic_generator', 'DenseResBlock', 'gan_critic', 'GANLoss',
'AdaptiveLoss', 'accuracy_thresh_expand', 'set_freeze_model', 'GANTrainer', 'FixedGANSwitcher',
'AdaptiveGANSwitcher', 'GANDiscriminativeLR', 'InvisibleTensor', 'generate_noise', 'gan_loss_from_func',
'GANLearner']
# Cell
from ..basics import *
from .all import *
# Cell
class GANModule(Module):
"Wrapper around a `generator` and a `critic` to create a GAN."
def __init__(self, generator=None, critic=None, gen_mode=False):
if generator is not None: self.generator=generator
if critic is not None: self.critic =critic
store_attr('gen_mode')
def forward(self, *args):
return self.generator(*args) if self.gen_mode else self.critic(*args)
def switch(self, gen_mode=None):
"Put the module in generator mode if `gen_mode`, in critic mode otherwise."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
# Cell
@delegates(ConvLayer.__init__)
def basic_critic(in_size, n_channels, n_features=64, n_extra_layers=0, norm_type=NormType.Batch, **kwargs):
"A basic critic for images `n_channels` x `in_size` x `in_size`."
layers = [ConvLayer(n_channels, n_features, 4, 2, 1, norm_type=None, **kwargs)]
cur_size, cur_ftrs = in_size//2, n_features
layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, norm_type=norm_type, **kwargs) for _ in range(n_extra_layers)]
while cur_size > 4:
layers.append(ConvLayer(cur_ftrs, cur_ftrs*2, 4, 2, 1, norm_type=norm_type, **kwargs))
cur_ftrs *= 2 ; cur_size //= 2
init = kwargs.get('init', nn.init.kaiming_normal_)
layers += [init_default(nn.Conv2d(cur_ftrs, 1, 4, padding=0), init), Flatten()]
return nn.Sequential(*layers)
# Cell
class AddChannels(Module):
"Add `n_dim` channels at the end of the input."
def __init__(self, n_dim): self.n_dim=n_dim
def forward(self, x): return x.view(*(list(x.shape)+[1]*self.n_dim))
# Cell
@delegates(ConvLayer.__init__)
def basic_generator(out_size, n_channels, in_sz=100, n_features=64, n_extra_layers=0, **kwargs):
"A basic generator from `in_sz` to images `n_channels` x `out_size` x `out_size`."
cur_size, cur_ftrs = 4, n_features//2
while cur_size < out_size: cur_size *= 2; cur_ftrs *= 2
layers = [AddChannels(2), ConvLayer(in_sz, cur_ftrs, 4, 1, transpose=True, **kwargs)]
cur_size = 4
while cur_size < out_size // 2:
layers.append(ConvLayer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **kwargs))
cur_ftrs //= 2; cur_size *= 2
layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **kwargs) for _ in range(n_extra_layers)]
layers += [nn.ConvTranspose2d(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()]
return nn.Sequential(*layers)
# Cell
_conv_args = dict(act_cls = partial(nn.LeakyReLU, negative_slope=0.2), norm_type=NormType.Spectral)
def _conv(ni, nf, ks=3, stride=1, self_attention=False, **kwargs):
if self_attention: kwargs['xtra'] = SelfAttention(nf)
return ConvLayer(ni, nf, ks=ks, stride=stride, **_conv_args, **kwargs)
# Cell
@delegates(ConvLayer)
def DenseResBlock(nf, norm_type=NormType.Batch, **kwargs):
"Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`."
return SequentialEx(ConvLayer(nf, nf, norm_type=norm_type, **kwargs),
ConvLayer(nf, nf, norm_type=norm_type, **kwargs),
MergeLayer(dense=True))
# Cell
def gan_critic(n_channels=3, nf=128, n_blocks=3, p=0.15):
"Critic to train a `GAN`."
layers = [
_conv(n_channels, nf, ks=4, stride=2),
nn.Dropout2d(p/2),
DenseResBlock(nf, **_conv_args)]
nf *= 2 # after dense block
for i in range(n_blocks):
layers += [
nn.Dropout2d(p),
_conv(nf, nf*2, ks=4, stride=2, self_attention=(i==0))]
nf *= 2
layers += [
ConvLayer(nf, 1, ks=4, bias=False, padding=0, norm_type=NormType.Spectral, act_cls=None),
Flatten()]
return nn.Sequential(*layers)
# Cell
class GANLoss(GANModule):
"Wrapper around `crit_loss_func` and `gen_loss_func`"
def __init__(self, gen_loss_func, crit_loss_func, gan_model):
super().__init__()
store_attr('gen_loss_func,crit_loss_func,gan_model')
def generator(self, output, target):
"Evaluate the `output` with the critic then uses `self.gen_loss_func`"
fake_pred = self.gan_model.critic(output)
self.gen_loss = self.gen_loss_func(fake_pred, output, target)
return self.gen_loss
def critic(self, real_pred, input):
"Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.crit_loss_func`."
fake = self.gan_model.generator(input).requires_grad_(False)
fake_pred = self.gan_model.critic(fake)
self.crit_loss = self.crit_loss_func(real_pred, fake_pred)
return self.crit_loss
# Cell
class AdaptiveLoss(Module):
"Expand the `target` to match the `output` size before applying `crit`."
def __init__(self, crit): self.crit = crit
def forward(self, output, target):
return self.crit(output, target[:,None].expand_as(output).float())
# Cell
def accuracy_thresh_expand(y_pred, y_true, thresh=0.5, sigmoid=True):
"Compute accuracy after expanding `y_true` to the size of `y_pred`."
if sigmoid: y_pred = y_pred.sigmoid()
return ((y_pred>thresh).byte()==y_true[:,None].expand_as(y_pred).byte()).float().mean()
# Cell
def set_freeze_model(m, rg):
for p in m.parameters(): p.requires_grad_(rg)
# Cell
class GANTrainer(Callback):
"Handles GAN Training."
run_after = TrainEvalCallback
def __init__(self, switch_eval=False, clip=None, beta=0.98, gen_first=False, show_img=True):
store_attr('switch_eval,clip,gen_first,show_img')
self.gen_loss,self.crit_loss = AvgSmoothLoss(beta=beta),AvgSmoothLoss(beta=beta)
def _set_trainable(self):
train_model = self.generator if self.gen_mode else self.critic
loss_model = self.generator if not self.gen_mode else self.critic
set_freeze_model(train_model, True)
set_freeze_model(loss_model, False)
if self.switch_eval:
train_model.train()
loss_model.eval()
def before_fit(self):
"Initialize smootheners."
self.generator,self.critic = self.model.generator,self.model.critic
self.gen_mode = self.gen_first
self.switch(self.gen_mode)
self.crit_losses,self.gen_losses = [],[]
self.gen_loss.reset() ; self.crit_loss.reset()
#self.recorder.no_val=True
#self.recorder.add_metric_names(['gen_loss', 'disc_loss'])
#self.imgs,self.titles = [],[]
def before_validate(self):
"Switch in generator mode for showing results."
self.switch(gen_mode=True)
def before_batch(self):
"Clamp the weights with `self.clip` if it's not None, set the correct input/target."
if self.training and self.clip is not None:
for p in self.critic.parameters(): p.data.clamp_(-self.clip, self.clip)
if not self.gen_mode:
(self.learn.xb,self.learn.yb) = (self.yb,self.xb)
def after_batch(self):
"Record `last_loss` in the proper list."
if not self.training: return
if self.gen_mode:
self.gen_loss.accumulate(self.learn)
self.gen_losses.append(self.gen_loss.value)
self.last_gen = self.learn.to_detach(self.pred)
else:
self.crit_loss.accumulate(self.learn)
self.crit_losses.append(self.crit_loss.value)
def before_epoch(self):
"Put the critic or the generator back to eval if necessary."
self.switch(self.gen_mode)
#def after_epoch(self):
# "Show a sample image."
# if not hasattr(self, 'last_gen') or not self.show_img: return
# data = self.learn.data
# img = self.last_gen[0]
# norm = getattr(data,'norm',False)
# if norm and norm.keywords.get('do_y',False): img = data.denorm(img)
# img = data.train_ds.y.reconstruct(img)
# self.imgs.append(img)
# self.titles.append(f'Epoch {epoch}')
# pbar.show_imgs(self.imgs, self.titles)
# return add_metrics(last_metrics, [getattr(self.smoothenerG,'smooth',None),getattr(self.smoothenerC,'smooth',None)])
def switch(self, gen_mode=None):
"Switch the model and loss function, if `gen_mode` is provided, in the desired mode."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
self._set_trainable()
self.model.switch(gen_mode)
self.loss_func.switch(gen_mode)
# Cell
class FixedGANSwitcher(Callback):
"Switcher to do `n_crit` iterations of the critic then `n_gen` iterations of the generator."
run_after = GANTrainer
def __init__(self, n_crit=1, n_gen=1): store_attr('n_crit,n_gen')
def before_train(self): self.n_c,self.n_g = 0,0
def after_batch(self):
"Switch the model if necessary."
if not self.training: return
if self.learn.gan_trainer.gen_mode:
self.n_g += 1
n_iter,n_in,n_out = self.n_gen,self.n_c,self.n_g
else:
self.n_c += 1
n_iter,n_in,n_out = self.n_crit,self.n_g,self.n_c
target = n_iter if isinstance(n_iter, int) else n_iter(n_in)
if target == n_out:
self.learn.gan_trainer.switch()
self.n_c,self.n_g = 0,0
# Cell
class AdaptiveGANSwitcher(Callback):
"Switcher that goes back to generator/critic when the loss goes below `gen_thresh`/`crit_thresh`."
run_after = GANTrainer
def __init__(self, gen_thresh=None, critic_thresh=None):
store_attr('gen_thresh,critic_thresh')
def after_batch(self):
"Switch the model if necessary."
if not self.training: return
if self.gan_trainer.gen_mode:
if self.gen_thresh is None or self.loss < self.gen_thresh: self.gan_trainer.switch()
else:
if self.critic_thresh is None or self.loss < self.critic_thresh: self.gan_trainer.switch()
# Cell
class GANDiscriminativeLR(Callback):
"`Callback` that handles multiplying the learning rate by `mult_lr` for the critic."
run_after = GANTrainer
def __init__(self, mult_lr=5.): self.mult_lr = mult_lr
def before_batch(self):
"Multiply the current lr if necessary."
if not self.learn.gan_trainer.gen_mode and self.training:
self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']*self.mult_lr)
def after_batch(self):
"Put the LR back to its value if necessary."
if not self.learn.gan_trainer.gen_mode: self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']/self.mult_lr)
# Cell
class InvisibleTensor(TensorBase):
def show(self, ctx=None, **kwargs): return ctx
# Cell
def generate_noise(fn, size=100): return cast(torch.randn(size), InvisibleTensor)
# Cell
@typedispatch
def show_batch(x:InvisibleTensor, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
# Cell
@typedispatch
def show_results(x:InvisibleTensor, y:TensorImage, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))]
return ctxs
# Cell
def gan_loss_from_func(loss_gen, loss_crit, weights_gen=None):
"Define loss functions for a GAN from `loss_gen` and `loss_crit`."
def _loss_G(fake_pred, output, target, weights_gen=weights_gen):
ones = fake_pred.new_ones(fake_pred.shape[0])
weights_gen = ifnone(weights_gen, (1.,1.))
return weights_gen[0] * loss_crit(fake_pred, ones) + weights_gen[1] * loss_gen(output, target)
def _loss_C(real_pred, fake_pred):
ones = real_pred.new_ones (real_pred.shape[0])
zeros = fake_pred.new_zeros(fake_pred.shape[0])
return (loss_crit(real_pred, ones) + loss_crit(fake_pred, zeros)) / 2
return _loss_G, _loss_C
# Cell
def _tk_mean(fake_pred, output, target): return fake_pred.mean()
def _tk_diff(real_pred, fake_pred): return real_pred.mean() - fake_pred.mean()
# Cell
@delegates()
class GANLearner(Learner):
"A `Learner` suitable for GANs."
def __init__(self, dls, generator, critic, gen_loss_func, crit_loss_func, switcher=None, gen_first=False,
switch_eval=True, show_img=True, clip=None, cbs=None, metrics=None, **kwargs):
gan = GANModule(generator, critic)
loss_func = GANLoss(gen_loss_func, crit_loss_func, gan)
if switcher is None: switcher = FixedGANSwitcher(n_crit=5, n_gen=1)
trainer = GANTrainer(clip=clip, switch_eval=switch_eval, gen_first=gen_first, show_img=show_img)
cbs = L(cbs) + L(trainer, switcher)
metrics = L(metrics) + L(*LossMetrics('gen_loss,crit_loss'))
super().__init__(dls, gan, loss_func=loss_func, cbs=cbs, metrics=metrics, **kwargs)
@classmethod
def from_learners(cls, gen_learn, crit_learn, switcher=None, weights_gen=None, **kwargs):
"Create a GAN from `learn_gen` and `learn_crit`."
losses = gan_loss_from_func(gen_learn.loss_func, crit_learn.loss_func, weights_gen=weights_gen)
return cls(gen_learn.dls, gen_learn.model, crit_learn.model, *losses, switcher=switcher, **kwargs)
@classmethod
def wgan(cls, dls, generator, critic, switcher=None, clip=0.01, switch_eval=False, **kwargs):
"Create a WGAN from `data`, `generator` and `critic`."
return cls(dls, generator, critic, _tk_mean, _tk_diff, switcher=switcher, clip=clip, switch_eval=switch_eval, **kwargs)
GANLearner.from_learners = delegates(to=GANLearner.__init__)(GANLearner.from_learners)
GANLearner.wgan = delegates(to=GANLearner.__init__)(GANLearner.wgan)
# source: zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/gan.py
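# Usage sketch for `GANLearner.wgan` above (added; illustrative only): a 64px WGAN wired from the
# helpers in this file. `dls` is a hypothetical DataLoaders yielding (noise, image) batches, e.g.
# built around `generate_noise`; the `opt_func` choice below is an assumption, not prescribed here.
def _example_wgan(dls):
    "Illustrative only: pair `basic_generator`/`basic_critic` through `GANLearner.wgan`."
    generator = basic_generator(out_size=64, n_channels=3, in_sz=100)
    critic = basic_critic(in_size=64, n_channels=3, n_extra_layers=1)
    return GANLearner.wgan(dls, generator, critic, opt_func=partial(Adam, mom=0.))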
__all__ = ['UnetBlock', 'ResizeToOrig', 'DynamicUnet']
# Cell
from ...torch_basics import *
from ...callback.hook import *
# Cell
def _get_sz_change_idxs(sizes):
"Get the indexes of the layers where the size of the activation changes."
feature_szs = [size[-1] for size in sizes]
sz_chg_idxs = list(np.where(np.array(feature_szs[:-1]) != np.array(feature_szs[1:]))[0])
return sz_chg_idxs
# Cell
class UnetBlock(Module):
"A quasi-UNet block, using `PixelShuffle_ICNR upsampling`."
@delegates(ConvLayer.__init__)
def __init__(self, up_in_c, x_in_c, hook, final_div=True, blur=False, act_cls=defaults.activation,
self_attention=False, init=nn.init.kaiming_normal_, norm_type=None, **kwargs):
self.hook = hook
self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, act_cls=act_cls, norm_type=norm_type)
self.bn = BatchNorm(x_in_c)
ni = up_in_c//2 + x_in_c
nf = ni if final_div else ni//2
self.conv1 = ConvLayer(ni, nf, act_cls=act_cls, norm_type=norm_type, **kwargs)
self.conv2 = ConvLayer(nf, nf, act_cls=act_cls, norm_type=norm_type,
xtra=SelfAttention(nf) if self_attention else None, **kwargs)
self.relu = act_cls()
apply_init(nn.Sequential(self.conv1, self.conv2), init)
def forward(self, up_in):
s = self.hook.stored
up_out = self.shuf(up_in)
ssh = s.shape[-2:]
if ssh != up_out.shape[-2:]:
up_out = F.interpolate(up_out, s.shape[-2:], mode='nearest')
cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1))
return self.conv2(self.conv1(cat_x))
# Cell
class ResizeToOrig(Module):
"Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`."
def __init__(self, mode='nearest'): self.mode = mode
def forward(self, x):
if x.orig.shape[-2:] != x.shape[-2:]:
x = F.interpolate(x, x.orig.shape[-2:], mode=self.mode)
return x
# Cell
class DynamicUnet(SequentialEx):
"Create a U-Net from a given architecture."
def __init__(self, encoder, n_out, img_size, blur=False, blur_final=True, self_attention=False,
y_range=None, last_cross=True, bottle=False, act_cls=defaults.activation,
init=nn.init.kaiming_normal_, norm_type=None, **kwargs):
imsize = img_size
sizes = model_sizes(encoder, size=imsize)
sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes)))
self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False)
x = dummy_eval(encoder, imsize).detach()
ni = sizes[-1][1]
middle_conv = nn.Sequential(ConvLayer(ni, ni*2, act_cls=act_cls, norm_type=norm_type, **kwargs),
ConvLayer(ni*2, ni, act_cls=act_cls, norm_type=norm_type, **kwargs)).eval()
x = middle_conv(x)
layers = [encoder, BatchNorm(ni), nn.ReLU(), middle_conv]
for i,idx in enumerate(sz_chg_idxs):
not_final = i!=len(sz_chg_idxs)-1
up_in_c, x_in_c = int(x.shape[1]), int(sizes[idx][1])
do_blur = blur and (not_final or blur_final)
sa = self_attention and (i==len(sz_chg_idxs)-3)
unet_block = UnetBlock(up_in_c, x_in_c, self.sfs[i], final_div=not_final, blur=do_blur, self_attention=sa,
act_cls=act_cls, init=init, norm_type=norm_type, **kwargs).eval()
layers.append(unet_block)
x = unet_block(x)
ni = x.shape[1]
if imsize != sizes[0][-2:]: layers.append(PixelShuffle_ICNR(ni, act_cls=act_cls, norm_type=norm_type))
layers.append(ResizeToOrig())
if last_cross:
layers.append(MergeLayer(dense=True))
ni += in_channels(encoder)
layers.append(ResBlock(1, ni, ni//2 if bottle else ni, act_cls=act_cls, norm_type=norm_type, **kwargs))
layers += [ConvLayer(ni, n_out, ks=1, act_cls=None, norm_type=norm_type, **kwargs)]
apply_init(nn.Sequential(layers[3], layers[-2]), init)
#apply_init(nn.Sequential(layers[2]), init)
if y_range is not None: layers.append(SigmoidRange(*y_range))
super().__init__(*layers)
def __del__(self):
        if hasattr(self, "sfs"): self.sfs.remove()
# source: zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/models/unet.py
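# Usage sketch for `DynamicUnet` above (added; illustrative only): `encoder` is a hypothetical cut
# backbone (e.g. the body of a resnet), not defined in this file.
def _example_dynamic_unet(encoder):
    "Illustrative only: a 2-class U-Net head over a 224px input."
    return DynamicUnet(encoder, n_out=2, img_size=(224, 224), self_attention=True)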
__all__ = ['init_cnn', 'XResNet', 'xresnet18', 'xresnet34', 'xresnet50', 'xresnet101', 'xresnet152', 'xresnet18_deep',
'xresnet34_deep', 'xresnet50_deep', 'xresnet18_deeper', 'xresnet34_deeper', 'xresnet50_deeper', 'se_kwargs1',
'se_kwargs2', 'se_kwargs3', 'g0', 'g1', 'g2', 'g3', 'xse_resnet18', 'xse_resnext18', 'xresnext18',
'xse_resnet34', 'xse_resnext34', 'xresnext34', 'xse_resnet50', 'xse_resnext50', 'xresnext50',
'xse_resnet101', 'xse_resnext101', 'xresnext101', 'xse_resnet152', 'xsenet154', 'xse_resnext18_deep',
'xse_resnext34_deep', 'xse_resnext50_deep', 'xse_resnext18_deeper', 'xse_resnext34_deeper',
'xse_resnext50_deeper']
# Cell
from ...torch_basics import *
from torchvision.models.utils import load_state_dict_from_url
# Cell
def init_cnn(m):
if getattr(m, 'bias', None) is not None: nn.init.constant_(m.bias, 0)
if isinstance(m, (nn.Conv1d,nn.Conv2d,nn.Conv3d,nn.Linear)): nn.init.kaiming_normal_(m.weight)
for l in m.children(): init_cnn(l)
# Cell
class XResNet(nn.Sequential):
@delegates(ResBlock)
def __init__(self, block, expansion, layers, p=0.0, c_in=3, n_out=1000, stem_szs=(32,32,64),
widen=1.0, sa=False, act_cls=defaults.activation, ndim=2, ks=3, stride=2, **kwargs):
store_attr('block,expansion,act_cls,ndim,ks')
if ks % 2 == 0: raise Exception('kernel size has to be odd!')
stem_szs = [c_in, *stem_szs]
stem = [ConvLayer(stem_szs[i], stem_szs[i+1], ks=ks, stride=stride if i==0 else 1,
act_cls=act_cls, ndim=ndim)
for i in range(3)]
block_szs = [int(o*widen) for o in [64,128,256,512] +[256]*(len(layers)-4)]
block_szs = [64//expansion] + block_szs
blocks = self._make_blocks(layers, block_szs, sa, stride, **kwargs)
super().__init__(
*stem, MaxPool(ks=ks, stride=stride, padding=ks//2, ndim=ndim),
*blocks,
AdaptiveAvgPool(sz=1, ndim=ndim), Flatten(), nn.Dropout(p),
nn.Linear(block_szs[-1]*expansion, n_out),
)
init_cnn(self)
def _make_blocks(self, layers, block_szs, sa, stride, **kwargs):
return [self._make_layer(ni=block_szs[i], nf=block_szs[i+1], blocks=l,
stride=1 if i==0 else stride, sa=sa and i==len(layers)-4, **kwargs)
for i,l in enumerate(layers)]
def _make_layer(self, ni, nf, blocks, stride, sa, **kwargs):
return nn.Sequential(
*[self.block(self.expansion, ni if i==0 else nf, nf, stride=stride if i==0 else 1,
sa=sa and i==(blocks-1), act_cls=self.act_cls, ndim=self.ndim, ks=self.ks, **kwargs)
for i in range(blocks)])
# Cell
def _xresnet(pretrained, expansion, layers, **kwargs):
# TODO pretrain all sizes. Currently will fail with non-xrn50
url = 'https://s3.amazonaws.com/fast-ai-modelzoo/xrn50_940.pth'
res = XResNet(ResBlock, expansion, layers, **kwargs)
if pretrained: res.load_state_dict(load_state_dict_from_url(url, map_location='cpu')['model'], strict=False)
return res
def xresnet18 (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [2, 2, 2, 2], **kwargs)
def xresnet34 (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [3, 4, 6, 3], **kwargs)
def xresnet50 (pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3, 4, 6, 3], **kwargs)
def xresnet101(pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3, 4, 23, 3], **kwargs)
def xresnet152(pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3, 8, 36, 3], **kwargs)
def xresnet18_deep (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [2,2,2,2,1,1], **kwargs)
def xresnet34_deep (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [3,4,6,3,1,1], **kwargs)
def xresnet50_deep (pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3,4,6,3,1,1], **kwargs)
def xresnet18_deeper(pretrained=False, **kwargs): return _xresnet(pretrained, 1, [2,2,1,1,1,1,1,1], **kwargs)
def xresnet34_deeper(pretrained=False, **kwargs): return _xresnet(pretrained, 1, [3,4,6,3,1,1,1,1], **kwargs)
def xresnet50_deeper(pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3,4,6,3,1,1,1,1], **kwargs)
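# Usage sketch (added; illustrative only): the factories above pass extra keyword arguments
# straight through to `XResNet`, e.g. dropout `p`, `n_out` or self-attention `sa`.
def _example_xresnet():
    "Illustrative only: a 10-class xresnet50 configured through pass-through kwargs."
    return xresnet50(n_out=10, sa=True, p=0.1)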
# Cell
se_kwargs1 = dict(groups=1 , reduction=16)
se_kwargs2 = dict(groups=32, reduction=16)
se_kwargs3 = dict(groups=32, reduction=0)
g0 = [2,2,2,2]
g1 = [3,4,6,3]
g2 = [3,4,23,3]
g3 = [3,8,36,3]
# Cell
def xse_resnet18(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 1, g0, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext18(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g0, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext18(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g0, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet34(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 1, g1, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext34(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g1, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext34(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g1, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet50(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 4, g1, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext50(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g1, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext50(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g1, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet101(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 4, g2, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext101(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g2, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext101(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g2, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet152(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 4, g3, n_out=n_out, **se_kwargs1, **kwargs)
def xsenet154(n_out=1000, pretrained=False, **kwargs):
    return XResNet(SEBlock, 4, g3, groups=64, reduction=16, p=0.2, n_out=n_out)
def xse_resnext18_deep (n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g0+[1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext34_deep (n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g1+[1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext50_deep (n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g1+[1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext18_deeper(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, [2,2,1,1,1,1,1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext34_deeper(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, [3,4,4,2,2,1,1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext50_deeper(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, [3,4,4,2,2,1,1,1], n_out=n_out, **se_kwargs2, **kwargs)
# source: zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/models/xresnet.py
__all__ = ['Config', 'URLs', 'download_url', 'download_data', 'file_extract', 'newest_folder', 'rename_extracted',
'untar_data']
# Cell
from ..torch_basics import *
# Cell
class Config:
"Setup config at `~/.fastai` unless it exists already."
config_path = Path(os.getenv('FASTAI_HOME', '~/.fastai')).expanduser()
config_file = config_path/'config.yml'
def __init__(self):
self.config_path.mkdir(parents=True, exist_ok=True)
if not self.config_file.exists(): self.create_config()
self.d = self.load_config()
def __getitem__(self,k):
k = k.lower()
if k not in self.d: k = k+'_path'
return Path(self.d[k])
def __getattr__(self,k):
if k=='d': raise AttributeError
return self[k]
def __setitem__(self,k,v): self.d[k] = str(v)
def __contains__(self,k): return k in self.d
def load_config(self):
"load and return config if version equals 2 in existing, else create new config."
with open(self.config_file, 'r') as f:
config = yaml.safe_load(f)
if 'version' in config and config['version'] == 2: return config
elif 'version' in config: self.create_config(config)
else: self.create_config()
return self.load_config()
def create_config(self, cfg=None):
"create new config with default paths and set `version` to 2."
config = {'data_path': str(self.config_path/'data'),
'archive_path': str(self.config_path/'archive'),
'storage_path': '/tmp',
'model_path': str(self.config_path/'models'),
'version': 2}
if cfg is not None:
cfg['version'] = 2
config = merge(config, cfg)
self.save_file(config)
def save(self): self.save_file(self.d)
def save_file(self, config):
"save config file at default config location `~/.fastai/config.yml`."
with self.config_file.open('w') as f: yaml.dump(config, f, default_flow_style=False)
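# Usage sketch (added; illustrative only): keys resolve through `__getitem__`, so 'data' and
# 'data_path' are interchangeable, and attribute access (`cfg.model`) uses the same lookup.
# Note that instantiating `Config` creates `~/.fastai/config.yml` if it does not exist yet.
def _example_config_paths():
    "Illustrative only: return two configured paths as `Path` objects."
    cfg = Config()
    return cfg['data'], cfg.model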
# Cell
class URLs():
"Global constants for dataset and model URLs."
LOCAL_PATH = Path.cwd()
MDL = 'http://files.fast.ai/models/'
S3 = 'https://s3.amazonaws.com/fast-ai-'
URL = f'{S3}sample/'
S3_IMAGE = f'{S3}imageclas/'
S3_IMAGELOC = f'{S3}imagelocal/'
S3_AUDI = f'{S3}audio/'
S3_NLP = f'{S3}nlp/'
S3_COCO = f'{S3}coco/'
S3_MODEL = f'{S3}modelzoo/'
# main datasets
ADULT_SAMPLE = f'{URL}adult_sample.tgz'
BIWI_SAMPLE = f'{URL}biwi_sample.tgz'
CIFAR = f'{URL}cifar10.tgz'
COCO_SAMPLE = f'{S3_COCO}coco_sample.tgz'
COCO_TINY = f'{S3_COCO}coco_tiny.tgz'
HUMAN_NUMBERS = f'{URL}human_numbers.tgz'
IMDB = f'{S3_NLP}imdb.tgz'
IMDB_SAMPLE = f'{URL}imdb_sample.tgz'
ML_SAMPLE = f'{URL}movie_lens_sample.tgz'
ML_100k = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
MNIST_SAMPLE = f'{URL}mnist_sample.tgz'
MNIST_TINY = f'{URL}mnist_tiny.tgz'
MNIST_VAR_SIZE_TINY = f'{S3_IMAGE}mnist_var_size_tiny.tgz'
PLANET_SAMPLE = f'{URL}planet_sample.tgz'
PLANET_TINY = f'{URL}planet_tiny.tgz'
IMAGENETTE = f'{S3_IMAGE}imagenette2.tgz'
IMAGENETTE_160 = f'{S3_IMAGE}imagenette2-160.tgz'
IMAGENETTE_320 = f'{S3_IMAGE}imagenette2-320.tgz'
IMAGEWOOF = f'{S3_IMAGE}imagewoof2.tgz'
IMAGEWOOF_160 = f'{S3_IMAGE}imagewoof2-160.tgz'
IMAGEWOOF_320 = f'{S3_IMAGE}imagewoof2-320.tgz'
IMAGEWANG = f'{S3_IMAGE}imagewang.tgz'
IMAGEWANG_160 = f'{S3_IMAGE}imagewang-160.tgz'
IMAGEWANG_320 = f'{S3_IMAGE}imagewang-320.tgz'
# kaggle competitions download dogs-vs-cats -p {DOGS.absolute()}
DOGS = f'{URL}dogscats.tgz'
# image classification datasets
CALTECH_101 = f'{S3_IMAGE}caltech_101.tgz'
CARS = f'{S3_IMAGE}stanford-cars.tgz'
CIFAR_100 = f'{S3_IMAGE}cifar100.tgz'
CUB_200_2011 = f'{S3_IMAGE}CUB_200_2011.tgz'
FLOWERS = f'{S3_IMAGE}oxford-102-flowers.tgz'
FOOD = f'{S3_IMAGE}food-101.tgz'
MNIST = f'{S3_IMAGE}mnist_png.tgz'
PETS = f'{S3_IMAGE}oxford-iiit-pet.tgz'
# NLP datasets
AG_NEWS = f'{S3_NLP}ag_news_csv.tgz'
AMAZON_REVIEWS = f'{S3_NLP}amazon_review_full_csv.tgz'
AMAZON_REVIEWS_POLARITY = f'{S3_NLP}amazon_review_polarity_csv.tgz'
DBPEDIA = f'{S3_NLP}dbpedia_csv.tgz'
MT_ENG_FRA = f'{S3_NLP}giga-fren.tgz'
SOGOU_NEWS = f'{S3_NLP}sogou_news_csv.tgz'
WIKITEXT = f'{S3_NLP}wikitext-103.tgz'
WIKITEXT_TINY = f'{S3_NLP}wikitext-2.tgz'
YAHOO_ANSWERS = f'{S3_NLP}yahoo_answers_csv.tgz'
YELP_REVIEWS = f'{S3_NLP}yelp_review_full_csv.tgz'
YELP_REVIEWS_POLARITY = f'{S3_NLP}yelp_review_polarity_csv.tgz'
# Image localization datasets
BIWI_HEAD_POSE = f"{S3_IMAGELOC}biwi_head_pose.tgz"
CAMVID = f'{S3_IMAGELOC}camvid.tgz'
CAMVID_TINY = f'{URL}camvid_tiny.tgz'
LSUN_BEDROOMS = f'{S3_IMAGE}bedroom.tgz'
PASCAL_2007 = f'{S3_IMAGELOC}pascal_2007.tgz'
PASCAL_2012 = f'{S3_IMAGELOC}pascal_2012.tgz'
# Audio classification datasets
MACAQUES = 'https://storage.googleapis.com/ml-animal-sounds-datasets/macaques.zip'
ZEBRA_FINCH = 'https://storage.googleapis.com/ml-animal-sounds-datasets/zebra_finch.zip'
# Medical Imaging datasets
#SKIN_LESION = f'{S3_IMAGELOC}skin_lesion.tgz'
SIIM_SMALL = f'{S3_IMAGELOC}siim_small.tgz'
TCGA_SMALL = f'{S3_IMAGELOC}tcga_small.tgz'
#Pretrained models
OPENAI_TRANSFORMER = f'{S3_MODEL}transformer.tgz'
WT103_FWD = f'{S3_MODEL}wt103-fwd.tgz'
WT103_BWD = f'{S3_MODEL}wt103-bwd.tgz'
def path(url='.', c_key='archive'):
"Return local path where to download based on `c_key`"
fname = url.split('/')[-1]
local_path = URLs.LOCAL_PATH/('models' if c_key=='models' else 'data')/fname
if local_path.exists(): return local_path
return Config()[c_key]/fname
# Cell
def download_url(url, dest, overwrite=False, pbar=None, show_progress=True, chunk_size=1024*1024,
timeout=4, retries=5):
"Download `url` to `dest` unless it exists and not `overwrite`"
if os.path.exists(dest) and not overwrite: return
s = requests.Session()
s.mount('http://',requests.adapters.HTTPAdapter(max_retries=retries))
# additional line to identify as a firefox browser, see fastai/#2438
s.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0'})
u = s.get(url, stream=True, timeout=timeout)
try: file_size = int(u.headers["Content-Length"])
except: show_progress = False
with open(dest, 'wb') as f:
nbytes = 0
if show_progress: pbar = progress_bar(range(file_size), leave=False, parent=pbar)
try:
if show_progress: pbar.update(0)
for chunk in u.iter_content(chunk_size=chunk_size):
nbytes += len(chunk)
if show_progress: pbar.update(nbytes)
f.write(chunk)
except requests.exceptions.ConnectionError as e:
fname = url.split('/')[-1]
data_dir = dest.parent
print(f'\n Download of {url} has failed after {retries} retries\n'
f' Fix the download manually:\n'
f'$ mkdir -p {data_dir}\n'
f'$ cd {data_dir}\n'
f'$ wget -c {url}\n'
f'$ tar xf {fname}\n'
f' And re-run your code once the download is successful\n')
# Cell
def download_data(url, fname=None, c_key='archive', force_download=False, timeout=4):
"Download `url` to `fname`."
fname = Path(fname or URLs.path(url, c_key=c_key))
fname.parent.mkdir(parents=True, exist_ok=True)
if not fname.exists() or force_download: download_url(url, fname, overwrite=force_download, timeout=timeout)
return fname
# Cell
def _get_check(url):
"internal function to get the hash of the file at `url`."
checks = json.load(open(Path(__file__).parent/'checks.txt', 'r'))
return checks.get(url, '')
def _check_file(fname):
"internal function to get the hash of the local file at `fname`."
size = os.path.getsize(fname)
with open(fname, "rb") as f: hash_nb = hashlib.md5(f.read(2**20)).hexdigest()
return [size,hash_nb]
# Cell
def _add_check(url, fname):
"Internal function to update the internal check file with `url` and check on `fname`."
checks = json.load(open(Path(__file__).parent/'checks.txt', 'r'))
checks[url] = _check_file(fname)
json.dump(checks, open(Path(__file__).parent/'checks.txt', 'w'), indent=2)
# Cell
def file_extract(fname, dest=None):
"Extract `fname` to `dest` using `tarfile` or `zipfile`."
if dest is None: dest = Path(fname).parent
fname = str(fname)
if fname.endswith('gz'): tarfile.open(fname, 'r:gz').extractall(dest)
    elif fname.endswith('zip'): zipfile.ZipFile(fname).extractall(dest)
else: raise Exception(f'Unrecognized archive: {fname}')
# Cell
def _try_from_storage(dest, storage):
"an internal function to create symbolic links for files from `storage` to `dest` if `storage` exists"
if not storage.exists(): return
os.makedirs(dest, exist_ok=True)
for f in storage.glob('*'): os.symlink(f, dest/f.name, target_is_directory=f.is_dir())
# Cell
def newest_folder(path):
"Return newest folder on path"
list_of_paths = path.glob('*')
return max(list_of_paths, key=lambda p: p.stat().st_ctime)
# Cell
def rename_extracted(dest):
"Rename file if different from dest"
extracted = newest_folder(dest.parent)
if not (extracted.name == dest.name): extracted.rename(dest)
# Cell
def untar_data(url, fname=None, dest=None, c_key='data', force_download=False, extract_func=file_extract, timeout=4):
"Download `url` to `fname` if `dest` doesn't exist, and un-tgz or unzip to folder `dest`."
default_dest = URLs.path(url, c_key=c_key).with_suffix('')
dest = default_dest if dest is None else Path(dest)/default_dest.name
fname = Path(fname or URLs.path(url))
if fname.exists() and _get_check(url) and _check_file(fname) != _get_check(url):
print("A new version of this dataset is available, downloading...")
force_download = True
if force_download:
if fname.exists(): os.remove(fname)
if dest.exists(): shutil.rmtree(dest)
if not dest.exists(): _try_from_storage(dest, URLs.path(url, c_key='storage').with_suffix(''))
if not dest.exists():
fname = download_data(url, fname=fname, c_key=c_key, timeout=timeout)
if _get_check(url) and _check_file(fname) != _get_check(url):
print(f"File downloaded is broken. Remove {fname} and try again.")
extract_func(fname, dest.parent)
rename_extracted(dest)
return dest | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/external.py | external.py |
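# Illustrative usage sketch (comments only; the first call needs network access).
# By default archives land in `~/.fastai/archive` and are extracted to `~/.fastai/data`:
#   path = untar_data(URLs.MNIST_TINY)       # returns e.g. ~/.fastai/data/mnist_tiny
#   (path/'train').ls()                      # `.ls()` is fastcore's `Path` helper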
__all__ = ['show_batch', 'show_results', 'TfmdDL', 'DataLoaders', 'FilteredBase', 'TfmdLists', 'decode_at', 'show_at',
'Datasets', 'test_set']
# Cell
from ..torch_basics import *
from .load import *
# Cell
@typedispatch
def show_batch(x, y, samples, ctxs=None, max_n=9, **kwargs):
if ctxs is None: ctxs = Inf.nones
if hasattr(samples[0], 'show'):
ctxs = [s.show(ctx=c, **kwargs) for s,c,_ in zip(samples,ctxs,range(max_n))]
else:
for i in range_of(samples[0]):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x, y, samples, outs, ctxs=None, max_n=9, **kwargs):
if ctxs is None: ctxs = Inf.nones
for i in range(len(samples[0])):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
for i in range(len(outs[0])):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))]
return ctxs
# Cell
#nbdev_comment _all_ = ["show_batch", "show_results"]
# Cell
_batch_tfms = ('after_item','before_batch','after_batch')
# Cell
@delegates()
class TfmdDL(DataLoader):
"Transformed `DataLoader`"
def __init__(self, dataset, bs=64, shuffle=False, num_workers=None, verbose=False, do_setup=True, **kwargs):
if num_workers is None: num_workers = min(16, defaults.cpus)
for nm in _batch_tfms: kwargs[nm] = Pipeline(kwargs.get(nm,None))
super().__init__(dataset, bs=bs, shuffle=shuffle, num_workers=num_workers, **kwargs)
if do_setup:
for nm in _batch_tfms:
pv(f"Setting up {nm}: {kwargs[nm]}", verbose)
kwargs[nm].setup(self)
def _one_pass(self):
b = self.do_batch([self.do_item(0)])
if self.device is not None: b = to_device(b, self.device)
its = self.after_batch(b)
self._n_inp = 1 if not isinstance(its, (list,tuple)) or len(its)==1 else len(its)-1
self._types = explode_types(its)
def _retain_dl(self,b):
if not getattr(self, '_types', None): self._one_pass()
return retain_types(b, typs=self._types)
@delegates(DataLoader.new)
def new(self, dataset=None, cls=None, **kwargs):
res = super().new(dataset, cls, do_setup=False, **kwargs)
if not hasattr(self, '_n_inp') or not hasattr(self, '_types'):
try:
self._one_pass()
res._n_inp,res._types = self._n_inp,self._types
except: print("Could not do one pass in your dataloader, there is something wrong in it")
else: res._n_inp,res._types = self._n_inp,self._types
return res
def before_iter(self):
super().before_iter()
split_idx = getattr(self.dataset, 'split_idx', None)
for nm in _batch_tfms:
f = getattr(self,nm)
if isinstance(f,Pipeline): f.split_idx=split_idx
def decode(self, b): return to_cpu(self.after_batch.decode(self._retain_dl(b)))
def decode_batch(self, b, max_n=9, full=True): return self._decode_batch(self.decode(b), max_n, full)
def _decode_batch(self, b, max_n=9, full=True):
f = self.after_item.decode
f1 = self.before_batch.decode
f = compose(f1, f, partial(getattr(self.dataset,'decode',noop), full = full))
return L(batch_to_samples(b, max_n=max_n)).map(f)
def _pre_show_batch(self, b, max_n=9):
"Decode `b` to be ready for `show_batch`"
b = self.decode(b)
if hasattr(b, 'show'): return b,None,None
its = self._decode_batch(b, max_n, full=False)
if not is_listy(b): b,its = [b],L((o,) for o in its)
return detuplify(b[:self.n_inp]),detuplify(b[self.n_inp:]),its
def show_batch(self, b=None, max_n=9, ctxs=None, show=True, unique=False, **kwargs):
if unique:
old_get_idxs = self.get_idxs
self.get_idxs = lambda: Inf.zeros
if b is None: b = self.one_batch()
if not show: return self._pre_show_batch(b, max_n=max_n)
show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs)
if unique: self.get_idxs = old_get_idxs
def show_results(self, b, out, max_n=9, ctxs=None, show=True, **kwargs):
x,y,its = self.show_batch(b, max_n=max_n, show=False)
b_out = type(b)(b[:self.n_inp] + (tuple(out) if is_listy(out) else (out,)))
x1,y1,outs = self.show_batch(b_out, max_n=max_n, show=False)
res = (x,x1,None,None) if its is None else (x, y, its, outs.itemgot(slice(self.n_inp,None)))
if not show: return res
show_results(*res, ctxs=ctxs, max_n=max_n, **kwargs)
@property
def n_inp(self):
if hasattr(self.dataset, 'n_inp'): return self.dataset.n_inp
if not hasattr(self, '_n_inp'): self._one_pass()
return self._n_inp
def to(self, device):
self.device = device
for tfm in self.after_batch.fs:
for a in L(getattr(tfm, 'parameters', None)): setattr(tfm, a, getattr(tfm, a).to(device))
return self
# Cell
add_docs(TfmdDL,
decode="Decode `b` using `tfms`",
decode_batch="Decode `b` entirely",
new="Create a new version of self with a few changed attributes",
show_batch="Show `b` (defaults to `one_batch`), a list of lists of pipeline outputs (i.e. output of a `DataLoader`)",
show_results="Show each item of `b` and `out`",
before_iter="override",
to="Put self and its transforms state on `device`")
# Cell
@docs
class DataLoaders(GetAttr):
"Basic wrapper around several `DataLoader`s."
_default='train'
def __init__(self, *loaders, path='.', device=None):
self.loaders,self.path = list(loaders),Path(path)
if device is not None or hasattr(loaders[0],'to'): self.device = device
def __getitem__(self, i): return self.loaders[i]
def new_empty(self):
loaders = [dl.new(dl.dataset.new_empty()) for dl in self.loaders]
return type(self)(*loaders, path=self.path, device=self.device)
def _set(i, self, v): self.loaders[i] = v
train ,valid = add_props(lambda i,x: x[i], _set)
train_ds,valid_ds = add_props(lambda i,x: x[i].dataset)
@property
def device(self): return self._device
@device.setter
def device(self, d):
for dl in self.loaders: dl.to(d)
self._device = d
def to(self, device):
self.device = device
return self
def cuda(self): return self.to(device=default_device())
def cpu(self): return self.to(device=torch.device('cpu'))
@classmethod
def from_dsets(cls, *ds, path='.', bs=64, device=None, dl_type=TfmdDL, **kwargs):
default = (True,) + (False,) * (len(ds)-1)
defaults = {'shuffle': default, 'drop_last': default}
for nm in _batch_tfms:
if nm in kwargs: kwargs[nm] = Pipeline(kwargs[nm])
kwargs = merge(defaults, {k: tuplify(v, match=ds) for k,v in kwargs.items()})
kwargs = [{k: v[i] for k,v in kwargs.items()} for i in range_of(ds)]
return cls(*[dl_type(d, bs=bs, **k) for d,k in zip(ds, kwargs)], path=path, device=device)
@classmethod
def from_dblock(cls, dblock, source, path='.', bs=64, val_bs=None, shuffle_train=True, device=None, **kwargs):
return dblock.dataloaders(source, path=path, bs=bs, val_bs=val_bs, shuffle_train=shuffle_train, device=device, **kwargs)
_docs=dict(__getitem__="Retrieve `DataLoader` at `i` (`0` is training, `1` is validation)",
train="Training `DataLoader`",
valid="Validation `DataLoader`",
train_ds="Training `Dataset`",
valid_ds="Validation `Dataset`",
to="Use `device`",
cuda="Use the gpu if available",
cpu="Use the cpu",
new_empty="Create a new empty version of `self` with the same transforms",
from_dblock="Create a dataloaders from a given `dblock`")
# Cell
class FilteredBase:
"Base class for lists with subsets"
_dl_type,_dbunch_type = TfmdDL,DataLoaders
def __init__(self, *args, dl_type=None, **kwargs):
if dl_type is not None: self._dl_type = dl_type
self.dataloaders = delegates(self._dl_type.__init__)(self.dataloaders)
super().__init__(*args, **kwargs)
@property
def n_subsets(self): return len(self.splits)
def _new(self, items, **kwargs): return super()._new(items, splits=self.splits, **kwargs)
    def subset(self, i): raise NotImplementedError
def dataloaders(self, bs=64, val_bs=None, shuffle_train=True, n=None, path='.', dl_type=None, dl_kwargs=None,
device=None, **kwargs):
if device is None: device=default_device()
if dl_kwargs is None: dl_kwargs = [{}] * self.n_subsets
if dl_type is None: dl_type = self._dl_type
drop_last = kwargs.pop('drop_last', shuffle_train)
dl = dl_type(self.subset(0), bs=bs, shuffle=shuffle_train, drop_last=drop_last, n=n, device=device,
**merge(kwargs, dl_kwargs[0]))
dls = [dl] + [dl.new(self.subset(i), bs=(bs if val_bs is None else val_bs), shuffle=False, drop_last=False,
n=None, **dl_kwargs[i]) for i in range(1, self.n_subsets)]
return self._dbunch_type(*dls, path=path, device=device)
FilteredBase.train,FilteredBase.valid = add_props(lambda i,x: x.subset(i))
# Cell
class TfmdLists(FilteredBase, L, GetAttr):
"A `Pipeline` of `tfms` applied to a collection of `items`"
_default='tfms'
def __init__(self, items, tfms, use_list=None, do_setup=True, split_idx=None, train_setup=True,
splits=None, types=None, verbose=False, dl_type=None):
super().__init__(items, use_list=use_list)
if dl_type is not None: self._dl_type = dl_type
self.splits = L([slice(None),[]] if splits is None else splits).map(mask2idxs)
if isinstance(tfms,TfmdLists): tfms = tfms.tfms
if isinstance(tfms,Pipeline): do_setup=False
self.tfms = Pipeline(tfms, split_idx=split_idx)
store_attr('types,split_idx')
if do_setup:
pv(f"Setting up {self.tfms}", verbose)
self.setup(train_setup=train_setup)
def _new(self, items, split_idx=None, **kwargs):
split_idx = ifnone(split_idx,self.split_idx)
return super()._new(items, tfms=self.tfms, do_setup=False, types=self.types, split_idx=split_idx, **kwargs)
def subset(self, i): return self._new(self._get(self.splits[i]), split_idx=i)
def _after_item(self, o): return self.tfms(o)
def __repr__(self): return f"{self.__class__.__name__}: {self.items}\ntfms - {self.tfms.fs}"
def __iter__(self): return (self[i] for i in range(len(self)))
def show(self, o, **kwargs): return self.tfms.show(o, **kwargs)
def decode(self, o, **kwargs): return self.tfms.decode(o, **kwargs)
def __call__(self, o, **kwargs): return self.tfms.__call__(o, **kwargs)
def overlapping_splits(self): return L(Counter(self.splits.concat()).values()).filter(gt(1))
def new_empty(self): return self._new([])
def setup(self, train_setup=True):
self.tfms.setup(self, train_setup)
if len(self) != 0:
x = super().__getitem__(0) if self.splits is None else super().__getitem__(self.splits[0])[0]
self.types = []
for f in self.tfms.fs:
self.types.append(getattr(f, 'input_types', type(x)))
x = f(x)
self.types.append(type(x))
types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
self.pretty_types = '\n'.join([f' - {t}' for t in types])
def infer_idx(self, x):
# TODO: check if we really need this, or can simplify
idx = 0
for t in self.types:
if isinstance(x, t): break
idx += 1
types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
pretty_types = '\n'.join([f' - {t}' for t in types])
assert idx < len(self.types), f"Expected an input of type in \n{pretty_types}\n but got {type(x)}"
return idx
def infer(self, x):
return compose_tfms(x, tfms=self.tfms.fs[self.infer_idx(x):], split_idx=self.split_idx)
def __getitem__(self, idx):
res = super().__getitem__(idx)
if self._after_item is None: return res
return self._after_item(res) if is_indexer(idx) else res.map(self._after_item)
# Cell
add_docs(TfmdLists,
setup="Transform setup with self",
decode="From `Pipeline`",
show="From `Pipeline`",
overlapping_splits="All splits that are in more than one split",
subset="New `TfmdLists` with same tfms that only includes items in `i`th split",
infer_idx="Finds the index where `self.tfms` can be applied to `x`, depending on the type of `x`",
infer="Apply `self.tfms` to `x` starting at the right tfm depending on the type of `x`",
new_empty="A new version of `self` but with no items")
# Cell
def decode_at(o, idx):
"Decoded item at `idx`"
return o.decode(o[idx])
# Cell
def show_at(o, idx, **kwargs):
"Show item at `idx`",
return o.show(o[idx], **kwargs)
# Cell
@docs
@delegates(TfmdLists)
class Datasets(FilteredBase):
"A dataset that creates a tuple from each `tfms`, passed through `item_tfms`"
def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
super().__init__(dl_type=dl_type)
self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
def __getitem__(self, it):
res = tuple([tl[it] for tl in self.tls])
return res if is_indexer(it) else list(zip(*res))
def __getattr__(self,k): return gather_attrs(self, k, 'tls')
def __dir__(self): return super().__dir__() + gather_attr_names(self, 'tls')
def __len__(self): return len(self.tls[0])
def __iter__(self): return (self[i] for i in range(len(self)))
def __repr__(self): return coll_repr(self)
def decode(self, o, full=True): return tuple(tl.decode(o_, full=full) for o_,tl in zip(o,tuplify(self.tls, match=o)))
def subset(self, i): return type(self)(tls=L(tl.subset(i) for tl in self.tls), n_inp=self.n_inp)
def _new(self, items, *args, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, **kwargs)
def overlapping_splits(self): return self.tls[0].overlapping_splits()
def new_empty(self): return type(self)(tls=[tl.new_empty() for tl in self.tls], n_inp=self.n_inp)
@property
def splits(self): return self.tls[0].splits
@property
def split_idx(self): return self.tls[0].tfms.split_idx
@property
def items(self): return self.tls[0].items
@items.setter
def items(self, v):
for tl in self.tls: tl.items = v
def show(self, o, ctx=None, **kwargs):
for o_,tl in zip(o,self.tls): ctx = tl.show(o_, ctx=ctx, **kwargs)
return ctx
@contextmanager
def set_split_idx(self, i):
old_split_idx = self.split_idx
for tl in self.tls: tl.tfms.split_idx = i
try: yield self
finally:
for tl in self.tls: tl.tfms.split_idx = old_split_idx
_docs=dict(
decode="Compose `decode` of all `tuple_tfms` then all `tfms` on `i`",
show="Show item `o` in `ctx`",
dataloaders="Get a `DataLoaders`",
overlapping_splits="All splits that are in more than one split",
subset="New `Datasets` that only includes subset `i`",
new_empty="Create a new empty version of the `self`, keeping only the transforms",
set_split_idx="Contextmanager to use the same `Datasets` with another `split_idx`"
)
# Cell
def test_set(dsets, test_items, rm_tfms=None, with_labels=False):
"Create a test set from `test_items` using validation transforms of `dsets`"
if isinstance(dsets, Datasets):
tls = dsets.tls if with_labels else dsets.tls[:dsets.n_inp]
test_tls = [tl._new(test_items, split_idx=1) for tl in tls]
if rm_tfms is None: rm_tfms = [tl.infer_idx(get_first(test_items)) for tl in test_tls]
else: rm_tfms = tuplify(rm_tfms, match=test_tls)
for i,j in enumerate(rm_tfms): test_tls[i].tfms.fs = test_tls[i].tfms.fs[j:]
return Datasets(tls=test_tls)
elif isinstance(dsets, TfmdLists):
test_tl = dsets._new(test_items, split_idx=1)
if rm_tfms is None: rm_tfms = dsets.infer_idx(get_first(test_items))
test_tl.tfms.fs = test_tl.tfms.fs[rm_tfms:]
return test_tl
else: raise Exception(f"This method requires using the fastai library to assemble your data. Expected a `Datasets` or a `TfmdLists` but got {dsets.__class__.__name__}")
# Cell
@patch
@delegates(TfmdDL.__init__)
def test_dl(self:DataLoaders, test_items, rm_type_tfms=None, with_labels=False, **kwargs):
"Create a test dataloader from `test_items` using validation transforms of `dls`"
test_ds = test_set(self.valid_ds, test_items, rm_tfms=rm_type_tfms, with_labels=with_labels
) if isinstance(self.valid_ds, (Datasets, TfmdLists)) else test_items
return self.valid.new(test_ds, **kwargs) | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/core.py | core.py |
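# Illustrative sketch (`dls`, `test_items` and `learn` are hypothetical): build a
# test DataLoader that reuses the validation-time transforms on the inputs only:
#   tdl = dls.test_dl(test_items)
#   preds = learn.get_preds(dl=tdl)          # `learn` would come from fastai.learner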
__all__ = ['get_files', 'FileGetter', 'image_extensions', 'get_image_files', 'ImageGetter', 'get_text_files',
'ItemGetter', 'AttrGetter', 'RandomSplitter', 'TrainTestSplitter', 'IndexSplitter', 'GrandparentSplitter',
'FuncSplitter', 'MaskSplitter', 'FileSplitter', 'ColSplitter', 'RandomSubsetSplitter', 'parent_label',
'RegexLabeller', 'ColReader', 'CategoryMap', 'Categorize', 'Category', 'MultiCategorize', 'MultiCategory',
'OneHotEncode', 'EncodedMultiCategorize', 'RegressionSetup', 'get_c', 'ToTensor', 'IntToFloatTensor',
'broadcast_vec', 'Normalize']
# Cell
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from sklearn.model_selection import train_test_split
# Cell
def _get_files(p, fs, extensions=None):
p = Path(p)
res = [p/f for f in fs if not f.startswith('.')
and ((not extensions) or f'.{f.split(".")[-1].lower()}' in extensions)]
return res
# Cell
def get_files(path, extensions=None, recurse=True, folders=None, followlinks=True):
"Get all the files in `path` with optional `extensions`, optionally with `recurse`, only in `folders`, if specified."
path = Path(path)
folders=L(folders)
extensions = setify(extensions)
extensions = {e.lower() for e in extensions}
if recurse:
res = []
for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)): # returns (dirpath, dirnames, filenames)
if len(folders) !=0 and i==0: d[:] = [o for o in d if o in folders]
else: d[:] = [o for o in d if not o.startswith('.')]
if len(folders) !=0 and i==0 and '.' not in folders: continue
res += _get_files(p, f, extensions)
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
res = _get_files(path, f, extensions)
return L(res)
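# Illustrative sketch (paths are hypothetical): collect files recursively, filtered
# by extension and restricted to given top-level folders:
#   fns = get_files('data/oxford-iiit-pet', extensions='.jpg', folders=['images'])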
# Cell
def FileGetter(suf='', extensions=None, recurse=True, folders=None):
"Create `get_files` partial function that searches path suffix `suf`, only in `folders`, if specified, and passes along args"
def _inner(o, extensions=extensions, recurse=recurse, folders=folders):
return get_files(o/suf, extensions, recurse, folders)
return _inner
# Cell
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
# Cell
def get_image_files(path, recurse=True, folders=None):
"Get image files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=image_extensions, recurse=recurse, folders=folders)
# Cell
def ImageGetter(suf='', recurse=True, folders=None):
"Create `get_image_files` partial that searches suffix `suf` and passes along `kwargs`, only in `folders`, if specified"
def _inner(o, recurse=recurse, folders=folders): return get_image_files(o/suf, recurse, folders)
return _inner
# Cell
def get_text_files(path, recurse=True, folders=None):
"Get text files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=['.txt'], recurse=recurse, folders=folders)
# Cell
class ItemGetter(ItemTransform):
"Creates a proper transform that applies `itemgetter(i)` (even on a tuple)"
_retain = False
def __init__(self, i): self.i = i
def encodes(self, x): return x[self.i]
# Cell
class AttrGetter(ItemTransform):
"Creates a proper transform that applies `attrgetter(nm)` (even on a tuple)"
_retain = False
def __init__(self, nm, default=None): store_attr()
def encodes(self, x): return getattr(x, self.nm, self.default)
# Cell
def RandomSplitter(valid_pct=0.2, seed=None):
"Create function that splits `items` between train/val with `valid_pct` randomly."
def _inner(o):
if seed is not None: torch.manual_seed(seed)
rand_idx = L(list(torch.randperm(len(o)).numpy()))
cut = int(valid_pct * len(o))
return rand_idx[cut:],rand_idx[:cut]
return _inner
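# Illustrative sketch: splitters return a function mapping `items` to
# (train_idxs, valid_idxs):
#   train_idx, valid_idx = RandomSplitter(valid_pct=0.2, seed=42)(list(range(10)))
#   len(train_idx), len(valid_idx)           # -> (8, 2)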
# Cell
def TrainTestSplitter(test_size=0.2, random_state=None, stratify=None, train_size=None, shuffle=True):
"Split `items` into random train and test subsets using sklearn train_test_split utility."
def _inner(o, **kwargs):
train,valid = train_test_split(range_of(o), test_size=test_size, random_state=random_state,
stratify=stratify, train_size=train_size, shuffle=shuffle)
return L(train), L(valid)
return _inner
# Cell
def IndexSplitter(valid_idx):
"Split `items` so that `val_idx` are in the validation set and the others in the training set"
def _inner(o):
train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))
return L(train_idx, use_list=True), L(valid_idx, use_list=True)
return _inner
# Cell
def _grandparent_idxs(items, name):
def _inner(items, name): return mask2idxs(Path(o).parent.parent.name == name for o in items)
return [i for n in L(name) for i in _inner(items,n)]
# Cell
def GrandparentSplitter(train_name='train', valid_name='valid'):
"Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
def _inner(o):
return _grandparent_idxs(o, train_name),_grandparent_idxs(o, valid_name)
return _inner
# Cell
def FuncSplitter(func):
"Split `items` by result of `func` (`True` for validation, `False` for training set)."
def _inner(o):
val_idx = mask2idxs(func(o_) for o_ in o)
return IndexSplitter(val_idx)(o)
return _inner
# Cell
def MaskSplitter(mask):
"Split `items` depending on the value of `mask`."
def _inner(o): return IndexSplitter(mask2idxs(mask))(o)
return _inner
# Cell
def FileSplitter(fname):
"Split `items` by providing file `fname` (contains names of valid items separated by newline)."
valid = Path(fname).read_text().split('\n')
def _func(x): return x.name in valid
def _inner(o): return FuncSplitter(_func)(o)
return _inner
# Cell
def ColSplitter(col='is_valid'):
"Split `items` (supposed to be a dataframe) by value in `col`"
def _inner(o):
assert isinstance(o, pd.DataFrame), "ColSplitter only works when your items are a pandas DataFrame"
valid_idx = (o.iloc[:,col] if isinstance(col, int) else o[col]).values.astype('bool')
return IndexSplitter(mask2idxs(valid_idx))(o)
return _inner
# Cell
def RandomSubsetSplitter(train_sz, valid_sz, seed=None):
"Take randoms subsets of `splits` with `train_sz` and `valid_sz`"
assert 0 < train_sz < 1
assert 0 < valid_sz < 1
assert train_sz + valid_sz <= 1.
def _inner(o):
if seed is not None: torch.manual_seed(seed)
train_len,valid_len = int(len(o)*train_sz),int(len(o)*valid_sz)
idxs = L(list(torch.randperm(len(o)).numpy()))
return idxs[:train_len],idxs[train_len:train_len+valid_len]
return _inner
# Cell
def parent_label(o):
"Label `item` with the parent folder name."
return Path(o).parent.name
# Cell
class RegexLabeller():
"Label `item` with regex `pat`."
def __init__(self, pat, match=False):
self.pat = re.compile(pat)
self.matcher = self.pat.match if match else self.pat.search
def __call__(self, o):
res = self.matcher(str(o))
assert res,f'Failed to find "{self.pat}" in "{o}"'
return res.group(1)
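# Illustrative sketch (the pattern mimics the Oxford-IIIT Pet file naming):
#   labeller = RegexLabeller(r'^(.+)_\d+\.jpg$')
#   labeller(Path('great_pyrenees_173.jpg')) # -> 'great_pyrenees'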
# Cell
class ColReader(DisplayedTransform):
"Read `cols` in `row` with potential `pref` and `suff`"
def __init__(self, cols, pref='', suff='', label_delim=None):
store_attr()
self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref
self.cols = L(cols)
def _do_one(self, r, c):
o = r[c] if isinstance(c, int) else r[c] if c=='name' else getattr(r, c)
if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o
if self.label_delim is None: return f'{self.pref}{o}{self.suff}'
else: return o.split(self.label_delim) if len(o)>0 else []
def __call__(self, o, **kwargs):
if len(self.cols) == 1: return self._do_one(o, self.cols[0])
return L(self._do_one(o, c) for c in self.cols)
# Cell
class CategoryMap(CollBase):
"Collection of categories with the reverse mapping in `o2i`"
def __init__(self, col, sort=True, add_na=False, strict=False):
if is_categorical_dtype(col):
items = L(col.cat.categories, use_list=True)
#Remove non-used categories while keeping order
if strict: items = L(o for o in items if o in col.unique())
else:
if not hasattr(col,'unique'): col = L(col, use_list=True)
# `o==o` is the generalized definition of non-NaN used by Pandas
items = L(o for o in col.unique() if o==o)
if sort: items = items.sorted()
self.items = '#na#' + items if add_na else items
self.o2i = defaultdict(int, self.items.val2idx()) if add_na else dict(self.items.val2idx())
def map_objs(self,objs):
"Map `objs` to IDs"
return L(self.o2i[o] for o in objs)
def map_ids(self,ids):
"Map `ids` to objects in vocab"
return L(self.items[o] for o in ids)
def __eq__(self,b): return all_equal(b,self)
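# Illustrative sketch:
#   cm = CategoryMap(['cat','dog','cat','fish'])
#   cm.items, cm.o2i['dog'], cm.map_ids([2,0])   # -> ['cat','dog','fish'], 1, ['fish','cat']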
# Cell
class Categorize(DisplayedTransform):
"Reversible transform of category string to `vocab` id"
loss_func,order=CrossEntropyLossFlat(),1
def __init__(self, vocab=None, sort=True, add_na=False):
if vocab is not None: vocab = CategoryMap(vocab, sort=sort, add_na=add_na)
store_attr()
def setups(self, dsets):
if self.vocab is None and dsets is not None: self.vocab = CategoryMap(dsets, sort=self.sort, add_na=self.add_na)
self.c = len(self.vocab)
def encodes(self, o):
try:
return TensorCategory(self.vocab.o2i[o])
except KeyError as e:
raise KeyError(f"Label '{o}' was not included in the training dataset") from e
def decodes(self, o): return Category (self.vocab [o])
# Cell
class Category(str, ShowTitle): _show_args = {'label': 'category'}
# Cell
class MultiCategorize(Categorize):
"Reversible transform of multi-category strings to `vocab` id"
loss_func,order=BCEWithLogitsLossFlat(),1
    def __init__(self, vocab=None, add_na=False): super().__init__(vocab=vocab,add_na=add_na,sort=vocab is None)
def setups(self, dsets):
if not dsets: return
if self.vocab is None:
vals = set()
for b in dsets: vals = vals.union(set(b))
self.vocab = CategoryMap(list(vals), add_na=self.add_na)
def encodes(self, o):
if not all(elem in self.vocab.o2i.keys() for elem in o):
diff = [elem for elem in o if elem not in self.vocab.o2i.keys()]
diff_str = "', '".join(diff)
raise KeyError(f"Labels '{diff_str}' were not included in the training dataset")
return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])
def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])
# Cell
class MultiCategory(L):
def show(self, ctx=None, sep=';', color='black', **kwargs):
return show_title(sep.join(self.map(str)), ctx=ctx, color=color, **kwargs)
# Cell
class OneHotEncode(DisplayedTransform):
"One-hot encodes targets"
order=2
def __init__(self, c=None): store_attr()
def setups(self, dsets):
if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))
if not self.c: warn("Couldn't infer the number of classes, please pass a value for `c` at init")
def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())
def decodes(self, o): return one_hot_decode(o, None)
# Cell
class EncodedMultiCategorize(Categorize):
"Transform of one-hot encoded multi-category that decodes with `vocab`"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab):
        super().__init__(vocab, sort=vocab is None)
self.c = len(vocab)
def encodes(self, o): return TensorMultiCategory(tensor(o).float())
def decodes(self, o): return MultiCategory (one_hot_decode(o, self.vocab))
# Cell
class RegressionSetup(DisplayedTransform):
"Transform that floatifies targets"
loss_func=MSELossFlat()
def __init__(self, c=None): store_attr()
def encodes(self, o): return tensor(o).float()
def decodes(self, o): return TitledFloat(o) if o.ndim==0 else TitledTuple(o_.item() for o_ in o)
def setups(self, dsets):
if self.c is not None: return
try: self.c = len(dsets[0]) if hasattr(dsets[0], '__len__') else 1
except: self.c = 0
# Cell
def get_c(dls):
if getattr(dls, 'c', False): return dls.c
if getattr(getattr(dls.train, 'after_item', None), 'c', False): return dls.train.after_item.c
if getattr(getattr(dls.train, 'after_batch', None), 'c', False): return dls.train.after_batch.c
vocab = getattr(dls, 'vocab', [])
if len(vocab) > 0 and is_listy(vocab[-1]): vocab = vocab[-1]
return len(vocab)
# Cell
class ToTensor(Transform):
"Convert item to appropriate tensor class"
order = 5
# Cell
class IntToFloatTensor(DisplayedTransform):
"Transform image to float tensor, optionally dividing by 255 (e.g. for images)."
order = 10 #Need to run after PIL transforms on the GPU
def __init__(self, div=255., div_mask=1): store_attr()
def encodes(self, o:TensorImage): return o.float().div_(self.div)
def encodes(self, o:TensorMask ): return o.long() // self.div_mask
def decodes(self, o:TensorImage): return ((o.clamp(0., 1.) * self.div).long()) if self.div else o
# Cell
def broadcast_vec(dim, ndim, *t, cuda=True):
"Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes"
v = [1]*ndim
v[dim] = -1
f = to_device if cuda else noop
return [f(tensor(o).view(*v)) for o in t]
# Cell
@docs
class Normalize(DisplayedTransform):
"Normalize/denorm batch of `TensorImage`"
parameters,order = L('mean', 'std'),99
def __init__(self, mean=None, std=None, axes=(0,2,3)): store_attr()
@classmethod
def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))
def setups(self, dl:DataLoader):
if self.mean is None or self.std is None:
x,*_ = dl.one_batch()
self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7
def encodes(self, x:TensorImage): return (x-self.mean) / self.std
def decodes(self, x:TensorImage):
f = to_cpu if x.device.type=='cpu' else noop
return (x*f(self.std) + f(self.mean))
_docs=dict(encodes="Normalize batch", decodes="Denormalize batch") | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/transforms.py | transforms.py |
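# Illustrative sketch: either pass stats explicitly (these are the commonly used
# ImageNet statistics) or let `setups` estimate them from one batch:
#   norm = Normalize.from_stats([0.485,0.456,0.406], [0.229,0.224,0.225])
#   norm = Normalize()                       # mean/std computed at setup time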
__all__ = ['TransformBlock', 'CategoryBlock', 'MultiCategoryBlock', 'RegressionBlock', 'DataBlock']
# Cell
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from .transforms import *
# Cell
class TransformBlock():
"A basic wrapper that links defaults transforms for the data block API"
def __init__(self, type_tfms=None, item_tfms=None, batch_tfms=None, dl_type=None, dls_kwargs=None):
self.type_tfms = L(type_tfms)
self.item_tfms = ToTensor + L(item_tfms)
self.batch_tfms = L(batch_tfms)
self.dl_type,self.dls_kwargs = dl_type,({} if dls_kwargs is None else dls_kwargs)
# Cell
def CategoryBlock(vocab=None, sort=True, add_na=False):
"`TransformBlock` for single-label categorical targets"
return TransformBlock(type_tfms=Categorize(vocab=vocab, sort=sort, add_na=add_na))
# Cell
def MultiCategoryBlock(encoded=False, vocab=None, add_na=False):
"`TransformBlock` for multi-label categorical targets"
tfm = EncodedMultiCategorize(vocab=vocab) if encoded else [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode]
return TransformBlock(type_tfms=tfm)
# Cell
def RegressionBlock(n_out=None):
"`TransformBlock` for float targets"
return TransformBlock(type_tfms=RegressionSetup(c=n_out))
# Cell
from inspect import isfunction,ismethod
# Cell
def _merge_grouper(o):
if isinstance(o, LambdaType): return id(o)
elif isinstance(o, type): return o
elif (isfunction(o) or ismethod(o)): return o.__qualname__
return o.__class__
# Cell
def _merge_tfms(*tfms):
"Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating"
g = groupby(concat(*tfms), _merge_grouper)
return L(v[-1] for k,v in g.items()).map(instantiate)
def _zip(x): return L(x).zip()
# Cell
@docs
@funcs_kwargs
class DataBlock():
"Generic container to quickly build `Datasets` and `DataLoaders`"
get_x=get_items=splitter=get_y = None
blocks,dl_type = (TransformBlock,TransformBlock),TfmdDL
_methods = 'get_items splitter get_y get_x'.split()
_msg = "If you wanted to compose several transforms in your getter don't forget to wrap them in a `Pipeline`."
def __init__(self, blocks=None, dl_type=None, getters=None, n_inp=None, item_tfms=None, batch_tfms=None, **kwargs):
blocks = L(self.blocks if blocks is None else blocks)
blocks = L(b() if callable(b) else b for b in blocks)
self.type_tfms = blocks.attrgot('type_tfms', L())
self.default_item_tfms = _merge_tfms(*blocks.attrgot('item_tfms', L()))
self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L()))
for b in blocks:
if getattr(b, 'dl_type', None) is not None: self.dl_type = b.dl_type
if dl_type is not None: self.dl_type = dl_type
self.dataloaders = delegates(self.dl_type.__init__)(self.dataloaders)
self.dls_kwargs = merge(*blocks.attrgot('dls_kwargs', {}))
self.n_inp = ifnone(n_inp, max(1, len(blocks)-1))
self.getters = ifnone(getters, [noop]*len(self.type_tfms))
if self.get_x:
if len(L(self.get_x)) != self.n_inp:
raise ValueError(f'get_x contains {len(L(self.get_x))} functions, but must contain {self.n_inp} (one for each input)\n{self._msg}')
self.getters[:self.n_inp] = L(self.get_x)
if self.get_y:
n_targs = len(self.getters) - self.n_inp
if len(L(self.get_y)) != n_targs:
raise ValueError(f'get_y contains {len(L(self.get_y))} functions, but must contain {n_targs} (one for each target)\n{self._msg}')
self.getters[self.n_inp:] = L(self.get_y)
if kwargs: raise TypeError(f'invalid keyword arguments: {", ".join(kwargs.keys())}')
self.new(item_tfms, batch_tfms)
def _combine_type_tfms(self): return L([self.getters, self.type_tfms]).map_zip(
lambda g,tt: (g.fs if isinstance(g, Pipeline) else L(g)) + tt)
def new(self, item_tfms=None, batch_tfms=None):
self.item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
self.batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
return self
@classmethod
def from_columns(cls, blocks=None, getters=None, get_items=None, **kwargs):
if getters is None: getters = L(ItemGetter(i) for i in range(2 if blocks is None else len(L(blocks))))
get_items = _zip if get_items is None else compose(get_items, _zip)
return cls(blocks=blocks, getters=getters, get_items=get_items, **kwargs)
def datasets(self, source, verbose=False):
self.source = source ; pv(f"Collecting items from {source}", verbose)
items = (self.get_items or noop)(source) ; pv(f"Found {len(items)} items", verbose)
splits = (self.splitter or RandomSplitter())(items)
pv(f"{len(splits)} datasets of sizes {','.join([str(len(s)) for s in splits])}", verbose)
return Datasets(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose)
def dataloaders(self, source, path='.', verbose=False, **kwargs):
dsets = self.datasets(source, verbose=verbose)
kwargs = {**self.dls_kwargs, **kwargs, 'verbose': verbose}
return dsets.dataloaders(path=path, after_item=self.item_tfms, after_batch=self.batch_tfms, **kwargs)
_docs = dict(new="Create a new `DataBlock` with other `item_tfms` and `batch_tfms`",
datasets="Create a `Datasets` object from `source`",
dataloaders="Create a `DataLoaders` object from `source`")
# Cell
def _short_repr(x):
if isinstance(x, tuple): return f'({", ".join([_short_repr(y) for y in x])})'
if isinstance(x, list): return f'[{", ".join([_short_repr(y) for y in x])}]'
if not isinstance(x, Tensor): return str(x)
if x.numel() <= 20 and x.ndim <=1: return str(x)
return f'{x.__class__.__name__} of size {"x".join([str(d) for d in x.shape])}'
# Cell
def _apply_pipeline(p, x):
print(f" {p}\n starting from\n {_short_repr(x)}")
for f in p.fs:
name = f.name
try:
x = f(x)
if name != "noop": print(f" applying {name} gives\n {_short_repr(x)}")
except Exception as e:
print(f" applying {name} failed.")
raise e
return x
# Cell
from .load import _collate_types
def _find_fail_collate(s):
s = L(*s)
for x in s[0]:
if not isinstance(x, _collate_types): return f"{type(x).__name__} is not collatable"
for i in range_of(s[0]):
try: _ = default_collate(s.itemgot(i))
except:
shapes = [getattr(o[i], 'shape', None) for o in s]
return f"Could not collate the {i}-th members of your tuples because got the following shapes\n{','.join([str(s) for s in shapes])}"
# Cell
@patch
def summary(self: DataBlock, source, bs=4, show_batch=False, **kwargs):
"Steps through the transform pipeline for one batch, and optionally calls `show_batch(**kwargs)` on the transient `Dataloaders`."
print(f"Setting-up type transforms pipelines")
dsets = self.datasets(source, verbose=True)
print("\nBuilding one sample")
for tl in dsets.train.tls:
_apply_pipeline(tl.tfms, get_first(dsets.train.items))
print(f"\nFinal sample: {dsets.train[0]}\n\n")
dls = self.dataloaders(source, bs=bs, verbose=True)
print("\nBuilding one batch")
if len([f for f in dls.train.after_item.fs if f.name != 'noop'])!=0:
print("Applying item_tfms to the first sample:")
s = [_apply_pipeline(dls.train.after_item, dsets.train[0])]
print(f"\nAdding the next {bs-1} samples")
s += [dls.train.after_item(dsets.train[i]) for i in range(1, bs)]
else:
print("No item_tfms to apply")
s = [dls.train.after_item(dsets.train[i]) for i in range(bs)]
if len([f for f in dls.train.before_batch.fs if f.name != 'noop'])!=0:
print("\nApplying before_batch to the list of samples")
s = _apply_pipeline(dls.train.before_batch, s)
else: print("\nNo before_batch transform to apply")
print("\nCollating items in a batch")
try:
b = dls.train.create_batch(s)
b = retain_types(b, s[0] if is_listy(s) else s)
except Exception as e:
print("Error! It's not possible to collate your items in a batch")
why = _find_fail_collate(s)
print("Make sure all parts of your samples are tensors of the same size" if why is None else why)
raise e
if len([f for f in dls.train.after_batch.fs if f.name != 'noop'])!=0:
print("\nApplying batch_tfms to the batch built")
b = to_device(b, dls.device)
b = _apply_pipeline(dls.train.after_batch, b)
else: print("\nNo batch_tfms to apply")
if show_batch: dls.show_batch(**kwargs) | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/block.py | block.py |
__all__ = ['fa_collate', 'fa_convert', 'SkipItemException', 'DataLoader']
# Cell
from ..torch_basics import *
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind
_loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter)
# Cell
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
# Cell
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
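# Illustrative sketch: unlike `default_collate`, nested sequence types are preserved:
#   fa_collate([(tensor(1), 'a'), (tensor(2), 'b')])
#   # -> (tensor([1, 2]), ['a', 'b']), keeping the outer tuple type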
# Cell
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
# Cell
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
# Cell
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
# Cell
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.") | zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/load.py | load.py |
__all__ = ['make_date', 'add_datepart', 'add_elapsed_times', 'cont_cat_split', 'df_shrink_dtypes', 'df_shrink',
'Tabular', 'TabularPandas', 'TabularProc', 'Categorify', 'FillStrategy', 'FillMissing', 'ReadTabBatch',
'TabDataLoader']
# Cell
from ..torch_basics import *
from ..data.all import *
# Cell
pd.set_option('mode.chained_assignment','raise')
# Cell
def make_date(df, date_field):
"Make sure `df[date_field]` is of the right date type."
field_dtype = df[date_field].dtype
if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
field_dtype = np.datetime64
if not np.issubdtype(field_dtype, np.datetime64):
df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)
# Cell
def add_datepart(df, field_name, prefix=None, drop=True, time=False):
"Helper function that adds columns relevant to a date in the column `field_name` of `df`."
make_date(df, field_name)
field = df[field_name]
prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name))
attr = ['Year', 'Month', 'Day', 'Dayofweek', 'Dayofyear', 'Is_month_end', 'Is_month_start',
'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[prefix + n] = getattr(field.dt, n.lower())
    # `Series.dt.week` is deprecated in newer pandas; use `isocalendar().week` when available
week = field.dt.isocalendar().week if hasattr(field.dt, 'isocalendar') else field.dt.week
df.insert(3, prefix+'Week', week)
mask = ~field.isna()
df[prefix + 'Elapsed'] = np.where(mask,field.values.astype(np.int64) // 10 ** 9,None)
if drop: df.drop(field_name, axis=1, inplace=True)
return df
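# Illustrative sketch (a hypothetical DataFrame):
#   df = pd.DataFrame({'date': ['2019-12-04', '2019-12-05'], 'sales': [1, 2]})
#   df = add_datepart(df, 'date')            # adds Year, Month, ..., Week, Elapsed; drops 'date'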
# Cell
def _get_elapsed(df,field_names, date_field, base_field, prefix):
for f in field_names:
day1 = np.timedelta64(1, 'D')
last_date,last_base,res = np.datetime64(),None,[]
for b,v,d in zip(df[base_field].values, df[f].values, df[date_field].values):
if last_base is None or b != last_base:
last_date,last_base = np.datetime64(),b
if v: last_date = d
res.append(((d-last_date).astype('timedelta64[D]') / day1))
df[prefix + f] = res
return df
# Cell
def add_elapsed_times(df, field_names, date_field, base_field):
"Add in `df` for each event in `field_names` the elapsed time according to `date_field` grouped by `base_field`"
field_names = list(L(field_names))
#Make sure date_field is a date and base_field a bool
df[field_names] = df[field_names].astype('bool')
make_date(df, date_field)
work_df = df[field_names + [date_field, base_field]]
work_df = work_df.sort_values([base_field, date_field])
work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'After')
work_df = work_df.sort_values([base_field, date_field], ascending=[True, False])
work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'Before')
for a in ['After' + f for f in field_names] + ['Before' + f for f in field_names]:
work_df[a] = work_df[a].fillna(0).astype(int)
for a,s in zip([True, False], ['_bw', '_fw']):
work_df = work_df.set_index(date_field)
tmp = (work_df[[base_field] + field_names].sort_index(ascending=a)
.groupby(base_field).rolling(7, min_periods=1).sum())
        tmp.drop(base_field, axis=1, inplace=True)
tmp.reset_index(inplace=True)
work_df.reset_index(inplace=True)
work_df = work_df.merge(tmp, 'left', [date_field, base_field], suffixes=['', s])
    work_df.drop(field_names, axis=1, inplace=True)
return df.merge(work_df, 'left', [date_field, base_field])
# Cell
def cont_cat_split(df, max_card=20, dep_var=None):
"Helper function that returns column names of cont and cat variables from given `df`."
cont_names, cat_names = [], []
for label in df:
if label in L(dep_var): continue
if (np.issubdtype(df[label].dtype, np.integer) and
df[label].unique().shape[0] > max_card or
np.issubdtype(df[label].dtype, np.floating)):
cont_names.append(label)
else: cat_names.append(label)
return cont_names, cat_names
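# Illustrative sketch (`df` is hypothetical): integer columns with more than
# `max_card` distinct values, and all float columns, are treated as continuous:
#   cont_names, cat_names = cont_cat_split(df, max_card=20, dep_var='salary')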
# Cell
def df_shrink_dtypes(df, skip=[], obj2cat=True, int2uint=False):
"Return any possible smaller data types for DataFrame columns. Allows `object`->`category`, `int`->`uint`, and exclusion."
# 1: Build column filter and typemap
excl_types, skip = {'category','datetime64[ns]','bool'}, set(skip)
typemap = {'int' : [(np.dtype(x), np.iinfo(x).min, np.iinfo(x).max) for x in (np.int8, np.int16, np.int32, np.int64)],
'uint' : [(np.dtype(x), np.iinfo(x).min, np.iinfo(x).max) for x in (np.uint8, np.uint16, np.uint32, np.uint64)],
'float' : [(np.dtype(x), np.finfo(x).min, np.finfo(x).max) for x in (np.float32, np.float64, np.longdouble)]
}
if obj2cat: typemap['object'] = 'category' # User wants to categorify dtype('Object'), which may not always save space
else: excl_types.add('object')
new_dtypes = {}
exclude = lambda dt: dt[1].name not in excl_types and dt[0] not in skip
for c, old_t in filter(exclude, df.dtypes.items()):
t = next((v for k,v in typemap.items() if old_t.name.startswith(k)), None)
if isinstance(t, list): # Find the smallest type that fits
if int2uint and t==typemap['int'] and df[c].min() >= 0: t=typemap['uint']
new_t = next((r[0] for r in t if r[1]<=df[c].min() and r[2]>=df[c].max()), None)
if new_t and new_t == old_t: new_t = None
else: new_t = t if isinstance(t, str) else None
if new_t: new_dtypes[c] = new_t
return new_dtypes
# Cell
def df_shrink(df, skip=[], obj2cat=True, int2uint=False):
"Reduce DataFrame memory usage, by casting to smaller types returned by `df_shrink_dtypes()`."
dt = df_shrink_dtypes(df, skip, obj2cat=obj2cat, int2uint=int2uint)
return df.astype(dt)
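# Illustrative sketch:
#   df = pd.DataFrame({'i': [1, 2, 3], 'f': [1.0, 2.0, 3.0], 's': ['a', 'b', 'a']})
#   df_shrink(df).dtypes                     # -> int8, float32, category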
# Cell
class _TabIloc:
"Get/set rows by iloc and cols by name"
def __init__(self,to): self.to = to
def __getitem__(self, idxs):
df = self.to.items
if isinstance(idxs,tuple):
rows,cols = idxs
cols = df.columns.isin(cols) if is_listy(cols) else df.columns.get_loc(cols)
else: rows,cols = idxs,slice(None)
return self.to.new(df.iloc[rows, cols])
# Cell
class Tabular(CollBase, GetAttr, FilteredBase):
"A `DataFrame` wrapper that knows which cols are cont/cat/y, and returns rows in `__getitem__`"
_default,with_cont='procs',True
def __init__(self, df, procs=None, cat_names=None, cont_names=None, y_names=None, y_block=None, splits=None,
do_setup=True, device=None, inplace=False, reduce_memory=True):
if inplace and splits is not None and pd.options.mode.chained_assignment is not None:
warn("Using inplace with splits will trigger a pandas error. Set `pd.options.mode.chained_assignment=None` to avoid it.")
if not inplace: df = df.copy()
if reduce_memory: df = df_shrink(df)
if splits is not None: df = df.iloc[sum(splits, [])]
self.dataloaders = delegates(self._dl_type.__init__)(self.dataloaders)
super().__init__(df)
self.y_names,self.device = L(y_names),device
if y_block is None and self.y_names:
# Make ys categorical if they're not numeric
ys = df[self.y_names]
if len(ys.select_dtypes(include='number').columns)!=len(ys.columns): y_block = CategoryBlock()
else: y_block = RegressionBlock()
if y_block is not None and do_setup:
if callable(y_block): y_block = y_block()
procs = L(procs) + y_block.type_tfms
self.cat_names,self.cont_names,self.procs = L(cat_names),L(cont_names),Pipeline(procs)
self.split = len(df) if splits is None else len(splits[0])
if do_setup: self.setup()
def new(self, df):
return type(self)(df, do_setup=False, reduce_memory=False, y_block=TransformBlock(),
**attrdict(self, 'procs','cat_names','cont_names','y_names', 'device'))
def subset(self, i): return self.new(self.items[slice(0,self.split) if i==0 else slice(self.split,len(self))])
def copy(self): self.items = self.items.copy(); return self
def decode(self): return self.procs.decode(self)
def decode_row(self, row): return self.new(pd.DataFrame(row).T).decode().items.iloc[0]
def show(self, max_n=10, **kwargs): display_df(self.new(self.all_cols[:max_n]).decode().items)
def setup(self): self.procs.setup(self)
def process(self): self.procs(self)
def loc(self): return self.items.loc
def iloc(self): return _TabIloc(self)
def targ(self): return self.items[self.y_names]
def x_names (self): return self.cat_names + self.cont_names
def n_subsets(self): return 2
def y(self): return self[self.y_names[0]]
def new_empty(self): return self.new(pd.DataFrame({}, columns=self.items.columns))
def to_device(self, d=None):
self.device = d
return self
def all_col_names (self):
ys = [n for n in self.y_names if n in self.items.columns]
return self.x_names + self.y_names if len(ys) == len(self.y_names) else self.x_names
properties(Tabular,'loc','iloc','targ','all_col_names','n_subsets','x_names','y')
# Cell
class TabularPandas(Tabular):
"A `Tabular` object with transforms"
def transform(self, cols, f, all_col=True):
if not all_col: cols = [c for c in cols if c in self.items.columns]
if len(cols) > 0: self[cols] = self[cols].transform(f)
# Cell
def _add_prop(cls, nm):
@property
def f(o): return o[list(getattr(o,nm+'_names'))]
@f.setter
def fset(o, v): o[getattr(o,nm+'_names')] = v
    setattr(cls, nm+'s', fset)  # `fset` is the full property: getter from `f`, setter from `fset`
_add_prop(Tabular, 'cat')
_add_prop(Tabular, 'cont')
_add_prop(Tabular, 'y')
_add_prop(Tabular, 'x')
_add_prop(Tabular, 'all_col')
# Cell
class TabularProc(InplaceTransform):
"Base class to write a non-lazy tabular processor for dataframes"
def setup(self, items=None, train_setup=False): #TODO: properly deal with train_setup
super().setup(getattr(items,'train',items), train_setup=False)
# Procs are called as soon as data is available
return self(items.items if isinstance(items,Datasets) else items)
@property
def name(self): return f"{super().name} -- {getattr(self,'__stored_args__',{})}"
# Cell
def _apply_cats (voc, add, c):
if not is_categorical_dtype(c):
return pd.Categorical(c, categories=voc[c.name][add:]).codes+add
return c.cat.codes+add #if is_categorical_dtype(c) else c.map(voc[c.name].o2i)
def _decode_cats(voc, c): return c.map(dict(enumerate(voc[c.name].items)))
# Cell
class Categorify(TabularProc):
"Transform the categorical variables to something similar to `pd.Categorical`"
order = 1
def setups(self, to):
store_attr(classes={n:CategoryMap(to.iloc[:,n].items, add_na=(n in to.cat_names)) for n in to.cat_names}, but='to')
def encodes(self, to): to.transform(to.cat_names, partial(_apply_cats, self.classes, 1))
def decodes(self, to): to.transform(to.cat_names, partial(_decode_cats, self.classes))
def __getitem__(self,k): return self.classes[k]
# Internal Cell
@Categorize
def setups(self, to:Tabular):
if len(to.y_names) > 0:
if self.vocab is None:
self.vocab = CategoryMap(getattr(to, 'train', to).iloc[:,to.y_names[0]].items, strict=True)
else:
self.vocab = CategoryMap(self.vocab, sort=False, add_na=self.add_na)
self.c = len(self.vocab)
return self(to)
@Categorize
def encodes(self, to:Tabular):
to.transform(to.y_names, partial(_apply_cats, {n: self.vocab for n in to.y_names}, 0), all_col=False)
return to
@Categorize
def decodes(self, to:Tabular):
to.transform(to.y_names, partial(_decode_cats, {n: self.vocab for n in to.y_names}), all_col=False)
return to
# Internal Cell
@Normalize
def setups(self, to:Tabular):
store_attr(but='to', means=dict(getattr(to, 'train', to).conts.mean()),
stds=dict(getattr(to, 'train', to).conts.std(ddof=0)+1e-7))
return self(to)
@Normalize
def encodes(self, to:Tabular):
to.conts = (to.conts-self.means) / self.stds
return to
@Normalize
def decodes(self, to:Tabular):
to.conts = (to.conts*self.stds ) + self.means
return to
# Cell
class FillStrategy:
"Namespace containing the various filling strategies."
def median (c,fill): return c.median()
def constant(c,fill): return fill
def mode (c,fill): return c.dropna().value_counts().idxmax()
# Cell
class FillMissing(TabularProc):
"Fill the missing values in continuous columns."
def __init__(self, fill_strategy=FillStrategy.median, add_col=True, fill_vals=None):
if fill_vals is None: fill_vals = defaultdict(int)
store_attr()
def setups(self, dsets):
missing = pd.isnull(dsets.conts).any()
store_attr(but='to', na_dict={n:self.fill_strategy(dsets[n], self.fill_vals[n])
for n in missing[missing].keys()})
self.fill_strategy = self.fill_strategy.__name__
def encodes(self, to):
missing = pd.isnull(to.conts)
for n in missing.any()[missing.any()].keys():
assert n in self.na_dict, f"nan values in `{n}` but not in setup training set"
for n in self.na_dict.keys():
to[n].fillna(self.na_dict[n], inplace=True)
if self.add_col:
to.loc[:,n+'_na'] = missing[n]
if n+'_na' not in to.cat_names: to.cat_names.append(n+'_na')
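# Cell
# Illustrative sketch (not part of the original module): wiring `TabularPandas`
# together with the procs above on a tiny hypothetical DataFrame. The split indices
# and column names are made up for the example.
def _example_tabular_pandas():
    import numpy as np, pandas as pd
    df = pd.DataFrame({'cat1':   ['a', 'b', 'a', 'b'],
                       'cont1':  [1., 2., np.nan, 4.],
                       'target': [0., 1., 0., 1.]})
    to = TabularPandas(df, procs=[Categorify, FillMissing, Normalize],
                       cat_names=['cat1'], cont_names=['cont1'],
                       y_names='target', splits=([0, 1, 2], [3]))
    # `to.cats` holds the categorical codes, `to.conts` the normalized continuous
    # values, and `to.train`/`to.valid` index into the two splits.
    return to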
# Cell
def _maybe_expand(o): return o[:,None] if o.ndim==1 else o
# Cell
class ReadTabBatch(ItemTransform):
"Transform `TabularPandas` values into a `Tensor` with the ability to decode"
def __init__(self, to): self.to = to.new_empty()
def encodes(self, to):
if not to.with_cont: res = (tensor(to.cats).long(),)
else: res = (tensor(to.cats).long(),tensor(to.conts).float())
ys = [n for n in to.y_names if n in to.items.columns]
if len(ys) == len(to.y_names): res = res + (tensor(to.targ),)
if to.device is not None: res = to_device(res, to.device)
return res
def decodes(self, o):
o = [_maybe_expand(o_) for o_ in to_np(o) if o_.size != 0]
vals = np.concatenate(o, axis=1)
try: df = pd.DataFrame(vals, columns=self.to.all_col_names)
except: df = pd.DataFrame(vals, columns=self.to.x_names)
to = self.to.new(df)
return to
# Cell
@typedispatch
def show_batch(x: Tabular, y, its, max_n=10, ctxs=None):
x.show()
# Cell
@delegates()
class TabDataLoader(TfmdDL):
"A transformed `DataLoader` for Tabular data"
do_item = noops
def __init__(self, dataset, bs=16, shuffle=False, after_batch=None, num_workers=0, **kwargs):
if after_batch is None: after_batch = L(TransformBlock().batch_tfms)+ReadTabBatch(dataset)
super().__init__(dataset, bs=bs, shuffle=shuffle, after_batch=after_batch, num_workers=num_workers, **kwargs)
def create_batch(self, b): return self.dataset.iloc[b]
TabularPandas._dl_type = TabDataLoader
# Internal Cell
@EncodedMultiCategorize
def setups(self, to:Tabular):
self.c = len(self.vocab)
return self(to)
@EncodedMultiCategorize
def encodes(self, to:Tabular): return to
@EncodedMultiCategorize
def decodes(self, to:Tabular):
to.transform(to.y_names, lambda c: c==1)
return to
# Internal Cell
@RegressionSetup
def setups(self, to:Tabular):
if self.c is not None: return
self.c = len(to.y_names)
return to
@RegressionSetup
def encodes(self, to:Tabular): return to
@RegressionSetup
def decodes(self, to:Tabular): return to
__all__ = ['emb_sz_rule', 'get_emb_sz', 'TabularModel', 'tabular_config']
# Cell
from ..torch_basics import *
from .core import *
# Cell
def emb_sz_rule(n_cat):
"Rule of thumb to pick embedding size corresponding to `n_cat`"
return min(600, round(1.6 * n_cat**0.56))
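# Cell
# Quick worked example (not part of the original module): the rule gives small
# embeddings for low-cardinality columns and caps very large ones at 600.
def _example_emb_sz_rule():
    return [emb_sz_rule(n) for n in (2, 10, 1000)]  # -> [2, 6, 77]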
# Cell
def _one_emb_sz(classes, n, sz_dict=None):
"Pick an embedding size for `n` depending on `classes` if not given in `sz_dict`."
sz_dict = ifnone(sz_dict, {})
n_cat = len(classes[n])
sz = sz_dict.get(n, int(emb_sz_rule(n_cat))) # rule of thumb
return n_cat,sz
# Cell
def get_emb_sz(to, sz_dict=None):
"Get default embedding size from `TabularPreprocessor` `proc` or the ones in `sz_dict`"
return [_one_emb_sz(to.classes, n, sz_dict) for n in to.cat_names]
# Cell
class TabularModel(Module):
"Basic model for tabular data."
def __init__(self, emb_szs, n_cont, out_sz, layers, ps=None, embed_p=0.,
y_range=None, use_bn=True, bn_final=False, bn_cont=True, act_cls=nn.ReLU(inplace=True)):
ps = ifnone(ps, [0]*len(layers))
if not is_listy(ps): ps = [ps]*len(layers)
self.embeds = nn.ModuleList([Embedding(ni, nf) for ni,nf in emb_szs])
self.emb_drop = nn.Dropout(embed_p)
self.bn_cont = nn.BatchNorm1d(n_cont) if bn_cont else None
n_emb = sum(e.embedding_dim for e in self.embeds)
self.n_emb,self.n_cont = n_emb,n_cont
sizes = [n_emb + n_cont] + layers + [out_sz]
actns = [act_cls for _ in range(len(sizes)-2)] + [None]
_layers = [LinBnDrop(sizes[i], sizes[i+1], bn=use_bn and (i!=len(actns)-1 or bn_final), p=p, act=a)
for i,(p,a) in enumerate(zip(ps+[0.],actns))]
if y_range is not None: _layers.append(SigmoidRange(*y_range))
self.layers = nn.Sequential(*_layers)
def forward(self, x_cat, x_cont=None):
if self.n_emb != 0:
x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
x = torch.cat(x, 1)
x = self.emb_drop(x)
if self.n_cont != 0:
if self.bn_cont is not None: x_cont = self.bn_cont(x_cont)
x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont
return self.layers(x)
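# Cell
# Illustrative sketch (not part of the original module): a `TabularModel` built from
# hand-picked embedding sizes and run on a random batch; the sizes are hypothetical.
def _example_tabular_model():
    emb_szs = [(10, 6), (7, 5)]            # (n_categories, embedding_dim) per cat column
    model = TabularModel(emb_szs, n_cont=3, out_sz=2, layers=[200, 100])
    x_cat  = torch.randint(0, 7, (16, 2))  # batch of 16 rows, 2 categorical columns
    x_cont = torch.randn(16, 3)            # 3 continuous columns
    return model(x_cat, x_cont).shape      # -> torch.Size([16, 2])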
# Cell
@delegates(TabularModel.__init__)
def tabular_config(**kwargs):
"Convenience function to easily create a config for `TabularModel`"
    return kwargs
__all__ = ['reverse_text', 'make_vocab', 'TensorText', 'LMTensorText', 'Numericalize', 'LMDataLoader', 'Pad_Input',
'pad_input', 'pad_chunk', 'pad_input_chunk', 'Pad_Chunk', 'SortedDL', 'TextBlock', 'TextDataLoaders']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
# Cell
def reverse_text(x): return x.flip(0)
# Cell
def make_vocab(count, min_freq=3, max_vocab=60000, special_toks=None):
"Create a vocab of `max_vocab` size from `Counter` `count` with items present more than `min_freq`"
vocab = [o for o,c in count.most_common(max_vocab) if c >= min_freq]
special_toks = ifnone(special_toks, defaults.text_spec_tok)
for o in reversed(special_toks): #Make sure all special tokens are in the vocab
if o in vocab: vocab.remove(o)
vocab.insert(0, o)
vocab = vocab[:max_vocab]
return vocab + [f'xxfake' for i in range(0, 8-len(vocab)%8)]
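# Cell
# Illustrative sketch (not part of the original module): building a vocab from a
# token `Counter`. Special tokens come first, then tokens meeting `min_freq`, and
# the list is padded with 'xxfake' entries up to a multiple of 8.
def _example_make_vocab():
    count = Counter({'the': 20, 'movie': 10, 'great': 5, 'rare': 1})
    vocab = make_vocab(count, min_freq=3, max_vocab=100)
    # -> the 9 special tokens, then 'the', 'movie', 'great' ('rare' is dropped),
    #    then 'xxfake' padding to reach a length divisible by 8.
    return vocab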
# Cell
class TensorText(TensorBase): pass
class LMTensorText(TensorText): pass
TensorText.__doc__ = "Semantic type for a tensor representing text"
LMTensorText.__doc__ = "Semantic type for a tensor representing text in language modeling"
# Cell
class Numericalize(Transform):
"Reversible transform of tokenized texts to numericalized ids"
def __init__(self, vocab=None, min_freq=3, max_vocab=60000, special_toks=None):
store_attr('vocab,min_freq,max_vocab,special_toks')
self.o2i = None if vocab is None else defaultdict(int, {v:k for k,v in enumerate(vocab)})
def setups(self, dsets):
if dsets is None: return
if self.vocab is None:
count = dsets.counter if getattr(dsets, 'counter', None) is not None else Counter(p for o in dsets for p in o)
if self.special_toks is None and hasattr(dsets, 'special_toks'):
self.special_toks = dsets.special_toks
self.vocab = make_vocab(count, min_freq=self.min_freq, max_vocab=self.max_vocab, special_toks=self.special_toks)
self.o2i = defaultdict(int, {v:k for k,v in enumerate(self.vocab) if v != 'xxfake'})
def encodes(self, o): return TensorText(tensor([self.o2i [o_] for o_ in o]))
def decodes(self, o): return L(self.vocab[o_] for o_ in o)
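# Cell
# Illustrative sketch (not part of the original module): round-tripping a small
# tokenized text through `Numericalize` with an explicit vocab.
def _example_numericalize():
    vocab = ['xxunk', 'xxpad', 'the', 'movie', 'was', 'great']
    num = Numericalize(vocab=vocab)
    ids = num(['the', 'movie', 'was', 'great'])  # TensorText([2, 3, 4, 5])
    return num.decode(ids)                       # -> ['the', 'movie', 'was', 'great']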
# Cell
def _maybe_first(o): return o[0] if isinstance(o, tuple) else o
# Cell
def _get_tokenizer(ds):
tok = getattr(ds, 'tokenizer', None)
if isinstance(tok, Tokenizer): return tok
if isinstance(tok, (list,L)):
for t in tok:
if isinstance(t, Tokenizer): return t
# Cell
def _get_lengths(ds):
tok = _get_tokenizer(ds)
if tok is None: return
return tok.get_lengths(ds.items)
# Cell
#TODO: add backward
@delegates()
class LMDataLoader(TfmdDL):
"A `DataLoader` suitable for language modeling"
def __init__(self, dataset, lens=None, cache=2, bs=64, seq_len=72, num_workers=0, **kwargs):
self.items = ReindexCollection(dataset, cache=cache, tfm=_maybe_first)
self.seq_len = seq_len
if lens is None: lens = _get_lengths(dataset)
if lens is None: lens = [len(o) for o in self.items]
self.lens = ReindexCollection(lens, idxs=self.items.idxs)
# The "-1" is to allow for final label, we throw away the end that's less than bs
corpus = round_multiple(sum(lens)-1, bs, round_down=True)
self.bl = corpus//bs #bl stands for batch length
self.n_batches = self.bl//(seq_len) + int(self.bl%seq_len!=0)
self.last_len = self.bl - (self.n_batches-1)*seq_len
self.make_chunks()
super().__init__(dataset=dataset, bs=bs, num_workers=num_workers, **kwargs)
self.n = self.n_batches*bs
def make_chunks(self): self.chunks = Chunks(self.items, self.lens)
def shuffle_fn(self,idxs):
self.items.shuffle()
self.make_chunks()
return idxs
def create_item(self, seq):
if seq>=self.n: raise IndexError
sl = self.last_len if seq//self.bs==self.n_batches-1 else self.seq_len
st = (seq%self.bs)*self.bl + (seq//self.bs)*self.seq_len
txt = self.chunks[st : st+sl+1]
return LMTensorText(txt[:-1]),txt[1:]
@delegates(TfmdDL.new)
def new(self, dataset=None, seq_len=None, **kwargs):
lens = self.lens.coll if dataset is None else None
seq_len = self.seq_len if seq_len is None else seq_len
return super().new(dataset=dataset, lens=lens, seq_len=seq_len, **kwargs)
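# Cell
# Illustrative sketch (not part of the original module): an `LMDataLoader` over a few
# pre-numericalized "texts". Each element is an (input, target) pair of `seq_len`
# token ids where the target is the input shifted by one position.
def _example_lm_dataloader():
    texts = [torch.arange(i, i + 20) for i in range(0, 100, 20)]  # 5 fake texts of 20 ids
    dl = LMDataLoader(texts, bs=2, seq_len=10)
    x, y = dl.one_batch()
    return x.shape, y.shape  # -> (torch.Size([2, 10]), torch.Size([2, 10]))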
# Cell
@typedispatch
def show_batch(x: TensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
if trunc_at is not None: samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
ctxs = show_batch[object](x, y, samples, max_n=max_n, ctxs=ctxs, **kwargs)
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def show_batch(x: LMTensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
samples = L((s[0].truncate(trunc_at), s[1].truncate(trunc_at)) for s in samples)
return show_batch[TensorText](x, None, samples, ctxs=ctxs, max_n=max_n, trunc_at=None, **kwargs)
# Cell
class Pad_Input(ItemTransform):
def encodes(self,samples, pad_idx=1, pad_fields=0, pad_first=False, backwards=False):
"Function that collect `samples` and adds padding"
self.pad_idx = pad_idx
pad_fields = L(pad_fields)
max_len_l = pad_fields.map(lambda f: max([len(s[f]) for s in samples]))
if backwards: pad_first = not pad_first
def _f(field_idx, x):
if field_idx not in pad_fields: return x
idx = pad_fields.items.index(field_idx) #TODO: remove items if L.index is fixed
sl = slice(-len(x), sys.maxsize) if pad_first else slice(0, len(x))
pad = x.new_zeros(max_len_l[idx]-x.shape[0])+pad_idx
x1 = torch.cat([pad, x] if pad_first else [x, pad])
if backwards: x1 = x1.flip(0)
return retain_type(x1, x)
return [tuple(map(lambda idxx: _f(*idxx), enumerate(s))) for s in samples]
def decodes(self, o:TensorText):
pad_idx = self.pad_idx if hasattr(self,'pad_idx') else 1
return o[o != pad_idx]
pad_input=Pad_Input()
# Cell
def pad_chunk(x,pad_idx=1, pad_first=True, seq_len=72, pad_len=10):
"Pad `x` by adding padding by chunks of size `seq_len`"
l = pad_len - x.shape[0]
pad_chunk = x.new_zeros((l//seq_len) * seq_len) + pad_idx
pad_res = x.new_zeros(l % seq_len) + pad_idx
x1 = torch.cat([pad_chunk, x, pad_res]) if pad_first else torch.cat([x, pad_chunk, pad_res])
return retain_type(x1, x)
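# Cell
# Illustrative sketch (not part of the original module): with `pad_first=True` the
# whole chunks of `seq_len` padding go in front and the remainder goes at the end.
def _example_pad_chunk():
    x = torch.tensor([5, 6, 7])
    return pad_chunk(x, pad_idx=1, seq_len=2, pad_len=8)
    # -> tensor([1, 1, 1, 1, 5, 6, 7, 1])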
# Cell
@delegates(pad_chunk)
def pad_input_chunk(samples, n_inp=1,**kwargs):
"Pad `samples` by adding padding by chunks of size `seq_len`"
max_len = max([len(s[n]) for s in samples for n in range(n_inp)])
padeds = [[pad_chunk(s[n],pad_len=max_len,**kwargs) for n in range(n_inp) ] for s in samples]
return [(*p, *s[n_inp:]) for p,s in zip(padeds,samples)]
# Cell
class Pad_Chunk(DisplayedTransform):
"Pad `samples` by adding padding by chunks of size `seq_len`"
def __init__(self, pad_idx=1, pad_first=True, seq_len=72,decode=True,**kwargs):
        store_attr('pad_idx, pad_first, seq_len, decode')
super().__init__(**kwargs)
def before_call(self, b):
"Set `self.max_len` before encodes"
self.max_len = max([x.shape[0] for xs in b for x in xs if isinstance(x,TensorText)])
def __call__(self, b, **kwargs):
self.before_call(b)
return super().__call__(tuple(b), **kwargs)
def encodes(self, x:TensorText):
return pad_chunk(x,pad_idx=self.pad_idx, pad_first=self.pad_first, seq_len=self.seq_len, pad_len=self.max_len)
def decodes(self, o:TensorText):
return o[o != self.pad_idx] if self.decode else o
# Cell
def _default_sort(x): return len(x[0])
@delegates(TfmdDL)
class SortedDL(TfmdDL):
"A `DataLoader` that goes throught the item in the order given by `sort_func`"
def __init__(self, dataset, sort_func=None, res=None, **kwargs):
super().__init__(dataset, **kwargs)
self.sort_func = _default_sort if sort_func is None else sort_func
if res is None and self.sort_func == _default_sort: res = _get_lengths(dataset)
self.res = [self.sort_func(self.do_item(i)) for i in range_of(self.dataset)] if res is None else res
if len(self.res) > 0: self.idx_max = np.argmax(self.res)
def get_idxs(self):
idxs = super().get_idxs()
if self.shuffle: return idxs
return sorted(idxs, key=lambda i: self.res[i], reverse=True)
def shuffle_fn(self,idxs):
idxs = np.random.permutation(len(self.dataset))
idx_max = np.where(idxs==self.idx_max)[0][0]
idxs[0],idxs[idx_max] = idxs[idx_max],idxs[0]
sz = self.bs*50
chunks = [idxs[i:i+sz] for i in range(0, len(idxs), sz)]
chunks = [sorted(s, key=lambda i: self.res[i], reverse=True) for s in chunks]
sort_idx = np.concatenate(chunks)
sz = self.bs
batches = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)]
        sort_idx = np.concatenate(np.random.permutation(batches[1:-1])) if len(batches) > 2 else np.array([],dtype=int)
sort_idx = np.concatenate((batches[0], sort_idx) if len(batches)==1 else (batches[0], sort_idx, batches[-1]))
return iter(sort_idx)
@delegates(TfmdDL.new)
def new(self, dataset=None, **kwargs):
if 'val_res' in kwargs and kwargs['val_res'] is not None: res = kwargs['val_res']
else: res = self.res if dataset is None else None
return super().new(dataset=dataset, res=res, **kwargs)
# Cell
class TextBlock(TransformBlock):
"A `TransformBlock` for texts"
@delegates(Numericalize.__init__)
def __init__(self, tok_tfm, vocab=None, is_lm=False, seq_len=72, backwards=False, **kwargs):
type_tfms = [tok_tfm, Numericalize(vocab, **kwargs)]
if backwards: type_tfms += [reverse_text]
return super().__init__(type_tfms=type_tfms,
dl_type=LMDataLoader if is_lm else SortedDL,
dls_kwargs={'seq_len': seq_len} if is_lm else {'before_batch': Pad_Chunk(seq_len=seq_len)})
@classmethod
@delegates(Tokenizer.from_df, keep=True)
def from_df(cls, text_cols, vocab=None, is_lm=False, seq_len=72, backwards=False, min_freq=3, max_vocab=60000, **kwargs):
"Build a `TextBlock` from a dataframe using `text_cols`"
return cls(Tokenizer.from_df(text_cols, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
backwards=backwards, min_freq=min_freq, max_vocab=max_vocab)
@classmethod
@delegates(Tokenizer.from_folder, keep=True)
def from_folder(cls, path, vocab=None, is_lm=False, seq_len=72, backwards=False, min_freq=3, max_vocab=60000, **kwargs):
"Build a `TextBlock` from a `path`"
return cls(Tokenizer.from_folder(path, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
backwards=backwards, min_freq=min_freq, max_vocab=max_vocab)
# Cell
class TextDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for NLP problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, text_vocab=None, is_lm=False,
tok_tfm=None, seq_len=72, backwards=False, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
blocks = [TextBlock.from_folder(path, text_vocab, is_lm, seq_len, backwards) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len, backwards)]
if not is_lm: blocks.append(CategoryBlock(vocab=vocab))
get_items = partial(get_text_files, folders=[train,valid]) if valid_pct is None else get_text_files
dblock = DataBlock(blocks=blocks,
get_items=get_items,
splitter=splitter,
get_y=None if is_lm else parent_label)
return cls.from_dblock(dblock, path, path=path, seq_len=seq_len, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, text_col=0, label_col=1, label_delim=None, y_block=None,
text_vocab=None, is_lm=False, valid_col=None, tok_tfm=None, seq_len=72, backwards=False, **kwargs):
"Create from `df` in `path` with `valid_pct`"
blocks = [TextBlock.from_df(text_col, text_vocab, is_lm, seq_len, backwards) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len, backwards)]
if y_block is None and not is_lm:
blocks.append(MultiCategoryBlock if is_listy(label_col) and len(label_col) > 1 else CategoryBlock)
if y_block is not None and not is_lm: blocks += (y_block if is_listy(y_block) else [y_block])
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=blocks,
get_x=ColReader("text"),
get_y=None if is_lm else ColReader(label_col, label_delim=label_delim),
splitter=splitter)
return cls.from_dblock(dblock, df, path=path, seq_len=seq_len, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `csv` file in `path/csv_fname`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
TextDataLoaders.from_csv = delegates(to=TextDataLoaders.from_df)(TextDataLoaders.from_csv)
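# Cell
# Illustrative sketch (not part of the original module): building classification
# dataloaders from a hypothetical DataFrame with 'text' and 'label' columns.
def _example_text_dataloaders():
    import pandas as pd
    df = pd.DataFrame({'text':  ['great movie', 'terrible plot', 'loved it', 'not good'],
                       'label': ['pos', 'neg', 'pos', 'neg']})
    dls = TextDataLoaders.from_df(df, text_col='text', label_col='label',
                                  valid_pct=0.25, bs=2)
    return dls  # dls.show_batch() displays decoded texts with their labels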
__all__ = ['match_embeds', 'load_ignore_keys', 'clean_raw_keys', 'load_model_text', 'TextLearner', 'decode_spec_tokens',
'LMLearner', 'language_model_learner', 'text_classifier_learner']
# Cell
from ..basics import *
from .core import *
from .data import *
from .models.core import *
from .models.awdlstm import *
from ..callback.rnn import *
from ..callback.progress import *
# Cell
def match_embeds(old_wgts, old_vocab, new_vocab):
"Convert the embedding in `old_wgts` to go from `old_vocab` to `new_vocab`."
bias, wgts = old_wgts.get('1.decoder.bias', None), old_wgts['0.encoder.weight']
wgts_m = wgts.mean(0)
new_wgts = wgts.new_zeros((len(new_vocab),wgts.size(1)))
if bias is not None:
bias_m = bias.mean(0)
new_bias = bias.new_zeros((len(new_vocab),))
old_o2i = old_vocab.o2i if hasattr(old_vocab, 'o2i') else {w:i for i,w in enumerate(old_vocab)}
for i,w in enumerate(new_vocab):
idx = old_o2i.get(w, -1)
new_wgts[i] = wgts[idx] if idx>=0 else wgts_m
if bias is not None: new_bias[i] = bias[idx] if idx>=0 else bias_m
old_wgts['0.encoder.weight'] = new_wgts
if '0.encoder_dp.emb.weight' in old_wgts: old_wgts['0.encoder_dp.emb.weight'] = new_wgts.clone()
old_wgts['1.decoder.weight'] = new_wgts.clone()
if bias is not None: old_wgts['1.decoder.bias'] = new_bias
return old_wgts
# Cell
def _get_text_vocab(dls):
vocab = dls.vocab
if isinstance(vocab, L): vocab = vocab[0]
return vocab
# Cell
def load_ignore_keys(model, wgts):
"Load `wgts` in `model` ignoring the names of the keys, just taking parameters in order"
sd = model.state_dict()
for k1,k2 in zip(sd.keys(), wgts.keys()): sd[k1].data = wgts[k2].data.clone()
return model.load_state_dict(sd)
# Cell
def _rm_module(n):
t = n.split('.')
for i in range(len(t)-1, -1, -1):
if t[i] == 'module':
t.pop(i)
break
return '.'.join(t)
# Cell
#For previous versions compatibility, remove for release
def clean_raw_keys(wgts):
keys = list(wgts.keys())
for k in keys:
t = k.split('.module')
if f'{_rm_module(k)}_raw' in keys: del wgts[k]
return wgts
# Cell
#For previous versions compatibility, remove for release
def load_model_text(file, model, opt, with_opt=None, device=None, strict=True):
"Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
distrib_barrier()
if isinstance(device, int): device = torch.device('cuda', device)
elif device is None: device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = set(state)=={'model', 'opt'}
model_state = state['model'] if hasopt else state
get_model(model).load_state_dict(clean_raw_keys(model_state), strict=strict)
if hasopt and ifnone(with_opt,True):
try: opt.load_state_dict(state['opt'])
except:
if with_opt: warn("Could not load the optimizer state.")
elif with_opt: warn("Saved filed doesn't contain an optimizer state.")
# Cell
@delegates(Learner.__init__)
class TextLearner(Learner):
"Basic class for a `Learner` in NLP."
def __init__(self, dls, model, alpha=2., beta=1., moms=(0.8,0.7,0.8), **kwargs):
super().__init__(dls, model, moms=moms, **kwargs)
self.add_cbs(rnn_cbs())
def save_encoder(self, file):
"Save the encoder to `file` in the model directory"
if rank_distrib(): return # don't save if child proc
encoder = get_model(self.model)[0]
if hasattr(encoder, 'module'): encoder = encoder.module
torch.save(encoder.state_dict(), join_path_file(file, self.path/self.model_dir, ext='.pth'))
def load_encoder(self, file, device=None):
"Load the encoder `file` from the model directory, optionally ensuring it's on `device`"
encoder = get_model(self.model)[0]
if device is None: device = self.dls.device
if hasattr(encoder, 'module'): encoder = encoder.module
distrib_barrier()
wgts = torch.load(join_path_file(file,self.path/self.model_dir, ext='.pth'), map_location=device)
encoder.load_state_dict(clean_raw_keys(wgts))
self.freeze()
return self
def load_pretrained(self, wgts_fname, vocab_fname, model=None):
"Load a pretrained model and adapt it to the data vocabulary."
old_vocab = load_pickle(vocab_fname)
new_vocab = _get_text_vocab(self.dls)
distrib_barrier()
wgts = torch.load(wgts_fname, map_location = lambda storage,loc: storage)
if 'model' in wgts: wgts = wgts['model'] #Just in case the pretrained model was saved with an optimizer
wgts = match_embeds(wgts, old_vocab, new_vocab)
load_ignore_keys(self.model if model is None else model, clean_raw_keys(wgts))
self.freeze()
return self
#For previous versions compatibility. Remove at release
@delegates(load_model_text)
def load(self, file, with_opt=None, device=None, **kwargs):
if device is None: device = self.dls.device
if self.opt is None: self.create_opt()
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
load_model_text(file, self.model, self.opt, device=device, **kwargs)
return self
# Cell
def decode_spec_tokens(tokens):
"Decode the special tokens in `tokens`"
new_toks,rule,arg = [],None,None
for t in tokens:
if t in [TK_MAJ, TK_UP, TK_REP, TK_WREP]: rule = t
elif rule is None: new_toks.append(t)
elif rule == TK_MAJ:
new_toks.append(t[:1].upper() + t[1:].lower())
rule = None
elif rule == TK_UP:
new_toks.append(t.upper())
rule = None
elif arg is None:
try: arg = int(t)
except: rule = None
else:
if rule == TK_REP: new_toks.append(t * arg)
else: new_toks += [t] * arg
return new_toks
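# Cell
# Illustrative sketch (not part of the original module): undoing the special tokens
# produced by the preprocessing rules.
def _example_decode_spec_tokens():
    return decode_spec_tokens(['xxmaj', 'paris', 'xxup', 'nlp', 'xxrep', '3', 'z'])
    # -> ['Paris', 'NLP', 'zzz']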
# Cell
class LMLearner(TextLearner):
"Add functionality to `TextLearner` when dealing with a language model"
def predict(self, text, n_words=1, no_unk=True, temperature=1., min_p=None, no_bar=False,
decoder=decode_spec_tokens, only_last_word=False):
"Return `text` and the `n_words` that come after"
self.model.reset()
idxs = idxs_all = self.dls.test_dl([text]).items[0].to(self.dls.device)
if no_unk: unk_idx = self.dls.vocab.index(UNK)
for _ in (range(n_words) if no_bar else progress_bar(range(n_words), leave=False)):
with self.no_bar(): preds,_ = self.get_preds(dl=[(idxs[None],)])
res = preds[0][-1]
if no_unk: res[unk_idx] = 0.
if min_p is not None:
if (res >= min_p).float().sum() == 0:
warn(f"There is no item with probability >= {min_p}, try a lower value.")
else: res[res < min_p] = 0.
if temperature != 1.: res.pow_(1 / temperature)
idx = torch.multinomial(res, 1).item()
idxs = idxs_all = torch.cat([idxs_all, idxs.new([idx])])
if only_last_word: idxs = idxs[-1][None]
num = self.dls.train_ds.numericalize
tokens = [num.vocab[i] for i in idxs_all if num.vocab[i] not in [BOS, PAD]]
sep = self.dls.train_ds.tokenizer.sep
return sep.join(decoder(tokens))
@delegates(Learner.get_preds)
def get_preds(self, concat_dim=1, **kwargs): return super().get_preds(concat_dim=1, **kwargs)
# Cell
from .models.core import _model_meta
# Cell
def _get_text_vocab(dls):
vocab = dls.vocab
if isinstance(vocab, L): vocab = vocab[0]
return vocab
# Cell
@delegates(Learner.__init__)
def language_model_learner(dls, arch, config=None, drop_mult=1., backwards=False, pretrained=True, pretrained_fnames=None, **kwargs):
"Create a `Learner` with a language model from `dls` and `arch`."
vocab = _get_text_vocab(dls)
model = get_language_model(arch, len(vocab), config=config, drop_mult=drop_mult)
meta = _model_meta[arch]
learn = LMLearner(dls, model, loss_func=CrossEntropyLossFlat(), splitter=meta['split_lm'], **kwargs)
url = 'url_bwd' if backwards else 'url'
if pretrained or pretrained_fnames:
if pretrained_fnames is not None:
fnames = [learn.path/learn.model_dir/f'{fn}.{ext}' for fn,ext in zip(pretrained_fnames, ['pth', 'pkl'])]
else:
if url not in meta:
warn("There are no pretrained weights for that architecture yet!")
return learn
model_path = untar_data(meta[url] , c_key='model')
try: fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
except IndexError: print(f'The model in {model_path} is incomplete, download again'); raise
learn = learn.load_pretrained(*fnames)
return learn
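# Cell
# Illustrative usage sketch (not part of the original module) of the typical ULMFiT
# fine-tuning flow from a user script; `df` is a hypothetical DataFrame with a 'text'
# column, and the pretrained AWD_LSTM weights are downloaded on first use.
#
# from fastai.text.all import *
# dls_lm = TextDataLoaders.from_df(df, text_col='text', is_lm=True, valid_pct=0.1)
# learn = language_model_learner(dls_lm, AWD_LSTM, drop_mult=0.3, metrics=accuracy)
# learn.fit_one_cycle(1, 2e-2)
# learn.save_encoder('finetuned')              # reuse the encoder for a classifier
# learn.predict('This movie was', n_words=5)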
# Cell
@delegates(Learner.__init__)
def text_classifier_learner(dls, arch, seq_len=72, config=None, backwards=False, pretrained=True, drop_mult=0.5, n_out=None,
lin_ftrs=None, ps=None, max_len=72*20, y_range=None, **kwargs):
"Create a `Learner` with a text classifier from `dls` and `arch`."
vocab = _get_text_vocab(dls)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
model = get_text_classifier(arch, len(vocab), n_out, seq_len=seq_len, config=config, y_range=y_range,
drop_mult=drop_mult, lin_ftrs=lin_ftrs, ps=ps, max_len=max_len)
meta = _model_meta[arch]
learn = TextLearner(dls, model, splitter=meta['split_clas'], **kwargs)
url = 'url_bwd' if backwards else 'url'
if pretrained:
if url not in meta:
warn("There are no pretrained weights for that architecture yet!")
return learn
model_path = untar_data(meta[url], c_key='model')
try: fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
except IndexError: print(f'The model in {model_path} is incomplete, download again'); raise
learn = learn.load_pretrained(*fnames, model=learn.model[0])
learn.freeze()
return learn
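# Cell
# Companion sketch (not part of the original module): a classifier built on top of
# the encoder saved in the language-model sketch above, again from a user script;
# `df` and its 'label' column are hypothetical.
#
# dls_clas = TextDataLoaders.from_df(df, text_col='text', label_col='label',
#                                    text_vocab=dls_lm.vocab)
# learn = text_classifier_learner(dls_clas, AWD_LSTM, drop_mult=0.5, metrics=accuracy)
# learn.load_encoder('finetuned')
# learn.fit_one_cycle(1, 2e-2)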
# Cell
@typedispatch
def show_results(x: LMTensorText, y, samples, outs, ctxs=None, max_n=10, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
for i,l in enumerate(['input', 'target']):
ctxs = [b.show(ctx=c, label=l, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [b.show(ctx=c, label='pred', **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))]
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def show_results(x: TensorText, y, samples, outs, ctxs=None, max_n=10, trunc_at=150, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def plot_top_losses(x: TensorText, y:TensorCategory, samples, outs, raws, losses, trunc_at=150, **kwargs):
rows = get_empty_df(len(samples))
samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
for i,l in enumerate(['input', 'target']):
rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(samples.itemgot(i),rows)]
outs = L(o + (TitledFloat(r.max().item()), TitledFloat(l.item())) for o,r,l in zip(outs, raws, losses))
for i,l in enumerate(['predicted', 'probability', 'loss']):
rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(outs.itemgot(i),rows)]
    display_df(pd.DataFrame(rows))
__all__ = ['UNK', 'PAD', 'BOS', 'EOS', 'FLD', 'TK_REP', 'TK_WREP', 'TK_UP', 'TK_MAJ', 'spec_add_spaces',
'rm_useless_spaces', 'replace_rep', 'replace_wrep', 'fix_html', 'replace_all_caps', 'replace_maj',
'lowercase', 'replace_space', 'BaseTokenizer', 'SpacyTokenizer', 'WordTokenizer', 'TokenizeWithRules',
'tokenize1', 'parallel_tokenize', 'fn_counter_pkl', 'fn_lengths_pkl', 'tokenize_folder', 'tokenize_files',
'tokenize_texts', 'tokenize_df', 'tokenize_csv', 'load_tokenized_csv', 'Tokenizer', 'eu_langs',
'SentencePieceTokenizer', 'SubwordTokenizer']
# Cell
from ..torch_basics import *
from ..data.all import *
# Cell
import spacy,html
from spacy.symbols import ORTH
# Cell
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
# Cell
#nbdev_comment _all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
# Cell
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
# Cell
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
# Cell
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
# Cell
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
# Cell
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
# Cell
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
# Cell
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
# Cell
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
# Cell
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
# Cell
def replace_maj(t):
"Replace tokens in Sentence Case by their lower version and add `TK_MAJ` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
# Cell
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
# Cell
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
# Cell
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
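# Cell
# Illustrative sketch (not part of the original module): applying the default
# processing rules by hand to see the special tokens they produce.
def _example_proc_rules():
    return compose(*defaults.text_proc_rules)("I REALLY liked it!!!!")
    # -> 'xxbos i xxup really liked it xxrep 4 !'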
# Cell
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
# Cell
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, buf_sz=5000):
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in self.special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.pipe,self.buf_sz = nlp.pipe,buf_sz
def __call__(self, items):
return (L(doc).attrgot('text') for doc in self.pipe(map(str,items), batch_size=self.buf_sz))
# Cell
WordTokenizer = SpacyTokenizer
# Cell
class TokenizeWithRules:
"A wrapper around `tok` which applies `rules`, then tokenizes, then applies `post_rules`"
def __init__(self, tok, rules=None, post_rules=None):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok
def __call__(self, batch):
return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
# Cell
@delegates(TokenizeWithRules)
def tokenize1(text, tok, **kwargs):
"Call `TokenizeWithRules` with a single text"
return first(TokenizeWithRules(tok=tok, **kwargs)([text]))
# Cell
def parallel_tokenize(items, tok=None, rules=None, n_workers=defaults.cpus, **kwargs):
"Calls optional `setup` on `tok` before launching `TokenizeWithRules` using `parallel_gen"
if tok is None: tok = WordTokenizer()
if hasattr(tok, 'setup'): tok.setup(items, rules)
return parallel_gen(TokenizeWithRules, items, tok=tok, rules=rules, n_workers=n_workers, **kwargs)
# Cell
fn_counter_pkl = 'counter.pkl'
fn_lengths_pkl = 'lengths.pkl'
# Cell
def _tokenize_files(func, files, path, output_dir=None, output_names=None, n_workers=defaults.cpus, rules=None, tok=None,
encoding='utf8', skip_if_exists=False):
"Tokenize text `files` in parallel using `n_workers`"
if tok is None: tok = WordTokenizer()
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
if skip_if_exists and output_dir.exists(): return output_dir
output_dir.mkdir(exist_ok=True)
if output_names is None: output_names = L(output_dir/f.relative_to(path) for f in files)
rules = partial(Path.read_text, encoding=encoding) + L(ifnone(rules, defaults.text_proc_rules.copy()))
lengths,counter = {},Counter()
for i,tok in parallel_tokenize(files, tok, rules, n_workers=n_workers):
out = func(i,output_dir)
out.mk_write(' '.join(tok))
lengths[str(files[i].relative_to(path))] = len(tok)
counter.update(tok)
save_pickle(output_dir/fn_lengths_pkl, lengths)
save_pickle(output_dir/fn_counter_pkl, counter)
return output_dir
# Cell
@delegates(_tokenize_files)
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, skip_if_exists=True, **kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
files = get_files(path, extensions=extensions, recurse=True, folders=folders)
def _f(i,output_dir): return output_dir/files[i].relative_to(path)
return _tokenize_files(_f, files, path, skip_if_exists=skip_if_exists, **kwargs)
# Cell
@delegates(_tokenize_files)
def tokenize_files(files, path, output_dir, output_names=None, **kwargs):
"Tokenize text `files` in parallel using `n_workers`"
if output_names is None: output_names = L(output_dir/f.relative_to(path) for f in files)
def _f(i,output_dir): return output_dir/output_names[i]
return _tokenize_files(_f, files, path, output_dir=output_dir, **kwargs)
# Cell
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
# Cell
def tokenize_texts(texts, n_workers=defaults.cpus, rules=None, tok=None):
"Tokenize `texts` in parallel using `n_workers`"
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
outputs = L(parallel_tokenize(texts, tok=tok, rules=rules, n_workers=n_workers)
).sorted().itemgot(1)
return outputs
# Cell
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok=None, tok_text_col="text"):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers` and stores them in `df[tok_text_col]`"
text_cols = [df.columns[c] if isinstance(c, int) else c for c in L(text_cols)]
#mark_fields defaults to False if there is one column of texts, True if there are multiple
if mark_fields is None: mark_fields = len(text_cols)>1
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok, rules, n_workers=n_workers)
).sorted().itemgot(1)
other_cols = df.columns[~df.columns.isin(text_cols)]
res = df[other_cols].copy()
res[tok_text_col] = outputs
res[f'{tok_text_col}_length'] = [len(o) for o in outputs]
return res,Counter(outputs.concat())
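# Cell
# Illustrative sketch (not part of the original module): tokenizing a DataFrame
# column. The result keeps the other columns and adds `text` (token lists) and
# `text_length`, alongside a Counter of token frequencies.
def _example_tokenize_df():
    import pandas as pd
    df = pd.DataFrame({'text': ['Hello world!', 'fastai makes NLP easy'], 'label': [0, 1]})
    tok_df, count = tokenize_df(df, text_cols='text', n_workers=0)
    return tok_df.columns.tolist(), count.most_common(3)  # ['label', 'text', 'text_length'], ...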
# Cell
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok=None, header='infer', chunksize=50000):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
mark_fields=mark_fields, tok=tok)
out.text = out.text.str.join(' ')
out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
cnt.update(c)
save_pickle(outname.with_suffix('.pkl'), cnt)
# Cell
def load_tokenized_csv(fname):
"Utility function to quickly load a tokenized csv ans the corresponding counter"
fname = Path(fname)
out = pd.read_csv(fname)
for txt_col in out.columns[1:-1]:
out[txt_col] = tuple(out[txt_col].str.split(' '))
return out,load_pickle(fname.with_suffix('.pkl'))
# Cell
class Tokenizer(Transform):
"Provides a consistent `Transform` interface to tokenizers operating on `DataFrame`s and folders"
input_types = (str, list, L, tuple, Path)
def __init__(self, tok, rules=None, counter=None, lengths=None, mode=None, sep=' '):
if isinstance(tok,type): tok=tok()
store_attr('tok,counter,lengths,mode,sep')
self.rules = defaults.text_proc_rules if rules is None else rules
@classmethod
@delegates(tokenize_df, keep=True)
def from_df(cls, text_cols, tok=None, rules=None, sep=' ', **kwargs):
if tok is None: tok = WordTokenizer()
res = cls(tok, rules=rules, mode='df')
res.kwargs,res.train_setup = merge({'tok': tok}, kwargs),False
res.text_cols,res.sep = text_cols,sep
return res
@classmethod
@delegates(tokenize_folder, keep=True)
def from_folder(cls, path, tok=None, rules=None, **kwargs):
path = Path(path)
if tok is None: tok = WordTokenizer()
output_dir = tokenize_folder(path, tok=tok, rules=rules, **kwargs)
res = cls(tok, counter=load_pickle(output_dir/fn_counter_pkl),
lengths=load_pickle(output_dir/fn_lengths_pkl), rules=rules, mode='folder')
res.path,res.output_dir = path,output_dir
return res
def setups(self, dsets):
if not self.mode == 'df' or not isinstance(dsets.items, pd.DataFrame): return
dsets.items,count = tokenize_df(dsets.items, self.text_cols, rules=self.rules, **self.kwargs)
if self.counter is None: self.counter = count
return dsets
def encodes(self, o:Path):
if self.mode=='folder' and str(o).startswith(str(self.path)):
tok = self.output_dir/o.relative_to(self.path)
return L(tok.read_text().split(' '))
else: return self._tokenize1(o.read_text())
def encodes(self, o:str): return self._tokenize1(o)
def _tokenize1(self, o): return first(self.tok([compose(*self.rules)(o)]))
def get_lengths(self, items):
if self.lengths is None: return None
if self.mode == 'df':
            if isinstance(items, pd.DataFrame) and 'text_length' in items.columns: return items['text_length'].values
if self.mode == 'folder':
try:
res = [self.lengths[str(Path(i).relative_to(self.path))] for i in items]
if len(res) == len(items): return res
except: return None
def decodes(self, o): return TitledStr(self.sep.join(o))
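# Cell
# Illustrative sketch (not part of the original module): a `Tokenizer` built from a
# DataFrame column, applied to a single string; the exact token split comes from spacy.
def _example_tokenizer():
    tok = Tokenizer.from_df('text')
    return tok('Hello World!!!')
    # e.g. ['xxbos', 'xxmaj', 'hello', 'xxmaj', 'world', 'xxrep', '3', '!']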
# Cell
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
# Cell
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"SentencePiece tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece!=0.1.90,!=0.1.91`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return max(res,29)
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1 --minloglevel=2",
f"--user_defined_symbols={','.join(spec_tokens)} --hard_vocab_limit=false"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules=None):
from sentencepiece import SentencePieceProcessor
if rules is None: rules = []
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
f.write(f'{t}\n')
sp_model = self.train(raw_text_path)
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
return {'sp_model': sp_model}
def __call__(self, items):
if self.tok is None: self.setup(items)
for t in items: yield self.tok.EncodeAsPieces(t)
# Cell
SubwordTokenizer = SentencePieceTokenizer