file_path | content | repo
---|---|---|
python/tvm/contrib/popen_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Multiprocessing via Popen.
This module provides a multi-processing pool backed by Popen.
with additional timeout support.
"""
import os
import sys
import struct
import threading
import subprocess
import concurrent.futures
from enum import IntEnum
from collections import namedtuple
import pickle
def kill_child_processes(pid):
"""Kill all child processes recursively for a given pid.
Parameters
----------
pid : int
The process id whose child processes will be killed.
"""
# pylint: disable=import-outside-toplevel
import psutil
try:
parent = psutil.Process(pid)
children = parent.children(recursive=True)
except psutil.NoSuchProcess:
return
for process in children:
try:
process.kill()
except psutil.NoSuchProcess:
pass
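# A minimal usage sketch (illustrative addition, not part of the original
# module): spawn a sleeping child process, then reap it with
# kill_child_processes. Assumes psutil is installed.
def _example_kill_child_processes():
    import subprocess
    import time

    child = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(60)"])
    time.sleep(0.5)  # give the child a moment to start
    kill_child_processes(os.getpid())  # kills `child` and any of its descendants
    child.wait()  # reap the killed child so it does not linger as a zombie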
class StatusKind(IntEnum):
"""Running and return value status."""
RUNNING = 0
COMPLETE = 1
EXCEPTION = 2
TIMEOUT = 3
class MapResult(namedtuple("MapResult", ["status", "value"])):
"""Result of map_with_error_catching.
Parameters
----------
status : StatusKind
The status of the result.
value : Any
The result value.
"""
__slots__ = []
class PopenWorker:
"""A subprocess worker via Popen.
PopenWorker provides a low-level
API to interact with a separate process via Popen.
Parameters
----------
initializer: callable or None
A callable initializer, or None
initargs: Tuple[object]
A tuple of args for the initializer
maximum_uses: Optional[int]
The maximum number of times a process can be used before being recycled,
i.e. killed and restarted. If `None`, the process will be reused until
an operation times out.
stdout: Union[None, int, IO[Any]]
The standard output streams handler specified for the popen process.
stderr: Union[None, int, IO[Any]]
The standard error streams handler specified for the popen process.
"""
def __init__(self, initializer=None, initargs=(), maximum_uses=None, stdout=None, stderr=None):
self._proc = None
self._initializer = initializer
self._initargs = initargs
self._maximum_uses = maximum_uses
self._remaining_uses = None
self._stdout = stdout
self._stderr = stderr
if self._initializer is not None and not callable(self._initializer):
raise TypeError("initializer must be callable for PopenWorker")
def __del__(self):
try:
self.kill()
except ImportError:
pass
def kill(self):
"""Kill the current running process and cleanup.
Note
----
The worker can start a new process when send is called again.
"""
if self._proc is not None:
# allow graceful shutdown
try:
self._writer.close()
except IOError:
pass
try:
self._reader.close()
except IOError:
pass
# kill all child processes recursively
try:
kill_child_processes(self._proc.pid)
except TypeError:
pass
try:
self._proc.kill()
except OSError:
pass
# Join the child process to avoid zombie processes
self.join(timeout=1.0)
self._proc = None
self._remaining_uses = None
def _start(self):
"""Start a new subprocess if nothing is available"""
if self._proc is not None:
return
# connect subprocess with a pair of pipes
main_read, worker_write = os.pipe()
worker_read, main_write = os.pipe()
cmd = [sys.executable, "-m", "tvm.exec.popen_worker"]
if sys.platform == "win32":
# pylint: disable=import-outside-toplevel
import msvcrt
worker_read_handle = msvcrt.get_osfhandle(worker_read)
worker_write_handle = msvcrt.get_osfhandle(worker_write)
os.set_handle_inheritable(worker_read_handle, True)
os.set_handle_inheritable(worker_write_handle, True)
cmd += [str(worker_read_handle), str(worker_write_handle)]
self._proc = subprocess.Popen(
cmd, close_fds=False, stdout=self._stdout, stderr=self._stderr
)
else:
cmd += [str(worker_read), str(worker_write)]
self._proc = subprocess.Popen(
cmd, pass_fds=(worker_read, worker_write), stdout=self._stdout, stderr=self._stderr
)
# close worker side of the pipe
os.close(worker_read)
os.close(worker_write)
self._reader = os.fdopen(main_read, "rb")
self._writer = os.fdopen(main_write, "wb")
def join(self, timeout=None):
"""Join the current process worker before it terminates.
Parameters
----------
timeout: Optional[number]
Timeout value, block at most timeout seconds if it
is a positive number.
"""
if self._proc:
try:
self._proc.wait(timeout)
except subprocess.TimeoutExpired:
pass
def is_alive(self):
"""Check if the process is alive"""
if self._proc:
return self._proc.poll() is None
return False
def send(self, fn, args=(), kwargs=None, timeout=None):
"""Send a new function task fn(*args, **kwargs) to the subprocess.
Parameters
----------
fn : function
The function to be invoked.
args : list
Positional arguments.
kwargs : dict
Keyword arguments
timeout : float
Timeout value when executing the function
Note
----
The caller must call recv before calling the next send in
order to make sure the timeout and child process exit
won't affect the later requests.
"""
# use cloud pickle
# pylint: disable=import-outside-toplevel
import cloudpickle
if self._proc is not None and self._maximum_uses and self._remaining_uses == 0:
# Time to recycle the process.
self.kill()
if self._proc is None:
self._start()
# init
if self._initializer is not None:
self.send(self._initializer, self._initargs)
self.recv()
# N.B. The initializer doesn't count as a "use"
self._remaining_uses = self._maximum_uses
kwargs = {} if not kwargs else kwargs
data = cloudpickle.dumps((fn, args, kwargs, timeout), protocol=pickle.HIGHEST_PROTOCOL)
try:
self._writer.write(struct.pack("<i", len(data)))
self._writer.write(data)
self._writer.flush()
except IOError:
pass
if self._remaining_uses:
self._remaining_uses -= 1
def _child_process_error(self):
"""Raise a child process error."""
# kill and lazily restart the process in the next send.
self.kill()
return ChildProcessError("Subprocess terminated")
def recv(self):
"""Receive the result of the last send.
Returns
-------
result: object
The result of the last send.
Raises
------
ChildProcessError: if the child process exited abnormally.
TimeoutError: if timeout happens
Exception: if other exception happens during the execution.
"""
# pylint: disable=import-outside-toplevel
import cloudpickle
try:
len_data = self._reader.read(4)
except IOError:
raise self._child_process_error()
if len(len_data) == 0:
raise self._child_process_error()
try:
recv_bytes = struct.unpack("<i", len_data)[0]
status, value = cloudpickle.loads(self._reader.read(recv_bytes))
except IOError:
raise self._child_process_error()
if status == StatusKind.COMPLETE:
return value
if status == StatusKind.EXCEPTION:
raise value
assert status == StatusKind.TIMEOUT
# kill and lazily restart the process in the next send.
self.kill()
raise TimeoutError()
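# A minimal usage sketch (illustrative addition, not part of the original
# module): send a function to a PopenWorker and receive its result. Every
# send() must be paired with a recv(), as noted in the send() docstring.
def _example_popen_worker():
    worker = PopenWorker()
    worker.send(lambda x, y: x + y, args=(1, 2), timeout=10)
    result = worker.recv()  # 3; raises TimeoutError if the call timed out
    worker.kill()
    return result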
class PopenPoolExecutor:
"""An parallel executor backed by Popen processes.
Parameters
----------
max_workers : int
Maximum number of workers
timeout : float
Timeout value for each function submit.
initializer: callable or None
A callable initializer, or None
initargs: Tuple[object]
A tuple of args for the initializer
maximum_process_uses: Optional[int]
The maximum number of times each process can be used before being recycled,
i.e. killed and restarted. If `None`, processes will be reused until an
operation times out.
stdout: Union[None, int, IO[Any]]
The standard output streams handler specified for the workers in the pool.
stderr: Union[None, int, IO[Any]]
The standard error streams handler specified for the workers in the pool.
Note
----
If max_workers is None then the number returned by
os.cpu_count() is used. This aligns with the
behavior of multiprocessing.Pool.
"""
def __init__(
self,
max_workers=None,
timeout=None,
initializer=None,
initargs=(),
maximum_process_uses=None,
stdout=None,
stderr=None,
):
if max_workers is None:
max_workers = os.cpu_count()
# Use an internal thread pool to send to popen workers
self._threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
self._timeout = timeout
self._worker_map = {}
self._lock = threading.Lock()
self._initializer = initializer
self._initargs = initargs
self._maximum_process_uses = maximum_process_uses
self._stdout = stdout
self._stderr = stderr
if self._initializer is not None and not callable(self._initializer):
raise TypeError("initializer must be callable for PopenPoolExecutor")
def __del__(self):
self._lock.acquire()
for worker in self._worker_map.values():
try:
worker.kill()
except ImportError:
pass
self._lock.release()
self._threadpool.shutdown()
def _worker_run(self, fn, args, kwargs):
"""Internal thread runner."""
self._lock.acquire()
tid = threading.get_ident()
if tid not in self._worker_map:
proc = PopenWorker(
self._initializer,
self._initargs,
self._maximum_process_uses,
self._stdout,
self._stderr,
)
self._worker_map[tid] = proc
else:
proc = self._worker_map[tid]
self._lock.release()
proc.send(fn, args, kwargs, self._timeout)
return proc.recv()
def _worker_run_with_error_catching(self, fn, args, kwargs) -> MapResult:
# pylint: disable=broad-except
try:
return MapResult(status=StatusKind.COMPLETE, value=self._worker_run(fn, args, kwargs))
except TimeoutError as exception:
return MapResult(status=StatusKind.TIMEOUT, value=exception)
except Exception as exception:
return MapResult(status=StatusKind.EXCEPTION, value=exception)
def submit(self, fn, *args, **kwargs) -> concurrent.futures.Future:
"""Submit a new function job to the pool
Parameters
----------
fn : function
The function to be invoked.
args : list
Positional arguments.
kwargs : dict
Keyword arguments
Returns
-------
future : concurrent.futures.Future
A future that can be used to access the result.
"""
# pylint: disable=unnecessary-lambda
worker = lambda *args: self._worker_run(*args)
return self._threadpool.submit(worker, fn, args, kwargs)
def map_with_error_catching(self, fn, iterator):
"""Same as map, but catches exceptions and return them instead.
Parameters
----------
fn : function
The function to be invoked.
iterator : Iterator
Input iterator.
Returns
-------
out_iter : Iterator[MapResult]
The result iterator.
"""
worker = lambda x: self._worker_run_with_error_catching(fn, (x,), None)
return self._threadpool.map(worker, iterator)
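# A minimal usage sketch (illustrative addition, not part of the original
# module): run tasks through the pool via submit() and
# map_with_error_catching(), inspecting the MapResult status.
def _example_popen_pool_executor():
    pool = PopenPoolExecutor(max_workers=2, timeout=5)
    future = pool.submit(lambda x: x * x, 4)
    assert future.result() == 16
    for res in pool.map_with_error_catching(lambda x: 1 // x, [1, 0]):
        if res.status == StatusKind.COMPLETE:
            print("value:", res.value)
        else:  # StatusKind.EXCEPTION or StatusKind.TIMEOUT
            print("error:", res.value)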
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/random.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to random library."""
import tvm
from tvm import te
import tvm._ffi
def randint(low, high, size, dtype="int32"):
"""Return random integers from low (inclusive) to high (exclusive).
Return random integers from the "discrete uniform" distribution of the
specified dtype in the "half-open" interval [low, high).
Parameters
----------
low : int
Lowest (signed) integer to be drawn from the distribution
high : int
One above the largest (signed) integer to be drawn from the distribution
size : tuple of ints
Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
samples are drawn.
dtype : str, optional
The data type of the output. Must be an integer type. Defaults to "int32".
Returns
-------
out : Tensor
A tensor with specified size and dtype
"""
assert "int" in dtype, "the type of randint output must be int or uint"
return te.extern(
size,
[],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.random.randint", int(low), int(high), outs[0]
),
dtype=dtype,
)
def uniform(low, high, size):
"""Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval [low, high)
(includes low, but excludes high). In other words, any value within the
given interval is equally likely to be drawn by uniform.
Parameters
----------
low : float
Lower boundary of the output interval. All values generated will be
greater than or equal to low.
high : float
Upper boundary of the output interval. All values generated will be
less than high.
size : tuple of ints
Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
samples are drawn.
Returns
-------
out : Tensor
A tensor with specified size and dtype.
"""
return te.extern(
size,
[],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.random.uniform", float(low), float(high), outs[0]
),
dtype="float32",
)
def normal(loc, scale, size):
"""Draw samples from a normal distribution.
Return random samples from a normal distribution.
Parameters
----------
loc : float
Mean of the distribution.
scale : float
Standard deviation of the distribution.
size : tuple of ints
Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
samples are drawn.
Returns
-------
out : Tensor
A tensor with specified size and dtype
"""
return te.extern(
size,
[],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.random.normal", float(loc), float(scale), outs[0]
),
dtype="float32",
)
tvm._ffi._init_api("tvm.contrib.random")
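# A minimal usage sketch (illustrative addition, not part of the original
# module): schedule and run the uniform() extern op. Assumes a TVM build
# with the random contrib library enabled (USE_RANDOM=ON).
def _example_uniform():
    import numpy as np

    out = uniform(0.0, 1.0, size=(4, 4))
    s = te.create_schedule(out.op)
    f = tvm.build(s, [out], target="llvm")
    buf = tvm.nd.array(np.zeros((4, 4), dtype="float32"))
    f(buf)
    return buf.numpy()  # samples drawn from [0, 1)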
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/relay_viz/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay IR Visualizer"""
from typing import Dict
import tvm
from tvm import relay
from .interface import (
Plotter,
VizGraph,
VizParser,
)
from .terminal import (
TermPlotter,
TermVizParser,
)
from .dot import (
DotPlotter,
DotVizParser,
)
class RelayVisualizer:
"""Relay IR Visualizer
Parameters
----------
relay_mod: tvm.IRModule
Relay IR module.
relay_param: None | Dict[str, tvm.runtime.NDArray]
Relay parameter dictionary. Default `None`.
plotter: Plotter
An instance of class inheriting from Plotter interface.
Default is an instance of `terminal.TermPlotter`.
parser: VizParser
An instance of class inheriting from VizParser interface.
Default is an instance of `terminal.TermVizParser`.
"""
def __init__(
self,
relay_mod: tvm.IRModule,
relay_param: Dict[str, tvm.runtime.NDArray] = None,
plotter: Plotter = None,
parser: VizParser = None,
):
self._plotter = plotter if plotter is not None else TermPlotter()
self._relay_param = relay_param if relay_param is not None else {}
self._parser = parser if parser is not None else TermVizParser()
global_vars = relay_mod.get_global_vars()
graph_names = []
# If there is a "main" function, put it first
# so that it is shown at the top.
for gv_node in global_vars:
if gv_node.name_hint == "main":
graph_names.insert(0, gv_node.name_hint)
else:
graph_names.append(gv_node.name_hint)
node_to_id = {}
# callback to generate a unique string ID for nodes.
# node_count_offset ensures each node ID is unique across subgraphs.
node_count_offset = 0
def traverse_expr(node):
if node in node_to_id:
return
node_to_id[node] = str(len(node_to_id) + node_count_offset)
for name in graph_names:
node_count_offset += len(node_to_id)
node_to_id.clear()
relay.analysis.post_order_visit(relay_mod[name], traverse_expr)
graph = self._plotter.create_graph(name)
self._add_nodes(graph, node_to_id)
def _add_nodes(self, graph: VizGraph, node_to_id: Dict[relay.Expr, str]):
"""add nodes and to the graph.
Parameters
----------
graph : VizGraph
a VizGraph for nodes to be added to.
node_to_id : Dict[relay.Expr, str]
a mapping from nodes to a unique ID.
"""
for node in node_to_id:
viz_node, viz_edges = self._parser.get_node_edges(node, self._relay_param, node_to_id)
if viz_node is not None:
graph.node(viz_node)
for edge in viz_edges:
graph.edge(edge)
def render(self, filename: str = None) -> None:
self._plotter.render(filename=filename)
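# A minimal usage sketch (illustrative addition, not part of the original
# module): visualize a small Relay function with the default terminal
# plotter. Passing filename=None prints the rendered text to stdout.
def _example_relay_visualizer():
    data = relay.var("data", shape=(1, 3, 8, 8))
    func = relay.Function([data], relay.nn.relu(data))
    mod = tvm.IRModule.from_expr(func)
    viz = RelayVisualizer(mod)
    viz.render(None)  # prints "@main(...)" followed by the node tree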
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/relay_viz/dot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Visualize Relay IR by Graphviz DOT language."""
from typing import (
Any,
Callable,
Dict,
)
from .interface import (
DefaultVizParser,
Plotter,
VizEdge,
VizGraph,
VizNode,
)
try:
import graphviz
except ImportError:
# add "from None" to silence
# "During handling of the above exception, another exception occurred"
raise ImportError(
"The graphviz package is required for DOT renderer. "
"Please install it first. For example, pip3 install graphviz"
) from None
DotVizParser = DefaultVizParser
class DotGraph(VizGraph):
"""DOT graph for relay IR.
See also :py:class:`tvm.contrib.relay_viz.dot.DotPlotter`
Parameters
----------
name: str
name of this graph.
graph_attr: Optional[Dict[str, str]]
key-value pairs for the graph.
node_attr: Optional[Dict[str, str]]
key-value pairs for all nodes.
edge_attr: Optional[Dict[str, str]]
key-value pairs for all edges.
get_node_attr: Optional[Callable[[VizNode], Dict[str, str]]]
A callable returning attributes for the node.
"""
def __init__(
self,
name: str,
graph_attr: Dict[str, str] = None,
node_attr: Dict[str, str] = None,
edge_attr: Dict[str, str] = None,
get_node_attr: Callable[[VizNode], Dict[str, str]] = None,
):
self._name = name
self._get_node_attr = self._default_get_node_attr
if get_node_attr is not None:
self._get_node_attr = get_node_attr
# graphviz recognizes the subgraph as a cluster subgraph
# by the name starting with "cluster" (all lowercase)
self._digraph = graphviz.Digraph(
name=f"cluster_{self._name}",
graph_attr=graph_attr,
node_attr=node_attr,
edge_attr=edge_attr,
)
self._digraph.attr(label=self._name)
def node(self, viz_node: VizNode) -> None:
"""Add a node to the underlying graph.
Nodes in a Relay IR Module are expected to be added in post-order.
Parameters
----------
viz_node : VizNode
A `VizNode` instance.
"""
self._digraph.node(
viz_node.identity,
f"{viz_node.type_name}\n{viz_node.detail}",
**self._get_node_attr(viz_node),
)
def edge(self, viz_edge: VizEdge) -> None:
"""Add an edge to the underlying graph.
Parameters
----------
viz_edge : VizEdge
A `VizEdge` instance.
"""
self._digraph.edge(viz_edge.start, viz_edge.end)
@property
def digraph(self):
return self._digraph
@staticmethod
def _default_get_node_attr(node: VizNode):
if "Var" in node.type_name:
return {"shape": "ellipse"}
return {"shape": "box"}
class DotPlotter(Plotter):
"""DOT language graph plotter
The plotter accepts various graphviz attributes for graphs, nodes, and edges.
Please refer to https://graphviz.org/doc/info/attrs.html for available attributes.
Parameters
----------
graph_attr: Optional[Dict[str, str]]
key-value pairs for all graphs.
node_attr: Optional[Dict[str, str]]
key-value pairs for all nodes.
edge_attr: Optional[Dict[str, str]]
key-value pairs for all edges.
get_node_attr: Optional[Callable[[VizNode], Dict[str, str]]]
A callable returning attributes for a specific node.
render_kwargs: Optional[Dict[str, Any]]
keyword arguments directly passed to `graphviz.Digraph.render()`.
Examples
--------
.. code-block:: python
from tvm.contrib import relay_viz
from tvm.relay.testing import resnet
mod, param = resnet.get_workload(num_layers=18)
# graphviz attributes
graph_attr = {"color": "red"}
node_attr = {"color": "blue"}
edge_attr = {"color": "black"}
# VizNode is passed to the callback.
# We want to color NCHW conv2d nodes. Also give Var a different shape.
def get_node_attr(node):
if "nn.conv2d" in node.type_name and "NCHW" in node.detail:
return {
"fillcolor": "green",
"style": "filled",
"shape": "box",
}
if "Var" in node.type_name:
return {"shape": "ellipse"}
return {"shape": "box"}
# Create plotter and pass it to viz. Then render the graph.
dot_plotter = relay_viz.DotPlotter(
graph_attr=graph_attr,
node_attr=node_attr,
edge_attr=edge_attr,
get_node_attr=get_node_attr)
viz = relay_viz.RelayVisualizer(
mod,
relay_param=param,
plotter=dot_plotter,
parser=relay_viz.DotVizParser())
viz.render("hello")
"""
def __init__(
self,
graph_attr: Dict[str, str] = None,
node_attr: Dict[str, str] = None,
edge_attr: Dict[str, str] = None,
get_node_attr: Callable[[VizNode], Dict[str, str]] = None,
render_kwargs: Dict[str, Any] = None,
):
self._name_to_graph = {}
self._graph_attr = graph_attr
self._node_attr = node_attr
self._edge_attr = edge_attr
self._get_node_attr = get_node_attr
self._render_kwargs = {} if render_kwargs is None else render_kwargs
def create_graph(self, name):
self._name_to_graph[name] = DotGraph(
name, self._graph_attr, self._node_attr, self._edge_attr, self._get_node_attr
)
return self._name_to_graph[name]
def render(self, filename: str = None):
"""render the graph generated from the Relay IR module.
This function is a thin wrapper of `graphviz.Digraph.render()`.
"""
# Create or update the filename
if filename is not None:
self._render_kwargs["filename"] = filename
# default cleanup
if "cleanup" not in self._render_kwargs:
self._render_kwargs["cleanup"] = True
root_graph = graphviz.Digraph()
for graph in self._name_to_graph.values():
root_graph.subgraph(graph.digraph)
root_graph.render(**self._render_kwargs)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/relay_viz/interface.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Abstract class used by :py:class:`tvm.contrib.relay_viz.RelayVisualizer`."""
import abc
from typing import (
Dict,
Union,
Tuple,
List,
)
import tvm
from tvm import relay
UNKNOWN_TYPE = "unknown"
class VizNode:
"""VizNode carry node information for `VizGraph` interface.
Parameters
----------
node_id: str
Unique identifier for this node.
node_type: str
Type of this node.
node_detail: str
Any supplement for this node such as attributes.
"""
def __init__(self, node_id: str, node_type: str, node_detail: str):
self._id = node_id
self._type = node_type
self._detail = node_detail
@property
def identity(self) -> str:
return self._id
@property
def type_name(self) -> str:
return self._type
@property
def detail(self) -> str:
return self._detail
def __repr__(self) -> str:
detail = self._detail.replace("\n", ", ")
return f"VizNode(identity: {self._id}, type_name: {self._type}, detail: {detail}"
class VizEdge:
"""VizEdge connect two `VizNode`.
Parameters
----------
start_node: str
The identifier of the node starting the edge.
end_node: str
The identifier of the node ending the edge.
"""
def __init__(self, start_node: str, end_node: str):
self._start_node = start_node
self._end_node = end_node
@property
def start(self) -> str:
return self._start_node
@property
def end(self) -> str:
return self._end_node
class VizParser(abc.ABC):
"""VizParser parses out a VizNode and VizEdges from a `relay.Expr`."""
@abc.abstractmethod
def get_node_edges(
self,
node: relay.Expr,
relay_param: Dict[str, tvm.runtime.NDArray],
node_to_id: Dict[relay.Expr, str],
) -> Tuple[Union[VizNode, None], List[VizEdge]]:
"""Get VizNode and VizEdges for a `relay.Expr`.
Parameters
----------
node : relay.Expr
relay.Expr which will be parsed and generate a node and edges.
relay_param: Dict[str, tvm.runtime.NDArray]
relay parameters dictionary.
node_to_id : Dict[relay.Expr, str]
This is a mapping from relay.Expr to a unique id, generated by `RelayVisualizer`.
Returns
-------
rv1 : Union[VizNode, None]
VizNode representing the relay.Expr. If the relay.Expr is not intended to introduce a node
to the graph, return None.
rv2 : List[VizEdge]
a list of VizEdges describing the connectivity of the relay.Expr.
Can be an empty list to indicate no connectivity.
"""
class VizGraph(abc.ABC):
"""Abstract class for graph, which is composed of nodes and edges."""
@abc.abstractmethod
def node(self, viz_node: VizNode) -> None:
"""Add a node to the underlying graph.
Nodes in a Relay IR Module are expected to be added in post-order.
Parameters
----------
viz_node : VizNode
A `VizNode` instance.
"""
@abc.abstractmethod
def edge(self, viz_edge: VizEdge) -> None:
"""Add an edge to the underlying graph.
Parameters
----------
viz_edge : VizEdge
A `VizEdge` instance.
"""
class DefaultVizParser(VizParser):
"""DefaultVizParser provde a set of logics to parse a various relay types.
These logics are inspired and heavily based on
`visualize` function in https://tvm.apache.org/2020/07/14/bert-pytorch-tvm
"""
def get_node_edges(
self,
node: relay.Expr,
relay_param: Dict[str, tvm.runtime.NDArray],
node_to_id: Dict[relay.Expr, str],
) -> Tuple[Union[VizNode, None], List[VizEdge]]:
if isinstance(node, relay.Function):
return self._function(node, node_to_id)
if isinstance(node, relay.expr.Call):
return self._call(node, node_to_id)
if isinstance(node, relay.expr.Var):
return self._var(node, relay_param, node_to_id)
if isinstance(node, relay.expr.Tuple):
return self._tuple(node, node_to_id)
if isinstance(node, relay.expr.TupleGetItem):
return self._tuple_get_item(node, node_to_id)
if isinstance(node, relay.expr.Constant):
return self._constant(node, node_to_id)
# A GlobalVar might refer to another global relay function,
# which is expected to be handled at the "Graph" level, not the "Node" level.
if isinstance(node, (relay.expr.GlobalVar, tvm.ir.Op)):
return None, []
viz_node = VizNode(node_to_id[node], UNKNOWN_TYPE, f"don't know how to parse {type(node)}")
viz_edges = []
return viz_node, viz_edges
def _var(
self,
node: relay.Expr,
relay_param: Dict[str, tvm.runtime.NDArray],
node_to_id: Dict[relay.Expr, str],
) -> Tuple[Union[VizNode, None], List[VizEdge]]:
"""Render rule for a relay var node"""
node_id = node_to_id[node]
name_hint = node.name_hint
node_detail = f"name_hint: {name_hint}"
node_type = "Var(Param)" if name_hint in relay_param else "Var(Input)"
if node.type_annotation is not None:
if hasattr(node.type_annotation, "shape"):
shape = tuple(map(int, node.type_annotation.shape))
dtype = node.type_annotation.dtype
node_detail = f"{node_detail}\nshape: {shape}\ndtype: {dtype}"
else:
node_detail = f"{node_detail}\ntype_annotation: {node.type_annotation}"
# only node
viz_node = VizNode(node_id, node_type, node_detail)
viz_edges = []
return viz_node, viz_edges
def _function(
self,
node: relay.Expr,
node_to_id: Dict[relay.Expr, str],
) -> Tuple[Union[VizNode, None], List[VizEdge]]:
"""Render rule for a relay function node"""
node_details = []
name = ""
func_attrs = node.attrs
if func_attrs:
node_details = [f"{k}: {func_attrs.get_str(k)}" for k in func_attrs.keys()]
# "Composite" might from relay.transform.MergeComposite
if "Composite" in func_attrs.keys():
name = func_attrs["Composite"]
node_id = node_to_id[node]
# Body -> FunctionNode
viz_node = VizNode(node_id, f"Func {name}", "\n".join(node_details))
viz_edges = [VizEdge(node_to_id[node.body], node_id)]
return viz_node, viz_edges
def _call(
self,
node: relay.Expr,
node_to_id: Dict[relay.Expr, str],
) -> Tuple[Union[VizNode, None], List[VizEdge]]:
"""Render rule for a relay call node"""
node_id = node_to_id[node]
op_name = UNKNOWN_TYPE
node_detail = []
if isinstance(node.op, tvm.ir.Op):
op_name = node.op.name
if node.attrs:
node_detail = [f"{k}: {node.attrs.get_str(k)}" for k in node.attrs.keys()]
elif isinstance(node.op, relay.Function):
func_attrs = node.op.attrs
op_name = "Anonymous Func"
if func_attrs:
node_detail = [f"{k}: {func_attrs.get_str(k)}" for k in func_attrs.keys()]
# "Composite" might from relay.transform.MergeComposite
if "Composite" in func_attrs.keys():
op_name = func_attrs["Composite"]
elif isinstance(node.op, relay.GlobalVar):
op_name = "GlobalVar"
node_detail = [f"GlobalVar.name_hint: {node.op.name_hint}"]
else:
op_name = str(type(node.op)).split(".")[-1].split("'")[0]
# Arguments -> CallNode
viz_node = VizNode(node_id, f"Call {op_name}", "\n".join(node_detail))
args = [node_to_id[arg] for arg in node.args]
viz_edges = [VizEdge(arg, node_id) for arg in args]
return viz_node, viz_edges
def _tuple(
self,
node: relay.Expr,
node_to_id: Dict[relay.Expr, str],
) -> Tuple[Union[VizNode, None], List[VizEdge]]:
node_id = node_to_id[node]
# Fields -> TupleNode
viz_node = VizNode(node_id, "Tuple", "")
viz_edges = [VizEdge(node_to_id[field], node_id) for field in node.fields]
return viz_node, viz_edges
def _tuple_get_item(
self,
node: relay.Expr,
node_to_id: Dict[relay.Expr, str],
) -> Tuple[Union[VizNode, None], List[VizEdge]]:
node_id = node_to_id[node]
# Tuple -> TupleGetItemNode
viz_node = VizNode(node_id, "TupleGetItem", f"idx: {node.index}")
viz_edges = [VizEdge(node_to_id[node.tuple_value], node_id)]
return viz_node, viz_edges
def _constant(
self,
node: relay.Expr,
node_to_id: Dict[relay.Expr, str],
) -> Tuple[Union[VizNode, None], List[VizEdge]]:
node_id = node_to_id[node]
node_detail = f"shape: {node.data.shape}, dtype: {node.data.dtype}"
# only node
viz_node = VizNode(node_id, "Const", node_detail)
viz_edges = []
return viz_node, viz_edges
class Plotter(abc.ABC):
"""Plotter can render a collection of Graph interfaces to a file."""
@abc.abstractmethod
def create_graph(self, name: str) -> VizGraph:
"""Create a VizGraph
Parameters
----------
name : str
the name of the graph
Return
------
rv1: an instance of class inheriting from VizGraph interface.
"""
@abc.abstractmethod
def render(self, filename: str) -> None:
"""Render the graph as a file.
Parameters
----------
filename : str
see the definition of implemented class.
"""
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/relay_viz/terminal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Visualize Relay IR in AST text-form."""
from collections import deque
from typing import (
Dict,
Union,
Tuple,
List,
)
import tvm
from tvm import relay
from .interface import (
DefaultVizParser,
Plotter,
VizEdge,
VizGraph,
VizNode,
VizParser,
)
class TermVizParser(VizParser):
"""`TermVizParser` parse nodes and edges for `TermPlotter`."""
def __init__(self):
self._default_parser = DefaultVizParser()
def get_node_edges(
self,
node: relay.Expr,
relay_param: Dict[str, tvm.runtime.NDArray],
node_to_id: Dict[relay.Expr, str],
) -> Tuple[Union[VizNode, None], List[VizEdge]]:
"""Parse a node and edges from a relay.Expr."""
if isinstance(node, relay.Call):
return self._call(node, node_to_id)
if isinstance(node, relay.Let):
return self._let(node, node_to_id)
if isinstance(node, relay.GlobalVar):
return self._global_var(node, node_to_id)
if isinstance(node, relay.If):
return self._if(node, node_to_id)
if isinstance(node, tvm.ir.Op):
return self._op(node, node_to_id)
if isinstance(node, relay.Function):
return self._function(node, node_to_id)
# Leverage logic from the default parser.
return self._default_parser.get_node_edges(node, relay_param, node_to_id)
def _call(self, node, node_to_id):
node_id = node_to_id[node]
viz_node = VizNode(node_id, "Call", "")
viz_edges = [VizEdge(node_to_id[node.op], node_id)]
for arg in node.args:
arg_id = node_to_id[arg]
viz_edges.append(VizEdge(arg_id, node_id))
return viz_node, viz_edges
def _let(self, node, node_to_id):
node_id = node_to_id[node]
viz_node = VizNode(node_id, "Let", "(var, val, body)")
viz_edges = [
VizEdge(node_to_id[node.var], node_id),
VizEdge(node_to_id[node.value], node_id),
VizEdge(node_to_id[node.body], node_id),
]
return viz_node, viz_edges
def _global_var(self, node, node_to_id):
node_id = node_to_id[node]
viz_node = VizNode(node_id, "GlobalVar", node.name_hint)
viz_edges = []
return viz_node, viz_edges
def _if(self, node, node_to_id):
node_id = node_to_id[node]
viz_node = VizNode(node_id, "If", "(cond, true, false)")
viz_edges = [
VizEdge(node_to_id[node.cond], node_id),
VizEdge(node_to_id[node.true_branch], node_id),
VizEdge(node_to_id[node.false_branch], node_id),
]
return viz_node, viz_edges
def _op(self, node, node_to_id):
node_id = node_to_id[node]
op_name = node.name
viz_node = VizNode(node_id, op_name, "")
viz_edges = []
return viz_node, viz_edges
def _function(self, node, node_to_id):
node_id = node_to_id[node]
viz_node = VizNode(node_id, "Func", str(node.params))
viz_edges = [VizEdge(node_to_id[node.body], node_id)]
return viz_node, viz_edges
class TermNode:
"""TermNode is aimed to generate text more suitable for terminal visualization."""
def __init__(self, viz_node: VizNode):
self.type = viz_node.type_name
# We don't want too many lines in a terminal.
self.other_info = viz_node.detail.replace("\n", ", ")
class TermGraph(VizGraph):
"""Terminal graph for a relay IR Module
Parameters
----------
name: str
name of this graph.
"""
def __init__(self, name: str):
self._name = name
# A graph in adjacency list form.
# The key is source node, and the value is a list of destination nodes.
self._graph = {}
# a hash table for quick searching.
self._id_to_term_node = {}
# node IDs in reverse post-order;
# that means the root is the first node.
self._node_id_rpo = deque()
def node(self, viz_node: VizNode) -> None:
"""Add a node to the underlying graph.
Nodes in a Relay IR Module are expected to be added in post-order.
Parameters
----------
viz_node : VizNode
A `VizNode` instance.
"""
self._node_id_rpo.appendleft(viz_node.identity)
if viz_node.identity not in self._graph:
# Add the node into the graph.
self._graph[viz_node.identity] = []
# Create TermNode from VizNode
node = TermNode(viz_node)
self._id_to_term_node[viz_node.identity] = node
def edge(self, viz_edge: VizEdge) -> None:
"""Add an edge to the terminal graph.
Parameters
----------
viz_edge : VizEdge
A `VizEdge` instance.
"""
# Take CallNode as an example, instead of "arguments point to CallNode",
# we want "CallNode points to arguments" in ast-dump form.
#
# The direction of edge is typically controlled by the implemented VizParser.
# Reverse start/end here simply because we leverage default parser implementation.
if viz_edge.end in self._graph:
self._graph[viz_edge.end].append(viz_edge.start)
else:
self._graph[viz_edge.end] = [viz_edge.start]
def render(self) -> str:
"""Draw a terminal graph
Returns
-------
rv1: str
text representing a graph.
"""
lines = []
seen_node = set()
def gen_line(indent, n_id):
if (indent, n_id) in seen_node:
return
seen_node.add((indent, n_id))
conn_symbol = ["|--", "`--"]
last = len(self._graph[n_id]) - 1
for i, next_n_id in enumerate(self._graph[n_id]):
node = self._id_to_term_node[next_n_id]
lines.append(
f"{indent}{conn_symbol[1 if i==last else 0]}{node.type} {node.other_info}"
)
next_indent = indent
# increase indent for the next level.
next_indent += " " if (i == last) else "| "
gen_line(next_indent, next_n_id)
first_node_id = self._node_id_rpo[0]
first_node = self._id_to_term_node[first_node_id]
lines.append(f"@{self._name}({first_node.other_info})")
gen_line("", first_node_id)
return "\n".join(lines)
class TermPlotter(Plotter):
"""Terminal plotter"""
def __init__(self):
self._name_to_graph = {}
def create_graph(self, name):
self._name_to_graph[name] = TermGraph(name)
return self._name_to_graph[name]
def render(self, filename):
"""If filename is None, print to stdio. Otherwise, write to the filename."""
lines = []
for name in self._name_to_graph:
text_graph = self._name_to_graph[name].render()
lines.append(text_graph)
if filename is None:
print("\n".join(lines))
else:
with open(filename, "w") as out_file:
out_file.write("\n".join(lines))
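# A minimal usage sketch (illustrative addition, not part of the original
# module): build a TermGraph by hand and render it. Nodes are added in
# post-order (leaves first) and edges point from argument to consumer,
# exactly as RelayVisualizer would add them.
def _example_term_graph():
    graph = TermGraph("main")
    graph.node(VizNode("0", "Var(Input)", "name_hint: x"))
    graph.node(VizNode("1", "Call relu", ""))
    graph.edge(VizEdge("0", "1"))
    print(graph.render())  # "@main()" followed by "`--Var(Input) name_hint: x"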
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/rocblas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to rocBLAS libraries."""
import tvm
from tvm import te
def matmul(lhs, rhs, transa=False, transb=False):
"""Create an extern op that compute matrix mult of A and rhs with rocBLAS
Parameters
----------
lhs : Tensor
The left matrix operand
rhs : Tensor
The right matrix operand
transa : bool
Whether to transpose lhs
transb : bool
Whether to transpose rhs
Returns
-------
C : Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.rocblas.matmul", ins[0], ins[1], outs[0], transa, transb
),
name="C",
)
def batch_matmul(lhs, rhs, transa=False, transb=False):
"""Create an extern op that compute matrix mult of A and rhs with rocBLAS
Parameters
----------
lhs : Tensor
The left batched matrix operand
rhs : Tensor
The right batched matrix operand
transa : bool
Whether to transpose lhs
transb : bool
Whether to transpose rhs
Returns
-------
C : Tensor
The result tensor.
"""
batch_size = lhs.shape[0]
assert batch_size == rhs.shape[0]
n = lhs.shape[2] if transa else lhs.shape[1]
m = rhs.shape[1] if transb else rhs.shape[2]
return te.extern(
(batch_size, n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.rocblas.batch_matmul", ins[0], ins[1], outs[0], transa, transb
),
name="C",
)
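# A minimal usage sketch (illustrative addition, not part of the original
# module): build and run matmul() on a ROCm device. Assumes a TVM build
# with rocBLAS enabled (USE_ROCBLAS=ON) and an available AMD GPU.
def _example_rocblas_matmul():
    import numpy as np

    A = te.placeholder((128, 64), name="A")
    B = te.placeholder((64, 32), name="B")
    C = matmul(A, B)
    s = te.create_schedule(C.op)
    f = tvm.build(s, [A, B, C], target="rocm")
    dev = tvm.rocm(0)
    a = tvm.nd.array(np.random.rand(128, 64).astype("float32"), dev)
    b = tvm.nd.array(np.random.rand(64, 32).astype("float32"), dev)
    c = tvm.nd.array(np.zeros((128, 32), dtype="float32"), dev)
    f(a, b, c)
    np.testing.assert_allclose(c.numpy(), a.numpy() @ b.numpy(), rtol=1e-5)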
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/rocm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility for ROCm backend"""
import subprocess
from os.path import join, exists
import tvm._ffi
from tvm._ffi.base import py_str
import tvm.runtime
import tvm.target
from . import utils
def find_lld(required=True):
"""Find ld.lld in system.
Parameters
----------
required : bool
Whether ld.lld is required; a RuntimeError is raised
if it is required but cannot be found.
Returns
-------
valid_list : list of str
List of possible paths.
Note
----
This function will first search for the ld.lld that
matches the major LLVM version that TVM was built with.
"""
lld_list = []
major = tvm.target.codegen.llvm_version_major(allow_none=True)
if major is not None:
lld_list += ["ld.lld-%d.0" % major]
lld_list += ["ld.lld-%d" % major]
lld_list += ["ld.lld"]
valid_list = [utils.which(x) for x in lld_list]
valid_list = [x for x in valid_list if x]
if not valid_list and required:
raise RuntimeError("cannot find ld.lld, candidates are: " + str(lld_list))
return valid_list
def rocm_link(in_file, out_file, lld=None):
"""Link relocatable ELF object to shared ELF object using lld
Parameters
----------
in_file : str
Input file name (relocatable ELF object file)
out_file : str
Output file name (shared ELF object file)
lld : str, optional
The lld linker. If not specified, we will try to guess
the matching lld version.
"""
# if our result has undefined symbols, it will fail to load
# (hipModuleLoad/hipModuleLoadData), but with a somewhat opaque message
# so we have ld.lld check this here.
# If you get a complaint about missing symbols you might want to check the
# list of bitcode files below.
args = [
lld if lld is not None else find_lld()[0],
"--no-undefined",
"-shared",
in_file,
"-o",
out_file,
]
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Linking error using ld.lld:\n"
msg += py_str(out)
raise RuntimeError(msg)
@tvm._ffi.register_func("tvm_callback_rocm_link")
def callback_rocm_link(obj_bin):
"""Links object file generated from LLVM to HSA Code Object
Parameters
----------
obj_bin : bytearray
The object file
Return
------
cobj_bin : bytearray
The HSA Code Object
"""
tmp_dir = utils.tempdir()
tmp_obj = tmp_dir.relpath("rocm_kernel.o")
tmp_cobj = tmp_dir.relpath("rocm_kernel.co")
with open(tmp_obj, "wb") as out_file:
out_file.write(bytes(obj_bin))
rocm_link(tmp_obj, tmp_cobj)
cobj_bin = bytearray(open(tmp_cobj, "rb").read())
return cobj_bin
@tvm._ffi.register_func("tvm_callback_rocm_bitcode_path")
def callback_rocm_bitcode_path(rocdl_dir=None):
"""Utility function to find ROCm device library bitcodes
Parameters
----------
rocdl_dir : str
The path to rocm library directory
The default value is the standard location
"""
# seems link order matters.
if rocdl_dir is None:
if exists("/opt/rocm/amdgcn/bitcode/"):
rocdl_dir = "/opt/rocm/amdgcn/bitcode/" # starting with rocm 3.9
else:
rocdl_dir = "/opt/rocm/lib/" # until rocm 3.8
bitcode_names = [
"oclc_daz_opt_on",
"ocml",
"hc",
"irif", # this does not exist in rocm 3.9, drop eventually
"ockl",
"oclc_correctly_rounded_sqrt_off",
"oclc_correctly_rounded_sqrt_on",
"oclc_daz_opt_off",
"oclc_finite_only_off",
"oclc_finite_only_on",
"oclc_isa_version_803", # todo (t-vi): an alternative might be to scan for the
"oclc_isa_version_900", # isa version files (if the linker throws out
"oclc_isa_version_906", # the unneeded ones or we filter for the arch we need)
"oclc_unsafe_math_off",
"oclc_unsafe_math_on",
"oclc_wavefrontsize64_on",
]
bitcode_files = []
for n in bitcode_names:
p = join(rocdl_dir, n + ".bc") # rocm >= 3.9
if not exists(p): # rocm <= 3.8
p = join(rocdl_dir, n + ".amdgcn.bc")
if exists(p):
bitcode_files.append(p)
elif "isa_version" not in n and n not in {"irif"}:
raise RuntimeError("could not find bitcode " + n)
return tvm.runtime.convert(bitcode_files)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/rpc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Deprecation RPC module"""
# pylint: disable=unused-import
from __future__ import absolute_import as _abs
import warnings
from ..rpc import Server, RPCSession, LocalSession, TrackerSession, connect, connect_tracker
warnings.warn(
"Please use tvm.rpc instead of tvm.conrtib.rpc. tvm.contrib.rpc is going to be removed in 0.5",
DeprecationWarning,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/sdaccel.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility for Interacting with SDAccel Tools"""
import subprocess
import os
import tvm._ffi
from . import utils
@tvm._ffi.register_func("tvm_callback_sdaccel_compile")
def compile_vhls(kernel_info, device_name):
"""Compile Vivado HLS code for SDAccel.
Parameters
----------
kernel_info : list of (str, str)
List of kernel information. The kernel information is a tuple of
function name and source code.
device_name : str
The name of the target device
Return
------
xclbin : bytearray
The bytearray of the xclbin
"""
tmp_dir = utils.tempdir()
sdk = os.environ.get("XILINX_SDX", None)
xocc = os.path.join(sdk, "bin/xocc") if sdk else "xocc"
target = os.environ.get(
"XCL_TARGET", "sw_emu" if os.environ.get("XCL_EMULATION_MODE") else "hw"
)
advanced_params = [
"--xp",
"param:compiler.preserveHlsOutput=1",
"--xp",
"param:compiler.generateExtraRunData=true",
]
platform = device_name
if not platform:
platform = os.environ.get("XCL_PLATFORM", os.environ.get("AWS_PLATFORM"))
if platform is None:
raise RuntimeError("No Xilinx device specified.")
tmp_xo_files = []
for funcname, code in kernel_info:
funcname = funcname.value
code = code.value
tmp_cpp = tmp_dir.relpath(funcname + ".cpp")
tmp_xo = tmp_dir.relpath(funcname + ".xo")
with open(tmp_cpp, "wb") as out_file:
out_file.write(bytes(code))
# build xo
args = (
[xocc, "-c", "-t", target, "--platform", platform, "-o", tmp_xo, "-k", funcname]
+ advanced_params
+ [tmp_cpp]
)
returncode = subprocess.call(args)
if returncode != 0:
raise RuntimeError("Compile error")
tmp_xo_files.append(tmp_xo)
# build xclbin
tmp_xclbin = tmp_dir.relpath("output.xclbin")
args = (
[xocc, "-l", "-t", target, "--platform", platform, "-o", tmp_xclbin]
+ tmp_xo_files
+ advanced_params
)
returncode = subprocess.call(args)
if returncode != 0:
raise RuntimeError("Link error")
return bytearray(open(tmp_xclbin, "rb").read())
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tensor and Operation class for computation declaration."""
# pylint: disable=invalid-name
import warnings
import numpy as _np
from tvm.runtime import ndarray as _nd
from tvm import te
from tvm.tir import expr as _expr
from tvm.te import tensor as _tensor
float32 = "float32"
itype = "int32"
class CSRNDArray(object):
"""Sparse tensor object in CSR format."""
def __init__(self, arg1, device=None, shape=None):
"""Construct a sparse matrix in CSR format.
Parameters
----------
arg1 : numpy.ndarray or a tuple with (data, indices, indptr)
The corresponding dense numpy array,
or a tuple for constructing a sparse matrix directly.
device: Device
The corresponding device.
shape : tuple of int
The shape of the array
"""
if isinstance(arg1, tuple):
assert len(arg1) == 3
self.data, self.indices, self.indptr = arg1
self.shape = shape
elif isinstance(arg1, _np.ndarray):
source_array = arg1
ridx, cidx = _np.nonzero(source_array)
data = source_array[ridx, cidx]
self.data = _nd.array(data, device)
indices = _np.nonzero(source_array)[1].astype(itype)
self.indices = _nd.array(indices, device)
indptr = [0] + _np.apply_along_axis(
_np.count_nonzero, axis=1, arr=source_array
).tolist()
indptr = _np.cumsum(_np.array(indptr, itype)).astype(itype)
self.indptr = _nd.array(indptr, device)
self.shape = source_array.shape
else:
raise RuntimeError(
"Construct CSRNDArray with either a tuple (data, indices, indptr) "
"or a numpy.array, can't handle type %s." % (type(arg1),)
)
self.stype = "csr"
self.dtype = self.data.dtype
assert self.shape is not None
assert isinstance(self.data, _nd.NDArray)
assert isinstance(self.indices, _nd.NDArray)
assert str(self.indices.dtype) == "int32" or str(self.indices.dtype) == "int64", str(
self.indices.dtype
)
assert isinstance(self.indptr, _nd.NDArray)
assert str(self.indptr.dtype) == "int32" or str(self.indptr.dtype) == "int64", str(
self.indptr.dtype
)
def asnumpy(self):
"""Construct a full matrix and convert it to numpy array. This API will be deprecated
in TVM v0.8 release. Please use `numpy` instead."""
warnings.warn(
"CSRNDArray.asnumpy() will be deprecated in TVM v0.8 release. "
"Please use CSRNDArray.numpy() instead.",
DeprecationWarning,
)
return self.numpy()
def numpy(self):
"""Construct a full matrix and convert it to numpy array."""
full = _np.zeros(self.shape, self.dtype)
ridx = _np.diff(self.indptr.numpy())
ridx = _np.hstack([_np.ones((v,), itype) * i for i, v in enumerate(ridx)])
full[ridx, self.indices.numpy().astype(itype)] = self.data.numpy()
return full
def array(source_array, device=None, shape=None, stype="csr"):
"""Construct a sparse NDArray from numpy.ndarray"""
ret = None
if stype == "csr":
ret = CSRNDArray(source_array, shape=shape, device=device)
else:
raise NotImplementedError("stype=%s is not supported yet." % (stype,))
return ret
class SparsePlaceholderOp(object):
"""Placeholder class for sparse tensor representations."""
def __init__(self, shape, nonzeros, dtype, name):
# pylint: disable=unused-argument
"""Contructing a bare bone structure for a sparse matrix
Parameters
----------
shape: Tuple of Expr
The shape of the tensor
nonzeros: int
The number of non-zero values
dtype: str, optional
The data type of the tensor
name: str, optional
The name hint of the tensor
"""
self.shape = shape
self.dtype = dtype
self.name = name
self.stype = "unknown"
class CSRPlaceholderOp(SparsePlaceholderOp):
"""Placeholder class for CSR based sparse tensor representation."""
def __init__(self, shape, nonzeros, dtype, name):
"""Contructing a bare bone structure for a csr_matrix
Parameters
----------
shape: Tuple of Expr
The shape of the tensor
nonzeros: int
The number of non-zero values
dtype: str, optional
The data type of the tensor
name: str, optional
The name hint of the tensor
"""
SparsePlaceholderOp.__init__(self, shape, nonzeros, dtype, name)
self.stype = "csr"
self.data = te.placeholder((nonzeros,), dtype=dtype, name=self.name + "_data")
self.indices = te.placeholder((nonzeros,), dtype=itype, name=self.name + "_indices")
self.indptr = te.placeholder((self.shape[0] + 1,), dtype=itype, name=self.name + "_indptr")
assert isinstance(self.data, _tensor.Tensor)
assert isinstance(self.indices, _tensor.Tensor)
assert isinstance(self.indptr, _tensor.Tensor)
def placeholder(shape, nonzeros=None, dtype=None, name="placeholder", stype=None):
"""Construct an empty sparse tensor object.
Parameters
----------
shape: Tuple of Expr
The shape of the tensor
nonzeros: int
The number of non-zero values
dtype: str, optional
The data type of the tensor
name: str, optional
The name hint of the tensor
stype: str, optional
The storage type of the sparse tensor (e.g. csr, coo, ell)
Returns
-------
tensor: SparsePlaceholderOp
The created sparse tensor placeholder
"""
shape = (shape,) if isinstance(shape, _expr.PrimExpr) else shape
nonzeros = 0 if nonzeros is None else nonzeros
dtype = float32 if dtype is None else dtype
stype = "csr" if stype is None else stype
ret = None
if stype == "csr":
ret = CSRPlaceholderOp(shape=shape, nonzeros=nonzeros, dtype=dtype, name=name)
else:
raise NotImplementedError("stype=%s is not supported yet." % (stype,))
return ret
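# A minimal usage sketch (illustrative addition, not part of the original
# module): round-trip a dense numpy matrix through the CSR representation.
def _example_csr_round_trip():
    dense = _np.array([[0, 1, 0], [2, 0, 3]], dtype=float32)
    csr = array(dense, device=_nd.cpu(0))  # data/indices/indptr are built here
    assert csr.stype == "csr"
    assert _np.array_equal(csr.numpy(), dense)  # reconstructs the dense matrix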
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/spirv.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility for Interacting with SPIRV Tools"""
import subprocess
import os
from . import utils
from .._ffi.base import py_str
def optimize(spv_bin):
"""Optimize SPIRV using spirv-opt via CLI
Note that the spirv-opt is still experimental.
Parameters
----------
spv_bin : bytearray
The spirv file
Return
------
opt_spv_bin : bytearray
The optimized SPIR-V binary
"""
tmp_dir = utils.tempdir()
tmp_in = tmp_dir.relpath("input.spv")
tmp_out = tmp_dir.relpath("output.spv")
with open(tmp_in, "wb") as out_file:
out_file.write(bytes(spv_bin))
sdk = os.environ.get("VULKAN_SDK", None)
cmd = os.path.join(sdk, "bin/spirv-opt") if sdk else "spirv-opt"
args = [cmd, "-O", tmp_in, "-o", tmp_out]
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Opitmizationerror using spirv-opt:\n"
msg += py_str(out)
raise RuntimeError(msg)
return bytearray(open(tmp_out, "rb").read())
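# Illustrative usage sketch (assumes spirv-opt is on PATH or VULKAN_SDK is set;
# "shader.spv" is a hypothetical existing SPIR-V binary):
#
#   with open("shader.spv", "rb") as f:
#       opt_bin = optimize(bytearray(f.read()))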
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/stackvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Dummy StackVM build function."""
# pylint: disable=invalid-name
from __future__ import absolute_import as _abs
import shutil
def build(output, files):
"""Simply copy StackVM output to the destination.
Parameters
----------
output : str
The target StackVM file.
    files : list
        A list containing a single self-contained StackVM module file.
"""
if len(files) == 0:
raise RuntimeError("StackVM artifact must be provided")
if len(files) > 1:
raise RuntimeError("Unexpected multiple StackVM artifacts")
shutil.copy(files[0], output)
# assign output format
build.output_format = "stackvm"
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/tar.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util to invoke tarball in the system."""
# pylint: disable=invalid-name
from __future__ import absolute_import as _abs
import os
import shutil
import subprocess
from . import utils
from .._ffi.base import py_str
def tar(output, files):
"""Create tarball containing all files in root.
Parameters
----------
    output : str
        The target tar file.
files : list
List of files to be bundled.
"""
cmd = ["tar"]
cmd += ["-czf"]
temp = utils.tempdir()
fset = set()
for fname in files:
base = os.path.basename(fname)
if base in fset:
raise ValueError("duplicate file name %s" % base)
fset.add(base)
shutil.copy(fname, temp.relpath(base))
cmd += [output]
cmd += ["-C", temp.temp_dir]
cmd += temp.listdir()
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Tar error:\n"
msg += py_str(out)
raise RuntimeError(msg)
# assign output format
tar.output_format = "tar"
def untar(tar_file, directory):
"""Unpack all tar files into the directory
Parameters
----------
tar_file : str
The source tar file.
directory : str
The target directory
"""
cmd = ["tar"]
cmd += ["-xf"]
cmd += [tar_file]
cmd += ["-C", directory]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Tar error:\n"
msg += py_str(out)
raise RuntimeError(msg)
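# Illustrative round trip (file names are hypothetical):
#
#   tar("bundle.tar", ["lib0.o", "lib1.o"])  # bundle the files by base name
#   untar("bundle.tar", "./extracted")       # unpack into an existing directory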
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/target/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/target/coreml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
"""Utility to compile CoreML models"""
import os
import shutil
import tvm._ffi
from ...relay.expr_functor import ExprVisitor
from .. import xcode, coreml_runtime
def _convert_add(builder, name, inputs, outputs, args, attrs):
builder.add_elementwise(name=name, input_names=inputs, output_name=outputs[0], mode="ADD")
def _convert_multiply(builder, name, inputs, outputs, args, attrs):
builder.add_elementwise(name=name, input_names=inputs, output_name=outputs[0], mode="MULTIPLY")
def _convert_clip(builder, name, inputs, outputs, args, attrs):
builder.add_clip(
name=name,
input_name=inputs[0],
output_name=outputs[0],
min_value=attrs.a_min,
max_value=attrs.a_max,
)
def _convert_batch_flatten(builder, name, inputs, outputs, args, attrs):
builder.add_flatten_to_2d(name=name, input_name=inputs[0], output_name=outputs[0])
def _convert_expand_dims(builder, name, inputs, outputs, args, attrs):
if attrs.axis >= 0:
axes = list(range(attrs.axis, attrs.axis + attrs.num_newaxis))
else:
axes = list(range(attrs.axis - attrs.num_newaxis + 1, attrs.axis + 1))
builder.add_expand_dims(name=name, input_name=inputs[0], output_name=outputs[0], axes=axes)
def _convert_relu(builder, name, inputs, outputs, args, attrs):
builder.add_activation(
name=name, non_linearity="RELU", input_name=inputs[0], output_name=outputs[0]
)
def _convert_softmax(builder, name, inputs, outputs, args, attrs):
builder.add_softmax_nd(
name=name, input_name=inputs[0], output_name=outputs[0], axis=int(attrs["axis"])
)
def _convert_conv2d(builder, name, inputs, outputs, args, attrs):
weight = args[1].data.numpy()
if attrs["kernel_layout"] == "OIHW":
# convert to 'HWIO'
weight = weight.transpose([2, 3, 1, 0])
kh, kw, kc, oc = weight.shape
builder.add_convolution(
name=name,
kernel_channels=kc,
output_channels=oc,
height=kh,
width=kw,
stride_height=int(attrs["strides"][0]),
stride_width=int(attrs["strides"][0]),
border_mode="valid",
groups=int(attrs["groups"]),
W=weight,
b=None,
has_bias=False,
input_name=inputs[0],
output_name=outputs[0],
dilation_factors=[int(v) for v in attrs["dilation"]],
padding_top=int(attrs["padding"][0]),
padding_bottom=int(attrs["padding"][2]),
padding_left=int(attrs["padding"][1]),
padding_right=int(attrs["padding"][3]),
)
def _convert_global_avg_pool2d(builder, name, inputs, outputs, args, attrs):
builder.add_pooling(
name=name,
height=1,
width=1,
stride_height=1,
stride_width=1,
layer_type="AVERAGE",
padding_type="VALID",
input_name=inputs[0],
output_name=outputs[0],
is_global=True,
)
_convert_map = {
"add": _convert_add,
"multiply": _convert_multiply,
"clip": _convert_clip,
"expand_dims": _convert_expand_dims,
"nn.relu": _convert_relu,
"nn.batch_flatten": _convert_batch_flatten,
"nn.softmax": _convert_softmax,
"nn.conv2d": _convert_conv2d,
"nn.global_avg_pool2d": _convert_global_avg_pool2d,
}
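# Additional operators can be supported by registering a converter with the same
# (builder, name, inputs, outputs, args, attrs) signature. A hypothetical sketch
# for the Relay "sigmoid" op, which is not registered above:
#
#   def _convert_sigmoid(builder, name, inputs, outputs, args, attrs):
#       builder.add_activation(
#           name=name, non_linearity="SIGMOID", input_name=inputs[0], output_name=outputs[0]
#       )
#
#   _convert_map["sigmoid"] = _convert_sigmoid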
class CodegenCoreML(ExprVisitor):
"""
A visitor to traverse subgraphs and build Core ML models.
"""
def __init__(self, model_name, function):
import coremltools
from coremltools.models.neural_network import NeuralNetworkBuilder
ExprVisitor.__init__(self)
self.model_name = model_name
self.function = function
self.out_map = {}
self.model_inputs_ = []
self.buf_idx_ = 0
# Update inputs and outputs after we visit all the nodes.
# Set dummy values for now.
# TODO: support multiple outputs
inputs = [
(
"",
coremltools.models.datatypes.Array(
1,
),
)
for _ in self.function.params
]
outputs = [
(
"",
coremltools.models.datatypes.Array(
1,
),
)
]
self.builder = NeuralNetworkBuilder(inputs, outputs, disable_rank5_shape_mapping=True)
def visit_constant(self, const):
output = "buf_" + str(self.buf_idx_)
self.builder.add_load_constant_nd(
name=output,
output_name=output,
constant_value=const.data.numpy(),
shape=const.data.shape,
)
self.buf_idx_ = self.buf_idx_ + 1
self.out_map[const] = [output]
def visit_var(self, var):
name = var.name_hint
shape = [int(n) for n in var.type_annotation.shape]
dtype = var.type_annotation.dtype
self.model_inputs_.append((name, shape, dtype))
self.out_map[var] = [name]
def visit_call(self, call):
inputs = []
for arg in call.args:
super().visit(arg)
for out in self.out_map[arg]:
inputs.append(out)
outputs = ["buf_" + str(self.buf_idx_)]
op_name = call.op.name
layer_name = op_name + "_" + str(self.buf_idx_)
assert op_name in _convert_map, "{} is not supported".format(op_name)
_convert_map[op_name](self.builder, layer_name, inputs, outputs, call.args, call.attrs)
self.buf_idx_ = self.buf_idx_ + 1
self.out_map[call] = outputs
def compile(self, out_dir):
"""
        Build a Core ML model and compile it with the Xcode toolchain.
"""
import coremltools
from coremltools.proto.Model_pb2 import ArrayFeatureType
FEATURE_TYPE_MAP = {
"float32": ArrayFeatureType.FLOAT32,
"float64": ArrayFeatureType.DOUBLE,
"int32": ArrayFeatureType.INT32,
}
input_names, input_dims, input_dtypes = zip(*self.model_inputs_)
self.builder.set_input(input_names, input_dims)
for i, dtype in enumerate(input_dtypes):
assert dtype in FEATURE_TYPE_MAP
input_desc = self.builder.spec.description.input
input_desc[i].type.multiArrayType.dataType = FEATURE_TYPE_MAP[dtype]
output_dim = [int(n) for n in self.function.ret_type.shape]
self.builder.set_output(self.out_map[self.function.body], [output_dim])
for i, dtype in enumerate([self.function.ret_type.dtype]):
assert dtype in FEATURE_TYPE_MAP
output_desc = self.builder.spec.description.output
output_desc[i].type.multiArrayType.dataType = FEATURE_TYPE_MAP[dtype]
model = coremltools.models.MLModel(self.builder.spec)
xcode.compile_coreml(model, self.model_name, out_dir)
@tvm._ffi.register_func("relay.ext.coremlcompiler")
def coreml_compiler(func):
"""
Create a CoreML runtime from a Relay module.
"""
assert isinstance(func, tvm.relay.function.Function)
model_dir = os.getcwd()
name = str(func.attrs.global_symbol)
builder = CodegenCoreML(name, func)
builder.visit(func.body)
mlmodelc_path = "{}/{}.mlmodelc".format(model_dir, name)
if os.path.exists(mlmodelc_path):
shutil.rmtree(mlmodelc_path)
builder.compile(model_dir)
dev = tvm.cpu(0)
return coreml_runtime.create(name, mlmodelc_path, dev).module
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/target/onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines, redefined-builtin
"""Relay to ONNX codegen """
import os
import struct
import copy
import numpy
import onnx
import onnx.utils
from onnx import numpy_helper, OperatorSetIdProto, defs
from onnx import TensorProto
import tvm
from tvm import relay
import tvm._ffi
from tvm.relay.expr_functor import ExprVisitor
from tvm.relay.ty import TupleType, TensorType
ONNX_OPSET_VERSONS_SUPPORTED = [11]
def run_onnx_optimizer(onnx_model):
"""Run ONNX's optimization routines.
ONNX Optimizer was moved to an external library in
version 1.9. Attempt to use the optimizer in onnx if
it is available, fall back to the standalone
onnxoptimizer otherwise, and return the model
    unoptimized if neither is available.
"""
try:
onnx_polish_model = onnx.utils.polish_model
except AttributeError:
pass
else:
return onnx_polish_model(onnx_model)
try:
# pylint: disable=import-outside-toplevel
import onnxoptimizer
except ImportError:
pass
else:
return onnxoptimizer.optimize(onnx_model)
return onnx_model
def tvm_array_to_list(arr):
return tuple(x.value for x in arr)
def get_onnx_version():
return onnx.__version__
def get_node_shape(node):
return tuple("Any" if isinstance(i, tvm.tir.Any) else int(i) for i in node.shape)
def infer_type(node):
"""A method to infer the type of a relay expression."""
mod = tvm.IRModule.from_expr(node)
mod = relay.transform.InferType()(mod)
entry = mod["main"]
return entry if isinstance(node, relay.Function) else entry.body
def call_node_infer_type(node):
"""infer the output types of call node"""
infer_out = infer_type(node)
out_type = infer_out._checked_type_
if isinstance(out_type, TensorType):
types = [out_type]
elif isinstance(out_type, TupleType):
types = list(out_type.fields)
else:
raise RuntimeError(
"Unsupported output type %s in operator %s" % (type(out_type), node.op.nae)
)
return types
def add_input(data, name, prefix, model_container):
input_name = "{}_{}".format(prefix, name)
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[data.dtype]
tensor_value_info = onnx.helper.make_tensor_value_info(input_name, dtype, shape=data.shape)
model_container.add_inputs([tensor_value_info])
data_tensor = numpy_helper.from_array(data, input_name)
model_container.add_initializers([data_tensor])
return input_name
class OpConverter(object):
"""Operator converter Base Class."""
@classmethod
def convert_attributes(cls, attrs):
"""convert Relay attributes to ONNX attributes.
The derived classes should implement this method
if attributes are required by the operator
otherwise by default no attributes are passed
"""
return {}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
onnx_node = onnx.helper.make_node(
cls.__name__, node_entry["input_names"], node_entry["output_names"], **attrs
)
model_container.add_nodes([onnx_node])
def rename(op_name):
"""This method creates dynamic operator of name op_name with empty attributes"""
return type(op_name, (OpConverter,), {})
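# Example: rename("Add") yields a dynamically created OpConverter subclass whose
# class name is "Add"; the inherited convert() then emits an ONNX "Add" node with
# no attributes, so simple one-to-one operators need no dedicated converter class.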
class Reshape(object):
"""Operator converter for Reshape."""
@classmethod
def convert(cls, node_entry, model_container, node_dict):
"""Converts Relay operator Reshape to ONNX operator.
Relay operator accepts shape as attribute but ONNX operator
accepts it as a input.
"""
name = node_entry["name"]
shape = numpy.asarray(
[a.value for a in node_entry["relay_node"].attrs.newshape], dtype=numpy.int64
)
input_names = [
node_entry["input_names"][0],
add_input(shape, name, "shape", model_container),
]
node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"])
model_container.add_nodes([node])
class Conv(OpConverter):
"""Operator converter for Conv."""
@classmethod
def convert_attributes(cls, attrs):
return {
"group": attrs.get_int("groups"),
"pads": attrs.get_int_tuple("padding"),
"strides": attrs.get_int_tuple("strides"),
"dilations": attrs.get_int_tuple("dilation"),
"kernel_shape": attrs.get_int_tuple("kernel_size"),
}
class ConvTranspose(OpConverter):
"""Operator converter for ConvTranspose."""
@classmethod
def convert_attributes(cls, attrs):
return {
"group": attrs.get_int("groups"),
"pads": attrs.get_int_tuple("padding"),
"strides": attrs.get_int_tuple("strides"),
"dilations": attrs.get_int_tuple("dilation"),
"kernel_shape": attrs.get_int_tuple("kernel_size"),
"output_padding": attrs.get_int_tuple("output_padding"),
}
class MaxPool(OpConverter):
"""Operator converter for MaxPool."""
@classmethod
def convert_attributes(cls, attrs):
return {
"pads": attrs.get_int_tuple("padding"),
"strides": attrs.get_int_tuple("strides"),
"kernel_shape": attrs.get_int_tuple("pool_size"),
"ceil_mode": 1 if attrs.ceil_mode else 0,
}
class Transpose(OpConverter):
"""Operator converter for Transpose."""
@classmethod
def convert_attributes(cls, attrs):
return {"perm": attrs.get_int_tuple("axes")} if attrs["axes"] else {}
class MatMul(OpConverter):
"""Operator converter for MatMul."""
@classmethod
def convert(cls, node_entry, model_container, node_dict):
inter_output_name = "inter{}".format(node_entry["name"])
transpose_node = onnx.helper.make_node(
Transpose.__name__, [node_entry["input_names"][1]], [inter_output_name], perm=(1, 0)
)
model_container.add_nodes([transpose_node])
inputs = [node_entry["input_names"][0], inter_output_name]
matmul_node = onnx.helper.make_node(cls.__name__, inputs, node_entry["output_names"])
model_container.add_nodes([matmul_node])
class Flatten(OpConverter):
"""Operator converter for Flatten."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axis": 1,
}
class BatchNormalization(OpConverter):
"""Operator converter for BatchNormalization."""
@classmethod
def convert_attributes(cls, attrs):
return {
"epsilon": float(attrs.get_str("epsilon")),
"axis": float(attrs.get_int("axis")),
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
"""Converts Relay operator batch_norm to ONNX operator.
Relay operator has property axis to handle data in NHWC format.
"""
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
transpose_out_name = node_entry["input_names"][0]
inter_output_names = [node_entry["output_names"][0]]
# axis==3 means channel is specified along the 3rd axis
if attrs["axis"] == 3:
transpose_out_name = "transpose_{}".format(node_entry["name"])
node_transposed = onnx.helper.make_node(
Transpose.__name__,
[node_entry["input_names"][0]],
[transpose_out_name],
perm=[0, 3, 1, 2],
)
model_container.add_nodes([node_transposed])
inter_output_names = ["batch_norm_{}".format(node_entry["name"])]
input_names = [transpose_out_name] + node_entry["input_names"][1:]
batch_norm_node = onnx.helper.make_node(
cls.__name__, input_names, inter_output_names, epsilon=attrs["epsilon"]
)
model_container.add_nodes([batch_norm_node])
if attrs["axis"] == 3:
node_transposed = onnx.helper.make_node(
Transpose.__name__,
inter_output_names,
[node_entry["output_names"][0]],
perm=[0, 2, 3, 1],
)
model_container.add_nodes([node_transposed])
class Dropout(OpConverter):
"""Operator converter for Dropout."""
@classmethod
def convert_attributes(cls, attrs):
return {
"ratio": float(attrs.get_str("rate")),
}
class AveragePool(MaxPool):
"""Operator converter for AveragePool."""
class Concat(OpConverter):
"""Operator converter for Concat."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axis": attrs.get_int("axis"),
}
class BiasAdd(OpConverter):
"""Operator converter for BiasAdd."""
@classmethod
def convert(cls, node_entry, model_container, node_dict):
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node_entry can not be a Tuple"
input_node = input_node[0]
data_ndim = len(input_node["types"][0].shape)
axis = node_entry["relay_node"].attrs.get_int("axis")
if axis < 0:
axis = axis + data_ndim
new_axes = data_ndim - axis - 1
if new_axes:
inter_output_name = "inter{}".format(node_entry["name"])
unsqueeze_node = onnx.helper.make_node(
"Unsqueeze",
[node_entry["input_names"][1]],
[inter_output_name],
axes=tuple(range(1, new_axes + 1)),
)
model_container.add_nodes([unsqueeze_node])
else:
inter_output_name = node_entry["input_names"][1]
inputs = [node_entry["input_names"][0], inter_output_name]
matmul_node = onnx.helper.make_node("Add", inputs, node_entry["output_names"])
model_container.add_nodes([matmul_node])
class ReduceMean(OpConverter):
"""Operator converter for ReduceMean."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axes": attrs.axis,
"keepdims": 0 if bool(attrs.get_int("keepdims", 0)) is False else 1,
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
shape = input_node["types"][0].shape
axis = node_entry["relay_node"].attrs.axis
axis = list(range(shape.size())) if not axis else tvm_array_to_list(axis)
exclude = 0 if not bool(node_entry["relay_node"].attrs.exclude) else 1
keepdims = 0 if not bool(node_entry["relay_node"].attrs.keepdims) else 1
if exclude:
all_axis = list(range(len(shape)))
axis = set(all_axis) - set(axis)
node = onnx.helper.make_node(
cls.__name__,
node_entry["input_names"],
node_entry["output_names"],
axes=axis,
keepdims=keepdims,
)
model_container.add_nodes([node])
class Pad(OpConverter):
"""Operator converter for Pad."""
@classmethod
def convert_attributes(cls, attrs):
before = []
after = []
for axis_pads in attrs.pad_width:
before.append(axis_pads[0])
after.append(axis_pads[1])
pads = before + after
pads = numpy.asarray(pads, dtype=pads[0].dtype)
return {
"pads": pads,
"mode": attrs.get_str("pad_mode"),
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
"""Converts Relay operator Pad to ONNX operator.
Relay operator accepts pads as attribute but ONNX operator
accepts it as a input.
"""
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
pad_data = numpy.asarray(attrs["pads"], dtype=attrs["pads"][0].dtype).astype(numpy.int64)
input_names = [
node_entry["input_names"][0],
add_input(pad_data, name, "pads", model_container),
node_entry["input_names"][1],
]
node = onnx.helper.make_node(
cls.__name__, input_names, node_entry["output_names"], mode=attrs["mode"]
)
model_container.add_nodes([node])
class Softmax(OpConverter):
"""Operator converter for SoftMax."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axis": attrs.axis,
}
class Squeeze(OpConverter):
"""Operator converter for Squeeze."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axes": attrs.axis,
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
shape = input_node["types"][0].shape
axis = node_entry["relay_node"].attrs.get_int("axis")
if not axis:
axis = []
for axis_idx, val in enumerate(shape):
if val.value == 1:
axis.append(axis_idx)
else:
axis = node_entry["relay_node"].attrs.get_int_tuple("axis")
node = onnx.helper.make_node(
cls.__name__, node_entry["input_names"], node_entry["output_names"], axes=axis
)
model_container.add_nodes([node])
class Slice(OpConverter):
"""Operator converter for Slice."""
@classmethod
def convert_attributes(cls, attrs):
return {
"starts": attrs.get_int_tuple("begin"),
"ends": attrs.get_int_tuple("end"),
"steps": attrs.get_int_tuple("strides"),
"slice_mode": attrs.get_str("slice_mode"),
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
shape = input_node["types"][0].shape
starts = list(attrs["starts"])
ends = list(attrs["ends"])
steps = list(attrs["steps"])
starts += [0] * (len(shape) - len(starts))
ends += [shape[i] + 1 for i in range(len(ends), len(shape))]
axes = list(range(len(shape)))
if attrs["slice_mode"] == "size":
ends = [
starts[i] + (shape[i] + 1 if ends[i] < 0 else ends[i]) for i in range(len(shape))
]
steps = [1] * len(shape)
else:
steps += [1] * (len(shape) - len(steps))
starts = numpy.asarray(starts).astype(numpy.int64)
ends = numpy.asarray(ends).astype(numpy.int64)
axes = numpy.asarray(axes).astype(numpy.int64)
steps = numpy.asarray(steps).astype(numpy.int64)
input_names = []
input_names.append(add_input(starts, name, "starts", model_container))
input_names.append(add_input(ends, name, "ends", model_container))
input_names.append(add_input(axes, name, "axes", model_container))
input_names.append(add_input(steps, name, "steps", model_container))
input_names = [node_entry["input_names"][0]] + input_names
slice_node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"])
model_container.add_nodes([slice_node])
class Split(OpConverter):
"""Operator converter for Split."""
@classmethod
def convert_attributes(cls, attrs):
indices_or_sections = attrs["indices_or_sections"]
if isinstance(indices_or_sections, (list, tvm.ir.container.Array)):
indices_or_sections = attrs.get_int_tuple("indices_or_sections")
if isinstance(indices_or_sections, tvm.ir.PrimExpr):
indices_or_sections = indices_or_sections.value
return {
"indices_or_section": indices_or_sections,
"axis": attrs.get_int("axis"),
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
shape = get_node_shape(input_node["types"][0])
indices_or_sect = attrs["indices_or_section"]
axis = attrs["axis"]
axis_length = shape[axis]
if isinstance(indices_or_sect, int):
split = [axis_length // indices_or_sect] * indices_or_sect
else:
split = []
for i in range(len(indices_or_sect) + 1):
if i == 0:
split.append(indices_or_sect[0])
elif i == len(indices_or_sect):
split.append(axis_length - indices_or_sect[-1])
else:
split.append(indices_or_sect[i] - indices_or_sect[i - 1])
slice_node = onnx.helper.make_node(
cls.__name__,
node_entry["input_names"],
node_entry["output_names"],
split=split,
axis=axis,
)
model_container.add_nodes([slice_node])
class LayoutTransform(OpConverter):
"""Operator converter for Layouttransform"""
@classmethod
def convert_attributes(cls, attrs):
src_layout = attrs.get_str("src_layout")
dst_layout = attrs.get_str("dst_layout")
perm = [src_layout.index(c) for c in dst_layout]
return {"perm": tuple(perm)}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
onnx_node = onnx.helper.make_node(
"Transpose", node_entry["input_names"], node_entry["output_names"], **attrs
)
model_container.add_nodes([onnx_node])
class Clip(OpConverter):
"""Operator converter for Clip."""
@classmethod
def convert_attributes(cls, attrs):
return {"min": attrs.a_min, "max": attrs.a_max}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
min_val = numpy.asarray(attrs["min"]).astype(numpy.float32)
max_val = numpy.asarray(attrs["max"]).astype(numpy.float32)
input_names = []
input_names.append(add_input(min_val, name, "min", model_container))
input_names.append(add_input(max_val, name, "max", model_container))
input_names = [node_entry["input_names"][0]] + input_names
node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"])
model_container.add_nodes([node])
class Expand(OpConverter):
"""Operator converter for Expand_dims."""
@classmethod
def convert_attributes(cls, attrs):
return {"axis": attrs.axis, "num_newaxis": attrs.num_newaxis}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node_entry can not be a Tuple"
input_node = input_node[0]
data_shape = input_node["types"][0].shape
new_shape = list(data_shape)
for _ in range(attrs["num_newaxis"]):
new_shape.insert(attrs["axis"], 1)
new_shape = numpy.asarray(new_shape).astype(numpy.int64)
input_names = []
input_names.append(add_input(new_shape, name, "shape", model_container))
input_names = [node_entry["input_names"][0]] + input_names
node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"])
model_container.add_nodes([node])
class ConstantOfShapeZeros(OpConverter):
"""Operator converter for ConstantOfShape."""
@classmethod
def convert_attributes(cls, attrs):
return {"value": 0}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
dtype = input_node["types"][0].dtype
name = node_entry["name"]
shape = [val.value for val in input_node["types"][0].shape]
shape = numpy.asarray(shape).astype(numpy.int64)
input_names = []
input_names.append(add_input(shape, name, "shape", model_container))
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)]
tensor_value = onnx.helper.make_tensor("value", dtype, [1], [attrs["value"]])
node = onnx.helper.make_node(
"ConstantOfShape", input_names, node_entry["output_names"], value=tensor_value
)
model_container.add_nodes([node])
class ConstantOfShapeOnes(ConstantOfShapeZeros):
"""Operator converter for ConstantOfShape."""
@classmethod
def convert_attributes(cls, attrs):
return {"value": 1}
class LRN(OpConverter):
"""Operator converter for LRN."""
@classmethod
def convert_attributes(cls, attrs):
"""axis attr is not supported as an argument in onnx.
Onnx only supports axis=1 (channels)."""
if attrs.get_int("axis") != 1:
raise RuntimeError(
"Unsupported axis %s in operator relay lrn operator. "
"Only axis = 1 is supported by Onnx." % (attrs.get_int("axis"))
)
return {"alpha": attrs.alpha, "beta": attrs.beta, "bias": attrs.bias, "size": attrs.size}
class Cast(OpConverter):
"""Operator converter for Cast."""
@classmethod
def convert_attributes(cls, attrs):
return {"to": getattr(TensorProto, attrs.dtype.upper())}
class Resize(OpConverter):
"""Operator converter for Resize."""
@classmethod
def convert_attributes(cls, attrs):
method = attrs.get_str("method")
if method == "nearest_neighbor":
mode = b"nearest"
elif "linear" in method: # linear / bilinear
mode = b"linear"
elif "cubic" in method: # cubic / bicubic
mode = b"cubic"
else:
raise RuntimeError("Unsupported method %s in operator Resize" % method)
coord_trans = attrs.get_str("coordinate_transformation_mode")
if coord_trans == "half_pixel":
coord_trans = b"half_pixel"
elif coord_trans == "align_corners":
coord_trans = b"align_corners"
elif coord_trans == "asymmetric":
coord_trans = b"asymmetric"
else:
raise RuntimeError(
"Unsupported coordinate transform mode %s in operator Resize" % coord_trans
)
rounding_method = attrs.get_str("rounding_method")
if rounding_method == "round":
rounding_method = b"round_prefer_ceil"
elif rounding_method == "floor":
rounding_method = b"floor"
elif rounding_method == "ceil":
rounding_method = b"ceil"
else:
raise RuntimeError(
"Unsupported rounding method %s in operator Resize" % rounding_method
)
size = attrs.get_int_tuple("size")
return {
"mode": mode,
"coord_trans": coord_trans,
"size": size,
"nearest_mode": rounding_method,
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
input_shape = input_node["types"][0].shape
# (TBD) needed in opset 11
roi = [0] * len(input_shape) + [1] * len(input_shape)
roi_array = numpy.asarray(roi).astype(numpy.float64)
roi_node = add_input(roi_array, name, "roi", model_container)
out_size = attrs["size"]
# (onnx) rank of scale / size must match rank of X
# relay size node contains only spatial dimensions
# pad with 1s to match rank
match_rank_pad = len(input_shape) - len(out_size)
out_size_full_rank = input_shape[:match_rank_pad] + list(out_size)
out_size_array = numpy.asarray(out_size_full_rank).astype(numpy.int64)
input_size_array = numpy.asarray(list(input_shape)).astype(numpy.int64)
scale_array = numpy.divide(out_size_array, input_size_array).astype(numpy.float32)
scale_node = add_input(scale_array, name, "scales", model_container)
input_names = [node_entry["input_names"][0], roi_node, scale_node]
resize_node = onnx.helper.make_node(
cls.__name__,
input_names,
node_entry["output_names"],
mode=attrs["mode"],
coordinate_transformation_mode=attrs["coord_trans"],
nearest_mode=attrs["nearest_mode"],
)
model_container.add_nodes([resize_node])
relay_to_onnx_op_mapping = {
"reshape": Reshape,
"nn.conv2d": Conv,
"nn.conv2d_transpose": ConvTranspose,
"add": rename("Add"),
"nn.relu": rename("Relu"),
"transpose": Transpose,
"nn.dense": MatMul,
"nn.max_pool2d": MaxPool,
"nn.batch_flatten": Flatten,
"multiply": rename("Mul"),
"nn.bias_add": BiasAdd,
"nn.batch_norm": BatchNormalization,
"nn.global_avg_pool2d": rename("GlobalAveragePool"),
"concatenate": Concat,
"nn.dropout": Dropout,
"nn.avg_pool2d": AveragePool,
"divide": rename("Div"),
"mean": ReduceMean,
"nn.pad": Pad,
"nn.softmax": Softmax,
"squeeze": Squeeze,
"strided_slice": Slice,
"greater": rename("Greater"),
"less": rename("Less"),
"equal": rename("Equal"),
"zeros_like": ConstantOfShapeZeros,
"ones_like": ConstantOfShapeOnes,
"subtract": rename("Sub"),
"split": Split,
"exp": rename("Exp"),
"layout_transform": LayoutTransform,
"clip": Clip,
"expand_dims": Expand,
"nn.lrn": LRN,
"sigmoid": rename("Sigmoid"),
"copy": rename("Identity"),
"round": rename("Round"),
"cast": Cast,
"image.resize2d": Resize,
}
class ModelContainer(object):
"""A container class to hold different attributes of ONNX model graph"""
def __init__(self, name, opset_version):
self._name = name
self._opset_version = opset_version
self._inputs = []
self._outputs = []
self._nodes = []
self._initializers = []
def add_inputs(self, inputs):
self._inputs.extend(inputs)
def add_outputs(self, outputs):
self._outputs.extend(outputs)
def add_nodes(self, nodes):
self._nodes.extend(nodes)
def add_initializers(self, initializers):
self._initializers.extend(initializers)
def _get_opsets(self):
opsets = []
imp = OperatorSetIdProto()
imp.version = self._opset_version
opsets.append(imp)
return opsets
def make_model(self):
"""Creates the onnx model from the graph"""
onnx_graph = onnx.helper.make_graph(
self._nodes, self._name, self._inputs, self._outputs, self._initializers
)
kwargs = {}
kwargs["opset_imports"] = self._get_opsets()
kwargs["producer_name"] = "TVM Relay"
kwargs["producer_version"] = tvm.__version__
return onnx.helper.make_model(onnx_graph, **kwargs)
class RelayToONNXConverter(ExprVisitor):
"""A helper class to traverse the Relay graph and convert Relay nodes to ONNX model
Parameters
----------
name : str
name of the model
params : dict
dict of the parameter names and NDarray values
opset_version : int
target onnx opset version
"""
def __init__(self, name, params, opset_version):
super().__init__()
self._name = name
self._mc = ModelContainer(name, opset_version)
self._params = params
self._node_dict = {}
self._node_count = 0
self.last_node = None
@classmethod
def _get_node_entry(cls, relay_node, name):
return {
"relay_node": relay_node,
"inputs": [relay_node], # inputs in the form of relay nodes
"types": [], # output types in case of call nodes else self type
"name": name, # name of the node
"input_names": [name], # input names in case of call nodes else self name
"output_names": [name], # output names in case of call nodes else self name
"op": None, # op name in case of call node else None
}
def convert_to_onnx(self, func):
"""Traverse Relay graph and generate a ONNX model"""
self.visit(func)
self._add_output(self._node_dict[self.last_node])
model = self._mc.make_model()
return run_onnx_optimizer(model)
def visit(self, expr):
self._node_count += 1
super().visit(expr)
def visit_constant(self, const):
node_index = self._node_count
name = self._name + "_const_" + str(node_index)
node_entry = self._get_node_entry(const, name)
node_entry["types"] = [const.checked_type]
self._add_constant_input(node_entry, node_index)
self._node_dict[const] = [node_entry]
def visit_var(self, var):
node_index = self._node_count
node_entry = self._get_node_entry(var, var.name_hint)
node_entry["types"] = [var.type_annotation]
self._add_input(node_entry, node_index)
self._node_dict[var] = [node_entry]
def visit_tuple(self, tup):
self._node_dict[tup] = []
for f in tup.fields:
self.visit(f)
self._node_dict[tup].extend(self._node_dict[f])
self.last_node = tup
def visit_tuple_getitem(self, t):
self.visit(t.tuple_value)
tup_node = self._node_dict[t.tuple_value]
if len(tup_node) > 1:
self._node_dict[t] = tup_node[t.index]
else:
node_entry = copy.deepcopy(tup_node[0])
output_names = [node_entry["output_names"][t.index]]
node_entry["output_names"] = output_names
self._node_dict[t] = [node_entry]
self.last_node = t
def visit_call(self, call):
node_index = self._node_count
op = call.op
name = "{}_{}".format(op, node_index)
node_entry = self._get_node_entry(call, name)
node_entry["op"] = op
node_entry["input_names"] = []
node_entry["inputs"] = []
node_entry["output_names"] = None
for input_arg in call.args:
self.visit(input_arg)
input_names = []
for arg_node_entry in self._node_dict[input_arg]:
input_names.extend(arg_node_entry["output_names"])
node_entry["input_names"].extend(input_names)
node_entry["inputs"].extend([input_arg])
node_entry["types"] = call_node_infer_type(call)
node_entry["output_names"] = []
for i in range(len(node_entry["types"])):
node_entry["output_names"].append(name + str(i))
self.last_node = call
self._add_node(node_entry, node_index)
self._node_dict[call] = [node_entry]
def _add_node(self, node_entry, idx):
"""Convert Relay operator node to ONNX operator and add it to container nodes list"""
if node_entry["op"].name not in relay_to_onnx_op_mapping:
raise NotImplementedError(
"Currently the operator '{0}' is " "not supported.".format(node_entry["op"].name)
)
converter = relay_to_onnx_op_mapping[node_entry["op"].name]()
return converter.convert(node_entry, self._mc, self._node_dict)
def _add_params(self, node_entry, idx):
"""Add param value to initializer and name to inputs"""
param_name = node_entry["name"]
        assert (
            param_name in self._params
        ), "The parameter {0} is not present in params dict provided.".format(param_name)
value = self._params[param_name]
numpy_array = value.numpy()
tensor = numpy_helper.from_array(numpy_array, param_name)
self._mc.add_initializers([tensor])
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy_array.dtype]
input = onnx.helper.make_tensor_value_info(param_name, dtype, shape=numpy_array.shape)
self._mc.add_inputs([input])
def _add_constant_input(self, node_entry, idx):
"""Create named input for constant and add it to container inputs.
If input is a parameter then add to param
"""
node = node_entry["relay_node"]
param_name = node_entry["name"]
self._params[param_name] = node.data
self._add_params(node_entry, idx)
def _add_input(self, node_entry, idx):
"""Add input node to container inputs. If input is a parameter then add to param"""
if node_entry["name"] in self._params:
self._add_params(node_entry, idx)
else:
node_type = node_entry["types"][0]
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(node_type.dtype)]
input = onnx.helper.make_tensor_value_info(
node_entry["name"], dtype, shape=get_node_shape(node_type)
)
self._mc.add_inputs([input])
def _add_output(self, node_entries):
"""Add output node to container outputs."""
for node_entry in node_entries:
for node_type, output_name in zip(node_entry["types"], node_entry["output_names"]):
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(node_type.dtype)]
output = onnx.helper.make_tensor_value_info(
output_name, dtype, shape=get_node_shape(node_type)
)
self._mc.add_outputs([output])
def to_onnx(relay_ir, params, name, opset_version=11, path=None):
"""Convert a Relay Function Module into an equivalent ONNX and serialize it to the path
Parameters
----------
relay_ir : tvm.ir.IRModule or tvm.relay.Function
The relay module object
params : dict
dict of the parameter names and NDarray values
name : str
name of the output ONNX graph
opset_version : int
target onnx opset version
path : str
The path where ONNX model will be saved
Returns
-------
onnx_model : onnx.ModelProto
converted ONNX model as a ModelProto.
"""
if opset_version not in ONNX_OPSET_VERSONS_SUPPORTED:
raise NotImplementedError("Currently only opset version 11 is supported.")
if opset_version > defs.onnx_opset_version():
raise Exception(
"The ONNX package installed of version {} does not support the opset "
"version {}. Upgrade the ONNX package to latest version.".format(
get_onnx_version(), opset_version
)
)
func = relay_ir["main"] if isinstance(relay_ir, tvm.ir.IRModule) else relay_ir
converter = RelayToONNXConverter(name, params, opset_version)
onnx_model = converter.convert_to_onnx(func)
if path:
onnx.save(onnx_model, path)
return onnx_model
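# A minimal usage sketch (the Relay function below is hypothetical; tvm and
# relay are already imported at the top of this module):
#
#   x = relay.var("x", shape=(1, 8), dtype="float32")
#   func = relay.Function([x], relay.nn.relu(x))
#   onnx_model = to_onnx(tvm.IRModule.from_expr(func), {}, "relu_graph")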
@tvm._ffi.register_func("relay.ext.onnx")
def onnx_compiler(func):
"""Create a runtime module for ONNX from Relay Function
:param func: Relay function
:return: runtime module for ONNX
"""
assert isinstance(func, tvm.relay.function.Function)
name = str(func.attrs.global_symbol)
model = to_onnx(func, {}, name)
const_vars = [const.name for const in model.graph.initializer]
name_bytes = bytes(name, "utf-8")
name_size = struct.pack("I", len(name_bytes))
model_serialized = model.SerializeToString()
model_size = struct.pack("I", model.ByteSize())
data = b"" + name_size + name_bytes + model_size + model_serialized
runtime_func = "runtime.ONNXModuleCreate"
fcreate = tvm._ffi.get_global_func(runtime_func)
return fcreate(data.hex(), name, const_vars)
@tvm._ffi.register_func("relay.ext.onnx.save_to_file")
def save_to_file(hex_str, path=None, fmt="onnx"):
"""Store the ONNX subgraphs in the path folder
:param hex_str: Subgrah names and corresponding serialized onnx hex string
:param path: path to which ONNX files to be stored
It is assumed that path exists
:param fmt: extension of the files to be stored
"""
onnx_ir = bytes.fromhex(hex_str)
offset = 0
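    # The buffer holds back-to-back records in the layout produced by
    # onnx_compiler: [4-byte name length][name][4-byte model length][model].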
while offset < len(onnx_ir):
stop = offset + 4
(name_size,) = struct.unpack("I", onnx_ir[offset:stop])
name = onnx_ir[stop : stop + name_size].decode("utf-8")
stop = stop + name_size
(model_size,) = struct.unpack("I", onnx_ir[stop : stop + 4])
stop = stop + 4
model_serialized = onnx_ir[stop : stop + model_size]
offset = stop + model_size
model_onnx = onnx.load_model_from_string(model_serialized)
onnx.save(model_onnx, "{}{}{}.{}".format(path, os.path.sep, name, fmt))
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/target/vitis_ai.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
"""Utility to offload (sub-)models to Vitis-AI"""
import warnings
import importlib
from tvm.relay.expr import Tuple, Call, TupleGetItem
import tvm._ffi
# Placeholder for PyXIR module
pyxir = None
def vitis_ai_available():
"""Return whether Vitis AI tools are available"""
pyxir_spec = importlib.util.find_spec("pyxir")
if not tvm.get_global_func("tvm.vitis_ai_runtime.from_xgraph", True) or pyxir_spec is None:
return False
return True
class CodegenVitisAI:
"""Traverse Relay expression and convert into PyXIR XGraph format
Parameters
----------
function : Function
The Relay function
dpu_target : str
The Vitis AI DPU target identifier
"""
def __init__(self, function, dpu_target):
global pyxir
try:
if pyxir is None:
pyxir = __import__("pyxir")
__import__("pyxir.frontend.tvm")
except ImportError:
# add "from None" to silence
# "During handling of the above exception, another exception occurred"
raise ImportError(
"The pyxir package is required for the Vitis AI backend. "
"Please install it first. "
"Help: (https://tvm.apache.org/docs/deploy/vitis_ai.html) "
) from None
self.function = function
self.dpu_target = dpu_target
self.params = {}
def build(self):
""" "Convert the Relay expression to a PyXIR XGraph to instantiate
the Vitis AI runtime
Returns
-------
xgraph_str : str
Serialized XGraph
"""
xgraph = pyxir.frontend.tvm.from_relay(
self.function, params=self.params, postprocessing=None
)
xgraph = pyxir.partition(xgraph, targets=[self.dpu_target])
output_relay_ids = self.get_output_names()
layers = xgraph.get_layers()
# Get the output tensor names using XGraph and output Relay ids
out_tensor_names = ["unknown_name"] * len(output_relay_ids)
for layer in layers:
if not layer.internal:
for relay_id in layer.attrs["relay_id"]:
if relay_id in output_relay_ids:
out_tensor_names[output_relay_ids.index(relay_id)] = layer.name
break
if any([name == "unkown_name" for name in out_tensor_names]):
raise ValueError(
"During codegeneration the loading of subexpression"
" failed due to output tensor name mismatch in Relay PyXIR interface."
)
xgraph.meta_attrs["tvm_out_tensors"] = out_tensor_names
xgraph_str = pyxir.get_xgraph_str(xgraph)
return xgraph_str
def get_output_names(self):
"""Get output names from Relay expression"""
func = self.function
output_relay_ids = []
expr = func.body
if isinstance(expr, Tuple):
for field in expr.fields:
output_relay_ids.append(hash(field))
elif isinstance(expr, Call):
output_relay_ids.append(hash(expr))
elif isinstance(expr, TupleGetItem):
output_relay_ids.append(hash(expr.tuple_value))
else:
raise ValueError("Vitis-AI codegen does not support {} as output".format(type(expr)))
return output_relay_ids
@tvm._ffi.register_func("relay.ext.vitis_ai")
def vitis_ai_compiler(ref):
"""Create a Vitis-AI runtime from the provided Relay expression"""
assert isinstance(ref, tvm.relay.function.Function)
name = str(ref.attrs.global_symbol)
pass_context = tvm.get_global_func("transform.GetCurrentPassContext")()
cfg = (
pass_context.config["relay.ext.vitis_ai.options"]
if "relay.ext.vitis_ai.options" in pass_context.config
else None
)
# Backward compatibility with old pass context configs
if cfg is None:
warnings.warn(
"You are using a deprecated way of passing build configs (e.g."
" `relay.ext.vitis_ai.options.target`). Check out the Vitis AI "
" documentation here: https://tvm.apache.org/docs/deploy/vitis_ai.html"
" to switch to recommended way for passing build configs."
)
# The target Vitis-AI accelerator device
dpu_target = (
str(pass_context.config["relay.ext.vitis_ai.options.target"])
if "relay.ext.vitis_ai.options.target" in pass_context.config
else None
)
# (Optional configs) The build and work directories to be used by Vitis-AI
vai_build_dir = (
str(pass_context.config["relay.ext.vitis_ai.options.build_dir"])
if "relay.ext.vitis_ai.options.build_dir" in pass_context.config
else tvm.contrib.utils.tempdir().relpath("")
)
vai_work_dir = (
str(pass_context.config["relay.ext.vitis_ai.options.work_dir"])
if "relay.ext.vitis_ai.options.work_dir" in pass_context.config
else tvm.contrib.utils.tempdir().relpath("")
)
# (Optional configs) Export and load PyXIR runtime module to file if provided. This is
# used to compile and quantize a model on the host and deploy it at the edge
export_runtime_module = (
str(pass_context.config["relay.ext.vitis_ai.options.export_runtime_module"])
if "relay.ext.vitis_ai.options.export_runtime_module" in pass_context.config
else ""
)
load_runtime_module = (
str(pass_context.config["relay.ext.vitis_ai.options.load_runtime_module"])
if "relay.ext.vitis_ai.options.load_runtime_module" in pass_context.config
else ""
)
else:
dpu_target = cfg.dpu if cfg.dpu else None
# (Optional configs) The build and work directories to be used by Vitis AI
vai_build_dir = cfg.build_dir if cfg.build_dir else tvm.contrib.utils.tempdir().relpath("")
# (Optional configs) Export and load PyXIR runtime module to file if provided. This is
# used to compile and quantize a model on the host and deploy it at the edge
vai_work_dir = cfg.work_dir if cfg.work_dir else tvm.contrib.utils.tempdir().relpath("")
export_runtime_module = cfg.export_runtime_module
load_runtime_module = cfg.load_runtime_module
# Config checks
if load_runtime_module and dpu_target is not None:
warnings.warn(
"Both `load_runtime_module` and `dpu` configs were specified."
" The `load_runtime_module` points to a prebuilt runtime module with"
" an internal DPU target so the `dpu` config will be ignored"
)
if load_runtime_module and "relay.ext.vitis_ai.options.build_dir" in pass_context.config:
warnings.warn(
"Both `load_runtime_module` and `build_dir` configs were specified."
" The `load_runtime_module` points to a prebuilt runtime module with"
" an internal build directory so the `build_dir` config will be ignored"
)
if load_runtime_module and "relay.ext.vitis_ai.options.work_dir" in pass_context.config:
warnings.warn(
"Both `load_runtime_module` and `work_dir` configs were specified."
" The `load_runtime_module` points to a prebuilt runtime module with"
" an internal work directory so the `work_dir` config will be ignored"
)
# If load_runtime_module is not set, we will build the PyXIR runtime module from scratch
if load_runtime_module == "":
# Convert Relay expression into XGraph and do partitioning inside PyXIR
codegen = CodegenVitisAI(ref, dpu_target)
xgraph_str = codegen.build()
runtime_func = "tvm.vitis_ai_runtime.from_xgraph"
fcreate = tvm._ffi.get_global_func(runtime_func)
return fcreate(
name, xgraph_str, dpu_target, vai_build_dir, vai_work_dir, export_runtime_module
)
runtime_func = "tvm.vitis_ai_runtime.from_rt_mod"
fcreate = tvm._ffi.get_global_func(runtime_func)
return fcreate(name, load_runtime_module, export_runtime_module)
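# A hedged configuration sketch; the DPU identifier, `mod` and `params` are
# placeholder names, and the module must already be partitioned for Vitis AI
# (see https://tvm.apache.org/docs/deploy/vitis_ai.html):
#
#   with tvm.transform.PassContext(
#       opt_level=3,
#       config={"relay.ext.vitis_ai.options": {"dpu": "DPUCZDX8G-zcu104"}},
#   ):
#       lib = tvm.relay.build(mod, target="llvm", params=params)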
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/tedd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-outside-toplevel
"""Tensor Expression Debug Display (TEDD), visualizing Tensor Expression"""
import html
import json
import warnings
from graphviz import Digraph
from graphviz import Source
import tvm
TVMDD_TABLE_BODY_WIDTH = 30
# Must match enum IterVarType defined in include/tvm/expr.h
ITERVAR_TYPE_STRING_MAP = {
0: ("kDataPar", "#FFFFFF"),
1: ("kThreadIndex", "#2980B9"),
2: ("kCommReduce", "#FAD7A0"),
3: ("kOrdered", "#D35400"),
4: ("kOpaque", "#ABB2B9"),
5: ("kUnrolled", "#D2B4DE"),
6: ("kVectorized", "#AED6F1"),
7: ("kParallelized", "#F5B7B1"),
8: ("kTensorized", "#A9DFBF"),
}
PALETTE = {
0: "#000000",
1: "#922B21",
2: "#76448A",
3: "#1F618D",
4: "#148F77",
5: "#B7950B",
6: "#AF601A",
7: "#F5B7B1",
8: "#A9DFBF",
}
PALETTE_SIZE = 9
def dom_path_to_string(dom_path, prefix=""):
path_string = prefix
for index in dom_path:
path_string = path_string + "_" + str(index)
return path_string
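# Example: dom_path_to_string([0, 2], "Stage") returns "Stage_0_2".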
def insert_dot_id(sch):
"""Insert unique ID for each node in the DOM tree.
They are used as Dot node ID.
"""
for stage_idx, stage in enumerate(sch["stages"]):
dom_path = [stage_idx]
stage["id"] = dom_path_to_string(dom_path, stage["type"])
for itervar_idx, itervar in enumerate(stage["all_itervars"]):
dom_path = [stage_idx, itervar_idx]
itervar["id"] = dom_path_to_string(dom_path, itervar["type"])
for rel_idx, rel in enumerate(stage["relations"]):
dom_path = [stage_idx, rel_idx]
rel["id"] = dom_path_to_string(dom_path, rel["type"])
for tensor_idx, tensor in enumerate(stage["output_tensors"]):
dom_path = [stage_idx, tensor_idx]
tensor["id"] = dom_path_to_string(dom_path, tensor["type"])
return sch
class ObjectManager:
"""A helper class tracking schedule objects, e.g. stage, IterVar,
relationship, and tensor, to their DOM path."""
def __init__(self, sch):
self.dict = {}
for stage_idx, stage in enumerate(sch.stages):
self.dict[stage] = [stage_idx]
for itervar_idx, itervar in enumerate(stage.all_iter_vars):
self.dict[itervar] = [stage_idx, itervar_idx]
for rel_idx, rel in enumerate(stage.relations):
self.dict[rel] = [stage_idx, rel_idx]
for tensor_idx in range(stage.op.num_outputs):
self.dict[frozenset({stage.op.name, tensor_idx})] = [stage_idx, tensor_idx]
def get_dom_path(self, obj):
if obj is None:
return None
assert obj in self.dict, "Node is no found."
return self.dict[obj]
def get_or_create_dot_id(obj, prefix="", assert_on_missing=False):
"""If obj's ID has been registered, return it.
If not, either assert or create a unique and legal ID, register and
return it, according to assert_on_missing.
    ID must be a unique and legal Dot ID.
Parameters
----------
    obj : object
Serve as the key to the ID.
prefix : string
Prefix to attach to the ID. Usually use obj's non-unique
name as prefix.
assert_on_missing : bool
Assert or not if object doesn't have a registered ID.
"""
prefix = prefix.replace(".", "_")
if not hasattr(get_or_create_dot_id, "obj_id_dict"):
get_or_create_dot_id.obj_id_dict = {}
if obj not in get_or_create_dot_id.obj_id_dict:
if assert_on_missing:
assert False, "dot_id " + str(obj) + " has not been registered."
else:
get_or_create_dot_id.obj_id_dict[obj] = prefix + hex(id(obj))
return get_or_create_dot_id.obj_id_dict[obj]
def get_port_id(is_input, index):
return "I_" + str(index) if is_input else "O_" + str(index)
def get_itervar_type_info(iter_type):
assert iter_type < len(ITERVAR_TYPE_STRING_MAP), "Unknown IterVar type: " + str(iter_type)
return ITERVAR_TYPE_STRING_MAP[iter_type]
def get_itervar_label_color(itervar, iv_type):
type_info = get_itervar_type_info(iv_type)
return (
linebrk(str(itervar["name"]) + "(" + type_info[0] + ")", TVMDD_TABLE_BODY_WIDTH),
type_info[1],
)
def linebrk(s, n):
"""Break input string s with <br/> for every n charactors."""
result = ""
j = 0
for i, c in enumerate(s):
if j == n and i != len(s) - 1:
result = result + "\n"
j = 0
j = j + 1
result = result + c
result = html.escape(str(result), quote=True)
result = result.replace("\n", "<br/>")
return result
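# Example: linebrk("abcdef", 3) returns "abc<br/>def".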
def create_graph(name="", rankdir="BT"):
graph = Digraph(name=name)
graph.graph_attr["rankdir"] = rankdir
return graph
def itervar_label(itervar, index, index_color, label):
return (
'<TR><TD PORT="'
+ itervar["id"]
+ '" BGCOLOR="'
+ index_color
+ '">'
+ str(index)
+ '</TD><TD BGCOLOR="white" PORT="itervar">'
+ label
+ "<br/>"
+ str(itervar["properties"]["range"])
+ "</TD></TR>"
)
def stage_label(stage):
return stage["name"] + "<br/>Scope: " + stage["properties"]["scope"]
def legend_label():
"""Generate legend labels."""
label = '<<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">'
for iter_type in ITERVAR_TYPE_STRING_MAP:
name, color = ITERVAR_TYPE_STRING_MAP[iter_type]
label += (
'<TR><TD BGCOLOR="' + color + '"></TD>' + '<TD BGCOLOR="white">' + name + "</TD></TR>"
)
label += "</TABLE>>"
return label
def leaf_itervars(stage):
filtered = filter(lambda x: (x["index"] >= 0), stage["all_itervars"])
return sorted(filtered, key=lambda x: x["index"])
def legend_dot(g):
with g.subgraph(name="cluster_legend") as subgraph:
subgraph.attr(label="Legend")
label = legend_label()
subgraph.node("legend", label, shape="none", margin="0")
def extract_dom_for_viz(sch, need_range=True):
json_str = dump_json(sch, need_range)
s = json.loads(json_str)
s = insert_dot_id(s)
return s
def dump_graph(dot_string, show_svg=True, dot_file_path="", output_dot_string=False):
"""Output dot_string in various formats."""
if dot_file_path:
try:
with open(dot_file_path, "w+") as dot_file:
dot_file.write(dot_string)
except IOError:
print("Cannot open file: " + dot_file_path)
if show_svg:
from IPython.display import display
from IPython.display import SVG
src = Source(dot_string)
display(SVG(src.pipe(format="svg")))
if output_dot_string:
return dot_string
return None
def dump_json(sch, need_range):
"""Serialize data for visualization from a schedule in JSON format.
Parameters
----------
sch : schedule
The schedule object to serialize
need_range : bool
Whether to infer and include IterVar ranges
Returns
-------
json : string
Serialized JSON string
"""
def encode_itervar(itervar, stage, index, range_map):
"""Extract and encode IterVar visualization data to a dictionary"""
ivrange = range_map[itervar] if range_map is not None and itervar in range_map else None
bind_thread = None
tensor_intrin = None
if itervar in stage.iter_var_attrs:
attr = stage.iter_var_attrs[itervar]
iv_type = attr.iter_type
# binding
bind_thread = str(attr.bind_thread.var) if attr.bind_thread is not None else None
# tensorization
if attr.tensor_intrin is not None:
tensor_intrin = str(attr.tensor_intrin.body)
# remove the final \n
tensor_intrin = tensor_intrin[0:-1] if tensor_intrin[-1] == "\n" else tensor_intrin
else:
tensor_intrin = None
else:
iv_type = itervar.iter_type
itervar_dict = {
"type": "IterVar",
"index": index,
"name": str(itervar.var),
"itervar_type": iv_type,
"properties": {
"thread": bind_thread,
"intrin": tensor_intrin,
"range": str(ivrange) if ivrange is not None else "range(N/A)",
},
}
return itervar_dict
def encode_itervars(stage, range_map):
"""Extract and encode IterVars visualization data from a stage to a dictionary"""
def get_leaf_itervar_index(itervar, leaf_iv):
for leaf_index, ivar in enumerate(leaf_iv):
if ivar == itervar:
return leaf_index
return -1
itervars = []
for itervar in stage.all_iter_vars:
leaf_index = get_leaf_itervar_index(itervar, stage.leaf_iter_vars)
itervars.append(encode_itervar(itervar, stage, leaf_index, range_map))
return itervars
def encode_itervar_relation(obj_manager, rel):
"""Extract and encode IterVar Relationship visualization data to a dictionary"""
rel_type = type(rel)
if rel_type is tvm.te.schedule.Split:
node_type = "Split_Relation"
rel_dict = {
"type": node_type,
"parent": obj_manager.get_dom_path(rel.parent),
"outer": obj_manager.get_dom_path(rel.outer),
"inner": obj_manager.get_dom_path(rel.inner),
}
elif rel_type is tvm.te.schedule.Fuse:
node_type = "Fuse_Relation"
rel_dict = {
"type": node_type,
"fused": obj_manager.get_dom_path(rel.fused),
"outer": obj_manager.get_dom_path(rel.outer),
"inner": obj_manager.get_dom_path(rel.inner),
}
elif rel_type is tvm.te.schedule.Singleton:
node_type = "Singleton_Relation"
rel_dict = {
"type": node_type,
"iter": obj_manager.get_dom_path(rel.iter),
}
else:
return None
return rel_dict
def encode_itervar_relations(obj_manager, stage):
relations = []
for i in range(len(stage.relations)):
rel = encode_itervar_relation(obj_manager, stage.relations[i])
if rel is not None:
relations.append(rel)
return relations
def encode_tensor(obj_manager, tensor, stage):
"""Extract and encode tensor visualization data to a dictionary"""
tensor_dict = {
"type": "Tensor",
"source": obj_manager.get_dom_path(stage),
"value_index": tensor.value_index,
"shape": str(tensor.op.output(tensor.value_index).shape),
"data_type": tensor.op.output(tensor.value_index).dtype,
}
return tensor_dict
def encode_tensors(obj_manager, stage):
tensors = []
for i in range(stage.op.num_outputs):
tensor = stage.op.output(i)
tensors.append(encode_tensor(obj_manager, tensor, stage))
tensors.sort(key=lambda tensor: tensor["value_index"])
return tensors
def encode_stage(obj_manager, stage, range_map):
"""Extract and encode stage visualization data to a dictionary"""
stage_dict = {
"type": "Stage",
"name": stage.op.name,
"attaching_to": obj_manager.get_dom_path(stage.attach_ivar),
"compute": str(stage.op.body) if hasattr(stage.op, "body") else None,
"properties": {
"scope": stage.scope,
},
"all_itervars": encode_itervars(stage, range_map),
"relations": encode_itervar_relations(obj_manager, stage),
"input_tensors": [
obj_manager.get_dom_path(frozenset({tensor.op.name, tensor.value_index}))
for tensor in stage.op.input_tensors
],
"output_tensors": encode_tensors(obj_manager, stage),
}
return stage_dict
def encode_schedule(sch, need_range):
"""Extract and encode data from a schedule for visualization to a nested dictionary.
It is used by the JSON serialization of a schedule.
Parameters
----------
sch : schedule
The schedule object to extract
Returns
-------
dict : dictionary
A nested dictionary
"""
assert isinstance(
sch, tvm.te.schedule.Schedule
), "Input is not a tvm.te.schedule.Schedule object."
range_map = None
if need_range:
try:
range_map = tvm.te.schedule.InferBound(sch)
except tvm._ffi.base.TVMError as expt:
warnings.warn(
"Ranges are not available, because InferBound fails with the following error:\n"
+ str(expt)
)
obj_manager = ObjectManager(sch)
stages = []
for stage in sch.stages:
stages.append(encode_stage(obj_manager, stage, range_map))
return {
"type": "Schedule",
"stages": stages,
}
return json.dumps(sch, default=lambda s: encode_schedule(s, need_range))
def viz_schedule_tree(sch, show_svg=False, dot_file_path="", output_dot_string=False):
"""Top level API to render schedule tree
Parameters
----------
sch : schedule
The schedule object to visualize
show_svg : bool
Display graph as SVG, useful for Jupyter notebooks.
dot_file_path : string
Dot file to save the graph.
output_dot_string : bool
Return dot file content or an empty string.
Returns
-------
dot_string : string
Dot file content or an empty string according to output_dot_string
Examples
--------
The following code writes a schedule tree to a dot file.
.. code-block:: python
tedd.viz_schedule_tree(s, dot_file_path = '/tmp/example.dot')
Use the following code to render a SVG graph in a Jupyter notebook.
.. code-block:: python
tedd.viz_schedule_tree(s, show_svg = True)
"""
def create_schedule_tree_graph(name=""):
return create_graph(name=name, rankdir="BT")
def root_dot(g):
g.node("ROOT", "ROOT", shape="oval", margin="0")
def stage_node_dot(g, stage):
node_label = stage_node_label(stage)
g.node(stage["id"], node_label, shape="none", margin="0")
def stage_node_label(stage):
"""Return a html format label for the given stage."""
label = (
'<<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" '
'CELLPADDING="4"> <TR><TD BGCOLOR="lightgrey" '
'COLSPAN="2" PORT="stage">' + stage_label(stage) + "</TD></TR>"
)
for leafiv in leaf_itervars(stage):
iv_type = leafiv["itervar_type"]
var_attr_label = ""
if "thread" in leafiv["properties"] and leafiv["properties"]["thread"] is not None:
var_attr_label = (
var_attr_label
+ '<br/><font color="#2980B9">('
+ str(leafiv["properties"]["thread"])
+ ")</font>"
)
if "intrin" in leafiv["properties"] and leafiv["properties"]["intrin"] is not None:
var_attr_label = (
var_attr_label
+ "<br/>"
+ linebrk(
"(tensor_intrin:" + str(leafiv["properties"]["intrin"]) + ")",
TVMDD_TABLE_BODY_WIDTH,
)
)
var_label, color = get_itervar_label_color(leafiv, iv_type)
label += itervar_label(leafiv, leafiv["index"], color, var_label + var_attr_label)
if stage["compute"] is not None:
label += (
'<TR><TD COLSPAN="2">'
+ linebrk(str(stage["compute"]), TVMDD_TABLE_BODY_WIDTH)
+ "</TD></TR>"
)
label += "</TABLE>>"
return label
def compute_at_dot(g, stage):
"""If the given stage attaches to another stage, create an edge from it
stage to its attach point; otherwise, create an edge to the ROOT.
"""
src = stage["id"]
dst = (
dom_path_to_string([stage["attaching_to"][0]], "Stage")
+ ":"
+ dom_path_to_string(stage["attaching_to"], "IterVar")
if stage["attaching_to"] is not None
else "ROOT"
)
color = (
PALETTE[stage["attaching_to"][1] + 1]
if stage["attaching_to"] is not None and stage["attaching_to"][1] < PALETTE_SIZE - 1
else PALETTE[0]
)
g.edge(src, dst, color=color)
graph = create_schedule_tree_graph("Schedule Tree")
s = extract_dom_for_viz(sch)
legend_dot(graph)
for stage in s["stages"]:
stage_node_dot(graph, stage)
for stage in s["stages"]:
compute_at_dot(graph, stage)
root_dot(graph)
return dump_graph(graph.source, show_svg, dot_file_path, output_dot_string)
def viz_itervar_relationship_graph(sch, show_svg=False, dot_file_path="", output_dot_string=False):
"""Top level API to render IterVar relationship graph
Parameters
----------
sch : schedule
The schedule object to visualize
show_svg : bool
Display graph as SVG, useful for Jupyter notebooks.
dot_file_path : string
Dot file to save the graph.
output_dot_string : bool
Return dot file content or an empty string.
Examples
--------
The following code writes an IterVar relationship graph to a dot file.
.. code-block:: python
tedd.viz_itervar_relationship_graph(s, dot_file_path = '/tmp/example.dot')
Use the following code to render a SVG graph in a Jupyter notebook.
.. code-block:: python
tedd.viz_itervar_relationship_graph(s, show_svg = True)
"""
def create_itervar_relation_graph(name=""):
return create_graph(name=name, rankdir="TB")
def itervar_node_dot(g, itervar, iv_type, index):
label = itervar_node_label(itervar, iv_type, index)
g.node(itervar["id"], label, shape="none", margin="0")
def itervar_node_label(itervar, iv_type, index):
label = (
'<<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" '
'CELLPADDING="4">'
+ itervar_label(
itervar,
index,
get_itervar_label_color(itervar, iv_type)[1],
get_itervar_label_color(itervar, iv_type)[0],
)
+ "</TABLE>>"
)
return label
def itervar_relation_node_dot(g, node_id, node_label, input_ports, output_ports):
label = itervar_relation_node_label(node_label, input_ports, output_ports)
g.node(node_id, label, shape="none", margin="0")
def itervar_relation_node_label(node_label, input_ports, output_ports):
"""Return a html format label for an itervar relationship node
including node_label and input/output ports.
"""
label = '<<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" ' 'CELLPADDING="4">' + "<TR>"
max_port_num = max(len(input_ports), len(output_ports))
for i in range(max_port_num):
if i < len(input_ports):
input_port = input_ports[i]
label += '<TD BGCOLOR="lightgrey" PORT="' + input_port + '">' + input_port + "</TD>"
else:
label += '<TD BGCOLOR="white"></TD>'
label += "</TR>"
label += (
'<TR><TD BGCOLOR="white" COLSPAN="'
+ str(max_port_num)
+ '" PORT="relation">'
+ node_label
+ "</TD></TR>"
)
label += "<TR>"
for i in range(max_port_num):
if i < len(output_ports):
output_port = output_ports[i]
label += (
'<TD BGCOLOR="lightgrey" PORT="' + output_port + '">' + output_port + "</TD>"
)
else:
label += '<TD BGCOLOR="white"></TD>'
label += "</TR>"
label += "</TABLE>>"
return label
def itervar_relation_dot(g, node, node_id):
"""Create an itervar relationship node."""
node_type = node["type"]
if node_type == "Split_Relation":
node_type = "Split"
itervar_relation_node_dot(g, node_id, node_type, ["Input"], ["Outer", "Inner"])
parent = dom_path_to_string(node["parent"], "IterVar")
outer = dom_path_to_string(node["outer"], "IterVar")
inner = dom_path_to_string(node["inner"], "IterVar")
g.edge(parent + ":itervar", node_id + ":Input")
g.edge(node_id + ":Outer", outer + ":itervar")
g.edge(node_id + ":Inner", inner + ":itervar")
elif node_type == "Fuse_Relation":
node_type = "Fuse"
itervar_relation_node_dot(g, node_id, node_type, ["Outer", "Inner"], ["Fused"])
fused = dom_path_to_string(node["fused"], "IterVar")
outer = dom_path_to_string(node["outer"], "IterVar")
inner = dom_path_to_string(node["inner"], "IterVar")
g.edge(outer + ":itervar", node_id + ":Outer")
g.edge(inner + ":itervar", node_id + ":Inner")
g.edge(node_id + ":Fused", fused + ":itervar")
elif node_type == "Singleton_Relation":
node_type = "Singleton"
itervar_relation_node_dot(g, node_id, node_type, [], ["Iter"])
itervar = dom_path_to_string(node["inner"], "IterVar")
g.edge(node_id + ":Iter", itervar + ":itervar")
else:
assert False, "Unknown IterVarRelationNode: " + node_type
def stage_node_dot(g, stage):
"""Create a stage node."""
with g.subgraph(name="cluster_" + stage["id"]) as subgraph:
subgraph.attr(label=stage["name"])
if stage["all_itervars"]:
for itervar in stage["all_itervars"]:
iv_type = itervar["itervar_type"]
itervar_node_dot(subgraph, itervar, iv_type, itervar["index"])
for rel in stage["relations"]:
node_id = rel["id"]
itervar_relation_dot(subgraph, rel, node_id)
else:
subgraph.node(stage["name"] + "_placeholder", style="invis")
graph = create_itervar_relation_graph("IterVar Relationship Graph")
s = extract_dom_for_viz(sch)
legend_dot(graph)
for stage in s["stages"]:
stage_node_dot(graph, stage)
return dump_graph(graph.source, show_svg, dot_file_path, output_dot_string)
def viz_dataflow_graph(sch, show_svg=False, dot_file_path="", output_dot_string=False):
"""Top level API to render dataflow graph
Parameters
----------
sch : schedule
The schedule object to visualize
show_svg : bool
Display graph as SVG, useful for Jupyter notebooks.
dot_file_path : string
Dot file to save the graph.
output_dot_string : bool
Return dot file content or an empty string.
Examples
--------
The following code writes a dataflow graph to a dot file.
.. code-block:: python
tedd.viz_dataflow_graph(s, dot_file_path = '/tmp/example.dot')
Use the following code to render a SVG graph in a Jupyter notebook.
.. code-block:: python
tedd.viz_dataflow_graph(s, show_svg = True)"""
def create_dataflow_graph(name=""):
return create_graph(name=name, rankdir="LR")
def tensor_node_dot(g, tensor):
"""Create a tensor node."""
label = tensor_node_label(tensor)
g.node(tensor["id"], label, shape="oval", margin="0")
def tensor_node_label(tensor):
"""Return a html format label for the given tensor."""
label = str(tensor["shape"]) + "\n" + str(tensor["data_type"])
return label
def stage_node_dot(g, stage):
"""Create a stage node."""
label = stage_node_label(stage)
g.node(stage["id"], label, shape="none", margin="0")
def stage_node_label(stage):
"""Return a html format label for the given stage."""
rows = max(1, max(len(stage["output_tensors"]), len(stage["input_tensors"])))
label = '<<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" ' 'CELLPADDING="4">'
for i in range(rows):
label += "<TR>"
if i < len(stage["input_tensors"]):
port_id = get_port_id(True, i)
label += (
'<TD BGCOLOR="lightgrey" COLSPAN="2" PORT="' + port_id + '">' + str(i) + "</TD>"
)
else:
label += '<TD BGCOLOR="white" COLSPAN="2"></TD>'
if i == 0:
label += (
'<TD BGCOLOR="white" COLSPAN="2" ROWSPAN="'
+ str(rows)
+ '">'
+ stage_label(stage)
+ "</TD>"
)
if i < len(stage["output_tensors"]):
port_id = get_port_id(False, i)
label += (
'<TD BGCOLOR="lightgrey" COLSPAN="2" PORT="' + port_id + '">' + str(i) + "</TD>"
)
else:
label += '<TD BGCOLOR="white" COLSPAN="2"></TD>'
label += "</TR>"
label += "</TABLE>>"
return label
def dfg_dot(g, sch):
"""Create edges among stages."""
stages = sch["stages"]
for stage in stages:
for i in range(len(stage["input_tensors"])):
src = dom_path_to_string(stage["input_tensors"][i], "Tensor")
dst = stage["id"] + ":" + get_port_id(True, i)
g.edge(src, dst)
for i in range(len(stage["output_tensors"])):
src = stage["id"] + ":" + get_port_id(False, i)
dst = stage["output_tensors"][i]["id"]
g.edge(src, dst)
graph = create_dataflow_graph("Dataflow Graph")
s = extract_dom_for_viz(sch, need_range=False)
for stage in s["stages"]:
stage_node_dot(graph, stage)
for tensor in stage["output_tensors"]:
tensor_node_dot(graph, tensor)
dfg_dot(graph, s)
return dump_graph(graph.source, show_svg, dot_file_path, output_dot_string)
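# A minimal usage sketch for the three top-level APIs above, assuming the
# graphviz Python package is installed and this module is importable as
# tvm.contrib.tedd:
#
#   import tvm
#   from tvm import te
#   from tvm.contrib import tedd
#
#   n = te.var("n")
#   A = te.placeholder((n,), name="A")
#   B = te.compute((n,), lambda i: A[i] * 2.0, name="B")
#   s = te.create_schedule(B.op)
#   xo, xi = s[B].split(B.op.axis[0], factor=32)
#   tedd.viz_schedule_tree(s, dot_file_path="/tmp/schedule_tree.dot")
#   tedd.viz_dataflow_graph(s, dot_file_path="/tmp/dataflow.dot")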
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/tf_op/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module container of TensorFlow TVMDSO op"""
from . import module
OpModule = module.OpModule
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/tf_op/module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module container of TensorFlow TVMDSO op"""
import tensorflow as tf
from tensorflow.python.framework import load_library
from tensorflow.python import platform
class OpModule:
"""Module container of TensorFlow TVMDSO op which wraps exported
TVM op implementation library to be called on TensorFlow side"""
def __init__(self, lib_path):
self.lib_path = lib_path
def func(self, name, output_dtype=None, output_shape=None):
"""Get tvm op function wrapped as TensorFlow tensor to tensor function
Parameters
----------
name: str
function name
output_dtype: str or TensorFlow datatype
Output datatype, default is float32
output_shape: List of integer/tf scalar tensor or tf shape tensor
Output shape; defaults to the shape of the first input
Returns
----------
Func object that acts as TensorFlow tensor to tensor function.
"""
return TensorFunc(self.lib_path, name, output_dtype, output_shape)
def __getitem__(self, func_name):
return self.func(func_name)
class TensorFunc:
"""Function object that acts as TensorFlow tensor to tensor function."""
def __init__(self, lib_path, func_name, output_dtype, output_shape):
self.lib_path = lib_path
self.func_name = func_name
self.output_dtype = output_dtype
# const(0) indicates an invalid dynamic shape
self.dynamic_output_shape = tf.constant(0, tf.int64)
self.static_output_shape = None
self.has_static_output_shape = False # extra flag is required
if self._is_static_shape(output_shape):
self.static_output_shape = output_shape
self.has_static_output_shape = True
elif output_shape is not None:
self.dynamic_output_shape = self._pack_shape_tensor(output_shape)
self.module = self._load_platform_specific_library("libtvm_dso_op")
self.tvm_dso_op = self.module.tvm_dso_op
def apply(self, *params):
return self.tvm_dso_op(
params,
dynamic_output_shape=self.dynamic_output_shape,
static_output_shape=self.static_output_shape,
has_static_output_shape=self.has_static_output_shape,
lib_path=self.lib_path,
func_name=self.func_name,
output_dtype=self.output_dtype,
)
def __call__(self, *params):
return self.apply(*params)
def _load_platform_specific_library(self, lib_name):
system = platform.system()
if system == "Darwin":
lib_file_name = lib_name + ".dylib"
elif system == "Windows":
lib_file_name = lib_name + ".dll"
else:
lib_file_name = lib_name + ".so"
return load_library.load_op_library(lib_file_name)
def _is_static_shape(self, shape):
if shape is None or not isinstance(shape, list):
return False
for dim_value in shape:
if not isinstance(dim_value, int):
return False
if dim_value < 0:
raise Exception("Negative dimension is illegal: %d" % dim_value)
return True
def _pack_shape_tensor(self, shape):
if isinstance(shape, tf.Tensor):
if shape.dtype == tf.int32:
shape = tf.cast(shape, tf.int64)
elif isinstance(shape, list):
shape_dims = []
for dim_value in shape:
if isinstance(dim_value, int):
shape_dims.append(tf.constant(dim_value, tf.int64))
elif isinstance(dim_value, tf.Tensor) and dim_value.shape.rank == 0:
if dim_value.dtype == tf.int32:
dim_value = tf.cast(dim_value, tf.int64)
shape_dims.append(dim_value)
else:
raise TypeError("Input shape dimension is neither scalar tensor nor int")
shape = tf.stack(shape_dims)
else:
raise TypeError("Input shape is neither tensor nor list")
return shape
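# A minimal usage sketch, assuming "tvm_addone_dll.so" was exported from TVM
# and defines a function named "addone" (both names are illustrative):
#
#   import tensorflow as tf
#   from tvm.contrib import tf_op
#
#   mod = tf_op.OpModule("tvm_addone_dll.so")
#   addone = mod.func("addone", output_shape=[4], output_dtype=tf.float32)
#   x = tf.constant([1.0, 2.0, 3.0, 4.0])
#   y = addone(x)  # runs the TVM kernel inside the TensorFlow graph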
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/tflite_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TFLite runtime that load and run tflite models."""
import tvm._ffi
from ..rpc import base as rpc_base
def create(tflite_model_bytes, device, runtime_target="cpu"):
"""Create a runtime executor module given a tflite model and device.
Parameters
----------
tflite_model_bytes : bytes
The tflite model to be deployed in bytes string format.
device : Device
The device to deploy the module. It can be local or remote when there
is only one Device.
runtime_target: str
Execution target of TFLite runtime: either `cpu` or `edge_tpu`.
Returns
-------
tflite_runtime : TFLiteModule
Runtime tflite module that can be used to execute the tflite model.
"""
device_type = device.device_type
if runtime_target == "edge_tpu":
runtime_func = "tvm.edgetpu_runtime.create"
else:
runtime_func = "tvm.tflite_runtime.create"
if device_type >= rpc_base.RPC_SESS_MASK:
fcreate = device._rpc_sess.get_function(runtime_func)
else:
fcreate = tvm._ffi.get_global_func(runtime_func)
return TFLiteModule(fcreate(bytearray(tflite_model_bytes), device))
class TFLiteModule(object):
"""Wrapper runtime module.
This is a thin wrapper of the underlying TVM module.
You can also directly call set_input, invoke, and get_output
of the underlying module functions.
Parameters
----------
module : Module
The internal tvm module that holds the actual tflite functions.
Attributes
----------
module : Module
The internal tvm module that holds the actual tflite functions.
"""
def __init__(self, module):
self.module = module
self._set_input = module["set_input"]
self._invoke = module["invoke"]
self._get_output = module["get_output"]
self._set_num_threads = module["set_num_threads"]
def set_input(self, index, value):
"""Set inputs to the module via kwargs
Parameters
----------
key : int or str
The input key
value : the input value.
The input key
params : dict of str to NDArray
Additonal arguments
"""
self._set_input(index, value)
def invoke(self):
"""Invoke forward execution of the model
Parameters
----------
input_dict: dict of str to NDArray
List of input values to be feed to
"""
self._invoke()
def get_output(self, index):
"""Get index-th output to out
Parameters
----------
index : int
The output index
"""
return self._get_output(index)
def set_num_threads(self, num_threads):
"""Set the number of threads via kwargs
Parameters
----------
num_threads : int
The number of threads
"""
self._set_num_threads(num_threads)
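# A minimal usage sketch, assuming a flatbuffer model file "model.tflite"
# exists and TVM was built with the TFLite runtime enabled:
#
#   import numpy as np
#   import tvm
#   from tvm.contrib import tflite_runtime
#
#   with open("model.tflite", "rb") as f:
#       model_buf = f.read()
#   runtime = tflite_runtime.create(model_buf, tvm.cpu(0))
#   runtime.set_input(0, tvm.nd.array(np.zeros((1, 224, 224, 3), "float32")))
#   runtime.invoke()
#   out = runtime.get_output(0)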
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/thrust.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for thrust"""
import logging
from tvm._ffi import get_global_func
def maybe_warn(target, func_name):
if get_global_func(func_name, allow_missing=True) and "thrust" not in target.libs:
logging.warning("TVM is built with thrust but thrust is not used.")
if "thrust" in target.libs and get_global_func(func_name, allow_missing=True) is None:
logging.warning("thrust is requested but TVM is not built with thrust.")
def can_use_thrust(target, func_name):
maybe_warn(target, func_name)
return (
target.kind.name in ["cuda", "nvptx"]
and "thrust" in target.libs
and get_global_func(func_name, allow_missing=True)
)
def can_use_rocthrust(target, func_name):
maybe_warn(target, func_name)
return (
target.kind.name == "rocm"
and "thrust" in target.libs
and get_global_func(func_name, allow_missing=True)
)
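# A minimal usage sketch; "tvm.contrib.thrust.sort" is one of the global
# function names these helpers are typically queried with:
#
#   from tvm.target import Target
#
#   target = Target("cuda -libs=thrust")
#   if can_use_thrust(target, "tvm.contrib.thrust.sort"):
#       pass  # select the thrust-backed sort implementation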
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/torch/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position,redefined-builtin,invalid-name
"""Module container of Pytorch custom class"""
import os
import platform
import warnings
import torch
from tvm._ffi import libinfo
def _load_platform_specific_library(lib_name):
system = platform.system()
if system == "Darwin":
lib_file_name = lib_name + ".dylib"
elif system == "Windows":
lib_file_name = lib_name + ".dll"
else:
lib_file_name = lib_name + ".so"
lib_path = libinfo.find_lib_path()[0]
lib_dir = os.path.dirname(lib_path)
lib_file_path = os.path.join(lib_dir, lib_file_name)
try:
torch.classes.load_library(lib_file_path)
except OSError as err:
errmsg = str(err)
if errmsg.find("undefined symbol") != -1:
reason = " ".join(
(
"Got undefined symbol error,",
"which might be due to the CXXABI incompatibility.",
)
)
else:
reason = errmsg
warnings.warn(
f"The library {lib_name} is not built successfully. {reason}",
RuntimeWarning,
)
_load_platform_specific_library("libpt_tvmdsoop")
_load_platform_specific_library("libpt_tvmdsoop_new")
from . import module
GraphModule = module.GraphModule
VMModule = module.VMModule
TraceTvmModule = module.TraceTvmModule
from . import pytorch_tvm
PyTorchTVMModule = pytorch_tvm.PyTorchTVMModule
compile = pytorch_tvm.compile
from . import as_torch
TVMScriptIRModule = as_torch.OperatorModuleWrapper
as_torch = as_torch.as_torch
from . import optimize_torch
GraphExecutorFactoryWrapper = optimize_torch.GraphExecutorFactoryWrapper
optimize_torch = optimize_torch.optimize_torch
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/torch/as_torch.py | # pylint: disable=inconsistent-return-statements
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
"""
as_torch: a decorator that wraps TVMScript code into a `torch.nn.Module`.
"""
import tempfile
from typing import Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
import torch
import torch.utils.dlpack
import tvm
from tvm import meta_schedule as ms
from tvm.target.target import Target
from tvm.tir import PrimFunc
# python wrapper for OperatorModule
class OperatorModuleWrapper(torch.nn.Module):
def __init__(
self,
module: Union[
tvm.ir.module.IRModule,
tvm.tir.function.PrimFunc,
],
):
super().__init__()
self.rt_module = None # runtime module
self.ir_module = module # IR modules
def tune(
self,
target: Union[str, Target] = "cpu",
max_trials_global: int = 32,
*,
num_trials_per_iter: int = 32,
builder: ms.Builder.BuilderType = "local",
runner: ms.Runner.RunnerType = "local",
database: ms.Database.DatabaseType = "json",
cost_model: ms.CostModel.CostModelType = "xgb",
measure_callbacks: ms.MeasureCallback.CallbackListType = "default",
task_scheduler: ms.TaskScheduler.TaskSchedulerType = "round-robin",
space: ms.SpaceGenerator.SpaceGeneratorType = "post-order-apply",
strategy: ms.SearchStrategy.SearchStrategyType = "replay-trace",
task_name: str = "main",
num_threads: Union[Literal["physical", "logical"], int] = "physical",
seed: Optional[int] = None,
) -> None:
"""
Tune the TVMScript code.
Parameters
----------
target : Union[str, Target]
The target to tune for.
max_trials_global : int
The maximum number of trials to run globally.
"""
if target == "cpu":
target = f"llvm --num-cores {ms.utils.cpu_count(logical=False)}"
with tempfile.TemporaryDirectory() as work_dir:
database = ms.tir_integration.tune_tir(
mod=self.ir_module,
target=target,
work_dir=work_dir,
max_trials_global=max_trials_global,
num_trials_per_iter=num_trials_per_iter,
builder=builder,
runner=runner,
database=database,
cost_model=cost_model,
measure_callbacks=measure_callbacks,
task_scheduler=task_scheduler,
space=space,
strategy=strategy,
task_name=task_name,
num_threads=num_threads,
seed=seed,
)
sch = ms.tir_integration.compile_tir(database, self.ir_module, target)
self.ir_module = sch.mod
self.build(target)
def script(self):
return self.ir_module.script()
def build(self, target=None):
runtime_module = tvm.build(self.ir_module, target=target)
func = tvm.get_global_func("tvmtorch.save_runtime_mod", allow_missing=True)
if func is None:
raise ValueError('as_torch requires the flag "USE_PT_TVMDSOOP" set in config.cmake')
func(runtime_module)
self.rt_module = torch.classes.tvm_torch.OperatorModuleWrapper()
def forward(self, *torch_inputs: List[torch.Tensor]) -> List[torch.Tensor]:
if self.rt_module is None:
if torch_inputs[0].is_cuda:
self.build(target="cuda")
elif torch_inputs[0].device.type == "cpu":
self.build()
else:
raise Exception(f"the target {torch_inputs[0].device.type} is not supported yet")
return self.rt_module.forward(torch_inputs)
def as_torch(func: Union[tvm.ir.module.IRModule, tvm.tir.function.PrimFunc, Callable]):
"""A decorator of converting TensorIR to PyTorch nn.Module.
Parameters
----------
func: Optional[tvm.ir.module.IRModule, tvm.tir.function.PrimFunc, Callable]
The function written by TVMScript.
Returns
-------
mod : Union[OperatorModuleWrapper, Callable]
It will return an object, or a templated function of OperatorModuleWrapper,
which is the subclass of the original nn.Module.
"""
if isinstance(func, (tvm.ir.module.IRModule, PrimFunc)):
return OperatorModuleWrapper(func)
if callable(func):
def func_get_param(*args, **kwargs):
return OperatorModuleWrapper(func(*args, **kwargs))
return func_get_param
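# A minimal usage sketch, assuming TVM was built with USE_PT_TVMDSOOP and
# using the TVMScript syntax of this TVM version:
#
#   import torch
#   from tvm.script import tir as T
#
#   @as_torch
#   @T.prim_func
#   def plus_one(A: T.Buffer[(8,), "float32"], B: T.Buffer[(8,), "float32"]):
#       for i in range(8):
#           with T.block("plus_one"):
#               vi = T.axis.spatial(8, i)
#               B[vi] = A[vi] + 1.0
#
#   a = torch.rand(8)
#   b = torch.zeros(8)
#   plus_one(a, b)  # forward() builds for CPU on the first call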
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/torch/module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Module container of PyTorch custom class"""
import warnings
from typing import List
import torch
class GraphModule(torch.nn.Module):
r"""Module container of Pytorch class which wraps exported
TVM op implementation library to be called on Pytorch side"""
@classmethod
def shape_repr(cls, input_shapes):
return torch.ops.tvm_dsoop.tvm_shape_repr(input_shapes)
def __init__(self, num_inputs, num_outputs, device=None):
warnings.warn(
"This module will be removed at TVM version 0.11",
DeprecationWarning,
stacklevel=2,
)
super().__init__()
self.dummy_param = torch.nn.Parameter(torch.empty(0))
self.engine = None
if device is not None:
self.to(device)
self.engine = torch.classes.tvm_dsoop.TvmGraphModule(num_inputs, num_outputs, self.device)
def init(self, input_shapes, lib_path, graph_path, params_path):
r"""Load tvm module"""
self.engine.load_tvm_module(input_shapes, lib_path, graph_path, params_path)
def forward(self, inputs: List[torch.Tensor]):
r"""Call tvm module to forward"""
return self.engine.forward(inputs)
@property
def device(self):
r"""Get the device string"""
return str(self.dummy_param.device)
def _apply(self, fn):
r"""Override to device function, manually move tvm module to desired device"""
super()._apply(fn)
if self.engine is not None:
self.engine.to(self.device)
return self
class VMModule(torch.nn.Module):
r"""Module container of Pytorch class which wraps exported
TVM op implementation library to be called on Pytorch side"""
@classmethod
def shape_repr(cls, input_shapes):
return torch.ops.tvm_dsoop.tvm_shape_repr(input_shapes)
def __init__(self, num_inputs, num_outputs, device=None):
warnings.warn(
"This module will be removed at TVM version 0.11",
DeprecationWarning,
stacklevel=2,
)
super().__init__()
self.dummy_param = torch.nn.Parameter(torch.empty(0))
self.engine = None
if device is not None:
self.to(device)
self.engine = torch.classes.tvm_dsoop.TvmVMModule(num_inputs, num_outputs, self.device)
def init(self, input_shapes, lib_path, code_path):
r"""Load tvm module"""
self.engine.load_tvm_module(input_shapes, lib_path, code_path)
def forward(self, inputs: List[torch.Tensor]):
r"""Call tvm module to forward"""
return self.engine.forward(inputs)
@property
def device(self):
r"""Get the device string"""
return str(self.dummy_param.device)
def _apply(self, fn):
r"""Override to device function, manually move tvm module to desired device"""
super()._apply(fn)
if self.engine is not None:
self.engine.to(self.device)
return self
class TraceTvmModule(torch.nn.Module):
r"""Wrapper for trace GraphModule
GraphModule and VMModule only support List[Tensor] inputs and cannot be traced.
This is a wrapper class for trace GraphModule or VMModule in order to support
arbitrary number of inputs
Example:
import tvm.contrib.torch
tvm_module = tvm.contrib.torch.GraphModule(1, 1, 'cuda:0')
tvm_module.init(input_shapes, lib_path, graph_path, params_path)
trace_wrapper = tvm.contrib.torch.TraceTvmModule(torch.jit.script(tvm_module))
traced = torch.jit.trace(trace_wrapper, example_inputs)
"""
def __init__(self, tvm_module):
warnings.warn(
"This module will be removed at TVM version 0.11",
DeprecationWarning,
stacklevel=2,
)
super().__init__()
self.tvm_module = tvm_module
def forward(self, *inputs):
outputs = self.tvm_module(inputs)
return outputs[0] if len(outputs) == 1 else tuple(outputs)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/torch/optimize_torch.py | # pylint: disable=inconsistent-return-statements
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
"""
optimize_torch: a function similar to `torch.jit.trace`,
which is used to optimize a `torch.nn.Module` via TVM MetaSchedule,
and returns a custom TorchScript operator
"""
import base64
import contextlib
import tempfile
from typing import Optional, Tuple, Union
import torch
import torch.utils.dlpack
import tvm
from tvm import meta_schedule as ms
from tvm import relay
from tvm._ffi import get_global_func, register_func
from tvm.target import Target
class GraphExecutorFactoryWrapper(torch.nn.Module):
def __init__(self, module: tvm.runtime.Module):
super().__init__()
self.inner_module = module
def forward(self, *torch_inputs: Tuple[torch.Tensor]):
ret = self.inner_module.forward(torch_inputs)
if len(ret) == 1:
return ret[0]
return ret
@register_func("script_torch.save_to_base64")
def save_to_base64(obj) -> bytes:
with tempfile.NamedTemporaryFile(suffix=".so") as tmpfile:
obj.export_library(tmpfile.name)
with open(tmpfile.name, "rb") as temp_file:
return base64.b64encode(temp_file.read())
def optimize_torch(
func,
example_inputs,
*,
max_trials_global: int,
work_dir=None,
target: Union[str, Target] = "cpu",
max_trials_per_task: Optional[int] = None,
num_trials_per_iter: int = 64,
builder: ms.Builder.BuilderType = "local",
runner: ms.Runner.RunnerType = "local",
database: ms.Database.DatabaseType = "json",
cost_model: ms.CostModel.CostModelType = "xgb",
measure_callbacks: ms.MeasureCallback.CallbackListType = "default",
task_scheduler: ms.TaskScheduler.TaskSchedulerType = "gradient",
space: ms.SpaceGenerator.SpaceGeneratorType = "post-order-apply",
strategy: ms.SearchStrategy.SearchStrategyType = "evolutionary",
seed: Optional[int] = None,
):
"""Load PyTorch model that could be traced by TorchScript, then optimize it via MetaSchedule.
Parameters
----------
func : callable or torch.nn.Module
A Python function or nn.Module that could run by TorchScript's trace.
(ie: torch.jit.trace(model, input))
example_inputs : tuple or torch.Tensor
Inputs to `torch.jit.trace`.
max_trials_global : int
The maximum number of trials to run globally.
work_dir : Optional[str]
The working directory to save intermediate results.
target : Optional[Union[str, Target]]
The target of the compilation.
If user doesn't set the target, the module will be built for the CPU target.
max_trials_per_task : Optional[int]
The maximum number of trials to run per task.
num_trials_per_iter : int
The number of trials to run per iteration
builder : Builder.BuilderType
The builder.
runner : Runner.RunnerType
The runner.
database : Database.DatabaseType
The database.
cost_model : CostModel.CostModelType
The cost model.
measure_callbacks : MeasureCallback.CallbackListType
The measure callbacks.
task_scheduler : TaskScheduler.TaskSchedulerType
The task scheduler.
space : SpaceGenerator.SpaceGeneratorType
The space generator to use.
strategy : SearchStrategy.SearchStrategyType
The search strategy to use.
seed : Optional[int]
The random seed to use.
Returns
-------
mod : GraphExecutorFactoryWrapper
It will return an object of GraphExecutorFactoryWrapper,
which is the subclass of the original nn.Module.
"""
if target == "cpu":
target = f"llvm --num-cores {ms.utils.cpu_count(logical=False)}"
if not isinstance(target, Target):
target = Target(target)
# If `func` is already a traced module, this statement has no effect
jit_mod = torch.jit.trace(func, example_inputs)
if isinstance(example_inputs, torch.Tensor):
example_inputs = [example_inputs]
shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
mod, params = relay.frontend.from_pytorch(jit_mod, shape_list) # IRmodule
if work_dir:
context_manager = contextlib.nullcontext(work_dir)
else:
context_manager = tempfile.TemporaryDirectory()
with context_manager as work_dir: # pylint: disable=redefined-argument-from-local
database = ms.relay_integration.tune_relay(
mod=mod,
params=params,
target=target,
work_dir=work_dir,
max_trials_global=max_trials_global,
max_trials_per_task=max_trials_per_task,
num_trials_per_iter=num_trials_per_iter,
builder=builder,
runner=runner,
database=database,
cost_model=cost_model,
measure_callbacks=measure_callbacks,
task_scheduler=task_scheduler,
space=space,
strategy=strategy,
seed=seed,
)
executor_factory = ms.relay_integration.compile_relay(
database=database,
mod=mod,
target=target,
params=params,
backend="graph",
)
save_runtime_mod = get_global_func("tvmtorch.save_runtime_mod", allow_missing=True)
if save_runtime_mod is None:
raise ValueError('optimize_torch requires the flag "USE_PT_TVMDSOOP" set in config.cmake')
save_runtime_mod(executor_factory.module)
return GraphExecutorFactoryWrapper(torch.classes.tvm_torch.GraphExecutorFactoryWrapper())
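# A minimal usage sketch, assuming a PyTorch build that is ABI-compatible
# with this TVM build (USE_PT_TVMDSOOP enabled):
#
#   import torch
#
#   class SimpleModel(torch.nn.Module):
#       def forward(self, x):
#           return torch.relu(x) + 1.0
#
#   model = SimpleModel().eval()
#   example = torch.rand(1, 3, 8, 8)
#   opt_model = optimize_torch(model, example, max_trials_global=16)
#   out = opt_model(example)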
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/torch/pytorch_tvm.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin
"""`compile` api that convert torch module to torch tvm module"""
import os
import warnings
import tvm
import tvm.testing
from tvm import relay, autotvm
from tvm.runtime import load_module
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib import graph_executor
from tvm.contrib.debugger import debug_executor
from . import GraphModule
def tune_tasks(
tasks,
measure_option,
tuner="xgb",
n_trial=1000,
early_stopping=None,
log_filename="tuning.log",
use_transfer_learning=True,
):
"""Tune tasks and generate tuning log to file"""
# create tmp log file
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = f"[Task {i + 1:2d}/{len(tasks):2d}] "
# create tuner
if tuner in ("xgb", "sgb-rank"):
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=100)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
# do tuning
tsk_trial = min(n_trial, len(tsk.config_space))
tuner_obj.tune(
n_trial=tsk_trial,
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
# pick best records to a cache file
if not os.path.exists(log_filename):
with open(log_filename, "w", encoding="utf-8"):
pass
if os.path.exists(tmp_log_file):
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
def get_tuning_opt(log_file="tuning.log", n_trial=200):
"""Returns tuning options"""
tuning_opt = {
"log_filename": log_file,
"tuner": "random",
"n_trial": n_trial,
"early_stopping": 60,
"measure_option": autotvm.measure_option(
builder=autotvm.LocalBuilder(timeout=10),
runner=autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150),
),
}
return tuning_opt
TVM_ASSETS = ["mod.so", "graph.json", "params"]
class PyTorchTVMModule:
"""Helper class for compiling pytorch module to tvm module"""
def __init__(self, target="cuda", device=tvm.cuda(0)) -> None:
self.script_module = None
self.input_infos = None
self.default_dtype = "float32"
self.mod = None
self.params = None
self.tasks = None
self.target = target
self.dev = device
self.log_file = None
self.export_dir = None
self.tvm_module = None
self.tvm_graph = None
self.tvm_lib = None
self.tvm_params = None
def from_pytorch(self, script_module, input_infos, default_dtype="float32"):
self.script_module = script_module
self.input_infos = input_infos
self.default_dtype = default_dtype
self.mod, self.params = relay.frontend.from_pytorch(
script_module, input_infos, default_dtype=default_dtype
)
def tune_tvm(self, log_file="tuning.log", n_trial=200):
self.tasks = autotvm.task.extract_from_program(
self.mod["main"],
target=self.target,
params=self.params,
)
self.log_file = log_file
tuning_opt = get_tuning_opt(log_file, n_trial)
tune_tasks(self.tasks, **tuning_opt)
def build_tvm(self, export_dir, debug_runtime=False):
tvm_mod = self._build_tvm(debug_runtime)
self._export_tvm(export_dir)
return tvm_mod
def _build_tvm(self, debug_runtime=False):
# compile kernels with history best records
with autotvm.apply_history_best(self.log_file):
with tvm.transform.PassContext(opt_level=3):
self.tvm_graph, self.tvm_lib, self.tvm_params = relay.build(
self.mod, target=self.target, params=self.params
)
if not debug_runtime:
self.tvm_module = graph_executor.create(self.tvm_graph, self.tvm_lib, device=self.dev)
else:
self.tvm_module = debug_executor.create(self.tvm_graph, self.tvm_lib, device=self.dev)
self.tvm_module.set_input(**self.tvm_params)
return self.tvm_module
def _export_tvm(self, export_dir):
if not os.path.isdir(export_dir):
os.makedirs(export_dir)
self.export_dir = export_dir
self.tvm_lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
with open(os.path.join(export_dir, TVM_ASSETS[1]), "w", encoding="utf8") as fout:
fout.write(self.tvm_graph)
with open(os.path.join(export_dir, TVM_ASSETS[2]), "wb") as fout:
fout.write(relay.save_param_dict(self.tvm_params))
def load_tvm(self, export_dir):
"""Load tvm module from export directory"""
self.export_dir = export_dir
self.tvm_lib = load_module(os.path.join(export_dir, TVM_ASSETS[0]))
with open(os.path.join(export_dir, TVM_ASSETS[1]), "r", encoding="utf8") as f:
self.tvm_graph = f.read()
with open(os.path.join(export_dir, TVM_ASSETS[2]), "rb") as f:
self.tvm_params = relay.load_param_dict(f.read())
self.tvm_module = graph_executor.create(self.tvm_graph, self.tvm_lib, device=self.dev)
self.tvm_module.set_input(**self.tvm_params)
return self.tvm_module
def build_pytorch_module(self, num_inputs, num_outputs, input_infos=None):
"""Build pytorch module containing TVM Graph Module"""
warnings.warn(
" ".join(
(
"This function will be removed at TVM version 0.11,",
"we suggest users to use `optimized_torch` for tuning Torch modules instead.",
)
),
DeprecationWarning,
stacklevel=2,
)
assert self.export_dir, "you must call build_tvm or load_tvm first"
input_infos = input_infos or self.input_infos
assert input_infos
assert len(input_infos) == num_inputs
assets = [os.path.join(self.export_dir, i) for i in TVM_ASSETS]
input_shapes = [i[1] for i in input_infos]
def _tvm_dev_to_pt_dev(device):
"""convert tvm device to pytorch device string"""
if tvm.runtime.Device.MASK2STR[device.device_type] == "cpu":
return "cpu"
if tvm.runtime.Device.MASK2STR[device.device_type] == "cuda":
return f"cuda:{device.device_id}"
raise ValueError(f"unsupported device for pt graph module: {device}")
mod = GraphModule(num_inputs=num_inputs, num_outputs=num_outputs).to(
_tvm_dev_to_pt_dev(self.dev)
)
mod.init(input_shapes, *assets)
return mod
def compile(script_module, option):
"""
example:
option = {
"input_infos": [
("x", (1, 3, 244, 244)),
],
"default_dtype": "float16",
"export_dir": "pytorch_compiled",
"num_outputs": 1,
"tuning_n_trials": 20, # set zero to skip tuning
"tuning_log_file": "tuning.log",
"target": "llvm",
"device": tvm.cpu(),
}
script_module = torch.jit.script(model)
pytorch_tvm_module = compile(script_module, option)
pytorch_tvm_module("model_tvm.pt")
"""
warnings.warn(
" ".join(
(
"This function will be removed at TVM version 0.11,",
"we suggest users to use `optimized_torch` for tuning Torch modules instead.",
)
),
DeprecationWarning,
stacklevel=2,
)
input_infos = option["input_infos"]
default_dtype = option.get("default_dtype", "float32")
export_dir = option.get("export_dir", "pytorch_compiled")
tuning_log_file = option.get("tuning_log_file", "tuning.log")
tuning_n_trials = option.get("tuning_n_trials", 20)
num_outputs = option.get("num_outputs", 1)
target = option.get("target", "cuda")
device = option.get("device", tvm.cuda(0))
mod = PyTorchTVMModule(target=target, device=device)
print("Converting...")
mod.log_file = tuning_log_file
mod.from_pytorch(script_module, input_infos, default_dtype)
if tuning_n_trials > 0:
print("Tuning...")
mod.tune_tvm(log_file=tuning_log_file, n_trial=tuning_n_trials)
print("Building...")
mod.build_tvm(export_dir)
pytorch_mod = mod.build_pytorch_module(num_inputs=len(input_infos), num_outputs=num_outputs)
return pytorch_mod
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common system utilities"""
import atexit
import contextlib
import datetime
import os
import pathlib
import tempfile
import threading
import shutil
try:
import fcntl
except ImportError:
fcntl = None
class DirectoryCreatedPastAtExit(Exception):
"""Raised when a TempDirectory is created after the atexit hook runs."""
class TempDirectory(object):
"""Helper object to manage temp directory during testing.
Automatically removes the directory when it goes out of scope.
"""
# When True, all TempDirectory instances are *NOT* deleted and instead live inside a predictable directory
# tree.
_KEEP_FOR_DEBUG = False
# In debug mode, each tempdir is named after its creation sequence number
_NUM_TEMPDIR_CREATED = 0
_NUM_TEMPDIR_CREATED_LOCK = threading.Lock()
@classmethod
def _increment_num_tempdir_created(cls):
with cls._NUM_TEMPDIR_CREATED_LOCK:
to_return = cls._NUM_TEMPDIR_CREATED
cls._NUM_TEMPDIR_CREATED += 1
return to_return
_DEBUG_PARENT_DIR = None
@classmethod
def _get_debug_parent_dir(cls):
if cls._DEBUG_PARENT_DIR is None:
all_parents = f"{tempfile.gettempdir()}/tvm-debug-mode-tempdirs"
if not os.path.isdir(all_parents):
os.makedirs(all_parents)
cls._DEBUG_PARENT_DIR = tempfile.mkdtemp(
prefix=datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S___"), dir=all_parents
)
return cls._DEBUG_PARENT_DIR
TEMPDIRS = set()
@classmethod
def remove_tempdirs(cls):
temp_dirs = getattr(cls, "TEMPDIRS", None)
if temp_dirs is None:
return
for path in temp_dirs:
shutil.rmtree(path, ignore_errors=True)
cls.TEMPDIRS = None
@classmethod
@contextlib.contextmanager
def set_keep_for_debug(cls, set_to=True):
"""Keep temporary directories past program exit for debugging."""
old_keep_for_debug = cls._KEEP_FOR_DEBUG
try:
cls._KEEP_FOR_DEBUG = set_to
yield
finally:
cls._KEEP_FOR_DEBUG = old_keep_for_debug
def __init__(self, custom_path=None, keep_for_debug=None):
if self.TEMPDIRS is None:
raise DirectoryCreatedPastAtExit()
if keep_for_debug is not None:
self._created_with_keep_for_debug = keep_for_debug
else:
self._created_with_keep_for_debug = self._KEEP_FOR_DEBUG
if custom_path:
os.mkdir(custom_path)
self.temp_dir = custom_path
else:
if self._created_with_keep_for_debug:
parent_dir = self._get_debug_parent_dir()
self.temp_dir = f"{parent_dir}/{self._increment_num_tempdir_created():05d}"
os.mkdir(self.temp_dir)
else:
self.temp_dir = tempfile.mkdtemp()
if not self._created_with_keep_for_debug:
self.TEMPDIRS.add(self.temp_dir)
def remove(self):
"""Remove the tmp dir"""
if self.temp_dir:
if not self._created_with_keep_for_debug:
shutil.rmtree(self.temp_dir, ignore_errors=True)
self.TEMPDIRS.remove(self.temp_dir)
self.temp_dir = None
@property
def path(self):
return pathlib.Path(self.temp_dir)
def __truediv__(self, other):
if not isinstance(other, (str, pathlib.Path)):
raise TypeError(
"TempDirectory / operator: must supply str or pathlib.Path; got %r" % (other,)
)
return self.path / other
def __del__(self):
temp_dirs = getattr(self, "TEMPDIRS", None)
if temp_dirs is None:
# Do nothing if the atexit hook has already run.
return
self.remove()
def relpath(self, name):
"""Relative path in temp dir
Parameters
----------
name : str
The name of the file.
Returns
-------
path : str
The concatenated path.
"""
return os.path.join(self.temp_dir, name)
def listdir(self):
"""List contents in the dir.
Returns
-------
names : list
The content of directory
"""
return os.listdir(self.temp_dir)
atexit.register(TempDirectory.remove_tempdirs)
def tempdir(custom_path=None, keep_for_debug=None):
"""Create temp dir which deletes the contents when exit.
Parameters
----------
custom_path : str, optional
Manually specify the exact temp dir path
keep_for_debug : bool
Keep temp directory for debugging purposes
Returns
-------
temp : TempDirectory
The temp directory object
"""
return TempDirectory(custom_path=custom_path, keep_for_debug=keep_for_debug)
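# A minimal usage sketch:
#
#   temp = tempdir()
#   lib_path = temp.relpath("model.so")  # path of a file under the temp dir
#   # ... write files under temp ...
#   print(temp.listdir())
#   # the directory is removed when `temp` goes out of scope or at exit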
class FileLock(object):
"""File lock object
Parameters
----------
path : str
The path to the lock
"""
def __init__(self, path):
self.lock_file = open(path, "w")
if fcntl:
fcntl.lockf(self.lock_file, fcntl.LOCK_EX)
def release(self):
"""Release the lock"""
if self.lock_file:
if fcntl:
fcntl.lockf(self.lock_file, fcntl.LOCK_UN)
self.lock_file.close()
self.lock_file = None
def filelock(path):
"""Create a file lock which locks on path
Parameters
----------
path : str
The path to the lock
Returns
-------
lock : File lock object
"""
return FileLock(path)
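# A minimal usage sketch:
#
#   lock = filelock("/tmp/build.lock")
#   try:
#       pass  # critical section guarded by the lock
#   finally:
#       lock.release()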
def is_source_path(path):
"""Check if path is source code path.
Parameters
----------
path : str
A possible path
Returns
-------
valid : bool
Whether path is a possible source path
"""
if os.path.exists(path):
return True
if path.find("\n") != -1:
return False
spath = path.rsplit(".", 1)
return len(spath) == 2 and spath[1].strip() == spath[1]
def which(exec_name):
"""Try to find full path of exec_name
Parameters
----------
exec_name : str
The executable name
Returns
-------
path : str
The full path of executable if found, otherwise returns None
"""
base_list = ["", "/bin"] + os.environ.get("PATH", "").split(os.pathsep)
for path in base_list:
full_path = os.path.join(path, exec_name)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
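# Example usage (a minimal sketch):
#
#     gcc = which("gcc")
#     if gcc is None:
#         raise RuntimeError("gcc was not found on PATH")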
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/xcode.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility to invoke Xcode compiler toolchain"""
from __future__ import absolute_import as _abs
import os
import sys
import subprocess
import json
from .._ffi.base import py_str
from . import utils
def xcrun(cmd):
"""Run xcrun and return the output.
Parameters
----------
cmd : list of str
The command sequence.
Returns
-------
out : str
The output string.
"""
cmd = ["xcrun"] + cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
return out.strip()
def __get_min_os_version(sdk):
if sdk == "macosx":
return None
if sdk in ("iphoneos", "iphonesimulator"):
return "13.0"
raise RuntimeError("Unsupported sdk: %s" % sdk)
def __get_min_os_version_cmd(sdk, min_os_version):
if min_os_version is None:
min_os_version = __get_min_os_version(sdk)
if min_os_version is not None:
return "-mios-version-min=" + min_os_version
return ""
def create_dylib(output, objects, arch, sdk="macosx", min_os_version=None):
"""Create dynamic library.
Parameters
----------
output : str
The target shared library.
objects : list
List of object files.
    arch : str
        The target major architecture.
    sdk : str
        The sdk to be used.
    min_os_version : str, optional
        The minimum OS version to target; inferred from the sdk when not given.
"""
clang = xcrun(["-sdk", sdk, "-find", "clang"])
sdk_path = xcrun(["-sdk", sdk, "--show-sdk-path"])
cmd = [clang]
cmd += ["-dynamiclib"]
cmd += ["-arch", arch]
cmd += ["-isysroot", sdk_path]
cmd += [__get_min_os_version_cmd(sdk, min_os_version)]
cmd += ["-o", output]
if isinstance(objects, str):
cmd += [objects]
else:
cmd += objects
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
# assign "dylib" as the default output format
create_dylib.output_format = "dylib"
def compile_metal(code, path_target=None, sdk="macosx", min_os_version=None):
"""Compile metal with CLI tool from env.
Parameters
----------
    code : str
        The metal source code.
    path_target : str, optional
        Output file.
    sdk : str, optional
        The target platform SDK.
    min_os_version : str, optional
        The minimum OS version to target.
    Returns
    -------
    metallib : bytearray
        The bytearray of the metallib
"""
temp = utils.tempdir()
temp_code = temp.relpath("my_lib.metal")
temp_ir = temp.relpath("my_lib.air")
temp_target = temp.relpath("my_lib.metallib")
with open(temp_code, "w") as out_file:
out_file.write(code)
file_target = path_target if path_target else temp_target
# See:
# - https://developer.apple.com/documentation/metal/gpu_functions_libraries/building_a_library_with_metal_s_command-line_tools#overview # pylint: disable=line-too-long
#
# xcrun -sdk macosx metal -c MyLibrary.metal -o MyLibrary.air
# xcrun -sdk macosx metallib MyLibrary.air -o MyLibrary.metallib
min_target = __get_min_os_version_cmd(sdk, min_os_version)
if sdk == "macosx":
language_version = "-std=macos-metal2.3"
elif sdk in ("iphoneos", "iphonesimulator"):
language_version = "-std=ios-metal2.3"
else:
raise RuntimeError("Unsupported sdk: %s" % sdk)
cmd1 = ["xcrun", "-sdk", sdk, "metal", language_version, min_target, "-O3"]
cmd1 += ["-c", temp_code, "-o", temp_ir]
cmd2 = ["xcrun", "-sdk", sdk, "metallib"]
cmd2 += [temp_ir, "-o", file_target]
proc = subprocess.Popen(
" ".join(cmd1) + ";" + " ".join(cmd2),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
(out, _) = proc.communicate()
if proc.returncode != 0:
sys.stderr.write("Compilation error:\n")
sys.stderr.write(py_str(out))
sys.stderr.flush()
libbin = None
else:
        with open(file_target, "rb") as f:
            libbin = bytearray(f.read())
return libbin
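# Example usage (a minimal sketch; the kernel source is illustrative and assumes a
# macOS host with the Xcode command line tools installed):
#
#     source = """
#     kernel void add_one(device float *buf [[buffer(0)]],
#                         uint idx [[thread_position_in_grid]]) {
#         buf[idx] += 1.0f;
#     }
#     """
#     metallib = compile_metal(source)   # returns None if compilation fails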
def compile_coreml(model, model_name="main", out_dir="."):
"""Compile coreml model and return the compiled model path."""
mlmodel_path = os.path.join(out_dir, model_name + ".mlmodel")
mlmodelc_path = os.path.join(out_dir, model_name + ".mlmodelc")
metadata = {"inputs": list(model.input_description), "outputs": list(model.output_description)}
# Use the description field to send info to CoreML runtime
model.short_description = json.dumps(metadata)
model.save(mlmodel_path)
res = xcrun(["coremlcompiler", "compile", mlmodel_path, out_dir])
if not os.path.isdir(mlmodelc_path):
raise RuntimeError("Compile failed: %s" % res)
return mlmodelc_path
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for driver APIs"""
from .build_module import lower, build
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.driver"""
import tvm._ffi
tvm._ffi._init_api("driver", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""The build utils in python."""
from typing import Union, Optional, List, Mapping
import warnings
import tvm.tir
from tvm import te
from tvm.runtime import Module
from tvm.runtime import ndarray
from tvm.ir import container
from tvm.tir import PrimFunc
from tvm.ir.module import IRModule
from tvm.te import tensor
from tvm.target import Target
from tvm.tir.buffer import Buffer
from tvm.tir.expr import Var
from tvm.driver import _ffi_api as _driver_ffi
from . import _ffi_api as ffi
def get_binds(args, compact=False, binds=None):
"""Internal function to get binds and arg_list given arguments.
Parameters
----------
args : list of Buffer or Tensor or Var
The argument lists to the function.
compact : bool
        If the statement has already been bound to a compact buffer.
binds : dict of :any:`Tensor` to :any:`Buffer`, optional
        Dictionary that maps the Tensor to Buffer which specifies the data layout
requirement of the function. By default, a new compact buffer is created
for each tensor in the argument.
Returns
-------
binds: dict
The bind specification
arg_list: list
The list of symbolic buffers of arguments.
"""
binds, arg_list = ffi.get_binds(args, compact, binds)
return binds, arg_list
def schedule_to_module(
sch: te.Schedule,
args: Optional[List[Union[Buffer, tensor.Tensor, Var]]] = None,
name: str = "main",
binds: Optional[Mapping[tensor.Tensor, Buffer]] = None,
) -> IRModule:
"""According to the given schedule, form a function.
This is a low-level function intended for testing purposes, and
does not apply any optimization passes. In general, `tvm.lower`
and `tvm.build` should be used instead.
Parameters
----------
sch : tvm.te.schedule.Schedule
The given scheduler to form the raw body
args : list of Buffer or Tensor or Var
The argument lists to the function.
name : str
The name of result function, default name is "main"
binds : dict of :any:`Tensor` to :any:`Buffer`, optional
The binds information
Returns
-------
The body formed according to the given schedule
"""
return ffi.schedule_to_module(sch, args, name, binds)
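# Example usage (a minimal sketch):
#
#     A = te.placeholder((16,), name="A")
#     B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
#     s = te.create_schedule(B.op)
#     mod = schedule_to_module(s, [A, B], name="add_one")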
def lower(
inp: Union[te.Schedule, PrimFunc, IRModule],
args: Optional[List[Union[Buffer, tensor.Tensor, Var]]] = None,
name: str = "main",
binds: Optional[Mapping[tensor.Tensor, Buffer]] = None,
simple_mode: bool = False,
) -> IRModule:
"""Lowering step before build into target.
Parameters
----------
inp : Union[tvm.te.schedule.Schedule, tvm.tir.PrimFunc, IRModule]
The TE schedule or TensorIR PrimFunc/IRModule to be built
args : Optional[List[Union[tvm.tir.Buffer, tensor.Tensor, Var]]]
The argument lists to the function for TE schedule.
It should be None if we want to lower TensorIR.
name : str
The name of the result function.
binds : Optional[Mapping[tensor.Tensor, tvm.tir.Buffer]]
        Dictionary that maps the Tensor to Buffer which specifies the data layout
requirement of the function. By default, a new compact buffer is created
for each tensor in the argument.
simple_mode : bool
        Whether to only output simple and compact statements; this will skip
LoopPartition, api wrapper generation and Unrolling.
Returns
-------
m : IRModule
The result IRModule
"""
if isinstance(inp, IRModule):
return ffi.lower_module(inp, simple_mode)
if isinstance(inp, PrimFunc):
return ffi.lower_primfunc(inp, name, simple_mode)
if isinstance(inp, te.Schedule):
return ffi.lower_schedule(inp, args, name, binds, simple_mode)
raise ValueError(
f"Expected input to be an IRModule, PrimFunc or te.Schedule, but got {type(inp)}"
)
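# Example usage (a minimal sketch):
#
#     A = te.placeholder((128,), name="A")
#     B = te.compute(A.shape, lambda i: A[i] * 2.0, name="B")
#     s = te.create_schedule(B.op)
#     lowered = lower(s, [A, B], name="double", simple_mode=True)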
def build(
inputs: Union[te.Schedule, PrimFunc, IRModule, Mapping[str, IRModule]],
args: Optional[List[Union[Buffer, tensor.Tensor, Var]]] = None,
target: Optional[Union[str, Target]] = None,
target_host: Optional[Union[str, Target]] = None,
runtime: Optional[
"tvm.relay.backend.Runtime"
] = None, # Type is annotated this way to avoid cyclic dependency
name: Optional[str] = "default_function",
binds: Optional[Mapping[tensor.Tensor, Buffer]] = None,
):
"""Build a function with arguments as signature. Code will be generated
for devices coupled with target information.
Parameters
----------
inputs : Union[tvm.te.schedule.Schedule, tvm.tir.PrimFunc, IRModule, Mapping[str, IRModule]]
The input to be built
args : Optional[List[Union[tvm.tir.Buffer, tensor.Tensor, Var]]]
The argument lists to the function.
target : Optional[Union[str, Target]]
The target and option of the compilation.
target_host : Optional[Union[str, Target]]
Host compilation target, if target is device.
        When TVM compiles device-specific programs such as CUDA,
        we also need host (CPU) side code to interact with the driver
        and set up the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled,
otherwise a stackvm interpreter is used.
runtime : Optional[Runtime]
Runtime to generate artifacts for
name : Optional[str]
The name of result function.
binds : Optional[Mapping[tensor.Tensor, tvm.tir.Buffer]]
        Dictionary that maps each Tensor to the symbolic Buffer bound to it.
By default, a new buffer is created for each tensor in the argument.
Returns
-------
ret : tvm.module
A module that combines both host and device code.
Examples
    --------
There are two typical example uses of this function depending on the type
of the argument `inputs`:
1. it is an IRModule.
.. code-block:: python
n = 2
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')
s = tvm.te.create_schedule(C.op)
m = tvm.lower(s, [A, B, C], name="test_add")
rt_mod = tvm.build(m, target="llvm")
2. it is a dict of compilation target to IRModule.
.. code-block:: python
n = 2
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')
s1 = tvm.te.create_schedule(C.op)
with tvm.target.cuda() as cuda_tgt:
s2 = topi.cuda.schedule_injective(cuda_tgt, [C])
m1 = tvm.lower(s1, [A, B, C], name="test_add1")
m2 = tvm.lower(s2, [A, B, C], name="test_add2")
rt_mod = tvm.build({"llvm": m1, "cuda": m2})
Note
----
See the note on :any:`tvm.target` on target string format.
"""
if isinstance(inputs, te.Schedule):
if args is None:
raise ValueError("args must be given for build from schedule")
input_mod = lower(inputs, args, name=name, binds=binds)
elif isinstance(inputs, (list, tuple, container.Array)):
merged_mod = tvm.IRModule({})
for x in inputs:
merged_mod.update(lower(x))
input_mod = merged_mod
elif isinstance(inputs, PrimFunc):
input_mod = lower(inputs, name=name)
elif isinstance(inputs, tvm.IRModule):
input_mod = lower(inputs)
elif not isinstance(inputs, (dict, container.Map)):
raise ValueError(
f"Inputs must be te.Schedule, IRModule, PrimFunc, "
f"or dict of target to IRModule, "
f"but got {type(inputs)}."
)
if not isinstance(inputs, (dict, container.Map)):
target = Target.current() if target is None else target
target = target if target else "llvm"
target_input_mod = {target: input_mod}
else:
target_input_mod = inputs
# Because modules can be created from a variety of sources, we annotate them
# with the relevant attributes here to ensure they propagate
annotated_mods = {}
for tar, mod in target_input_mod.items():
if not isinstance(tar, (str, Target)):
raise ValueError("The key of inputs must be str or " "Target when inputs is dict.")
if not isinstance(mod, tvm.IRModule):
raise ValueError("inputs must be Schedule, IRModule," "or dict of str to IRModule.")
annotated_mods[tar] = mod.with_attr("runtime", runtime)
# TODO(mbs): Both CompilationConfig and TIRToRuntime implement the same host target
# defaulting logic, but there's currently no way to get back the decided host.
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)
if not target_host:
for tar, mod in annotated_mods.items():
device_type = ndarray.device(tar.kind.name, 0).device_type
if device_type == ndarray.cpu(0).device_type:
target_host = tar
break
if not target_host:
target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm"
annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)
rt_mod_host = _driver_ffi.tir_to_runtime(annotated_mods, target_host)
annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)
if not isinstance(target_host, Target):
target_host = Target(target_host)
if str(runtime) == "crt" and runtime["system-lib"]:
if target_host.kind.name == "c":
create_csource_crt_metadata_module = tvm._ffi.get_global_func(
"runtime.CreateCSourceCrtMetadataModule"
)
to_return = create_csource_crt_metadata_module([rt_mod_host], target_host, runtime)
elif target_host.kind.name == "llvm":
create_llvm_crt_metadata_module = tvm._ffi.get_global_func(
"runtime.CreateLLVMCrtMetadataModule"
)
to_return = create_llvm_crt_metadata_module([rt_mod_host], target_host, runtime)
else:
to_return = rt_mod_host
return OperatorModule.from_module(to_return, ir_module_by_target=annotated_mods, name=name)
class OperatorModule(Module):
"""Wraps the Module returned by tvm.build() and captures additional outputs of that function."""
@classmethod
def from_module(cls, mod, **kwargs):
# NOTE(areusch): It is generally unsafe to continue using `mod` from this point forward.
# If an exception occurs in cls.__init__, handle will be deleted. For this reason,
# set mod.handle to None.
handle = mod.handle
mod.handle = None
return cls(handle, **kwargs)
def __init__(self, handle, ir_module_by_target=None, name=None):
super(OperatorModule, self).__init__(handle)
self.ir_module_by_target = ir_module_by_target
self.name = name
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin,wrong-import-position
"""
TVMC - TVM driver command-line interface
"""
class TVMCException(Exception):
"""TVMC Exception"""
class TVMCImportError(TVMCException):
"""TVMC TVMCImportError"""
from . import micro
from . import runner
from . import autotuner
from . import compiler
from . import result_utils
from .frontends import load_model as load
from .compiler import compile_model as compile
from .runner import run_module as run
from .autotuner import tune_model as tune
from .model import TVMCModel, TVMCPackage, TVMCResult
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/__main__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC - TVM driver command-line interface
"""
from tvm.driver import tvmc
if __name__ == "__main__":
tvmc.main.main()
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/arguments.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC Argument Parsing
"""
import argparse
from tvm.driver.tvmc import TVMCException
class TVMCSuppressedArgumentParser(argparse.ArgumentParser):
"""
A silent ArgumentParser class.
This class is meant to be used as a helper for creating dynamic parsers in
    TVMC. It will create a "suppressed" parser based on an existing one (parent)
which does not include a help message, does not print a usage message (even
when -h or --help is passed) and does not exit on invalid choice parse
errors but rather throws a TVMCException so it can be handled and the
dynamic parser construction is not interrupted prematurely.
"""
def __init__(self, parent, **kwargs):
        # Don't add '-h' or '--help' options to the newly created parser. Don't print usage messages.
        # 'add_help=False' won't suppress existing '-h' and '--help' options in the parser (and its
        # subparsers) present in 'parent'. However, this class is meant to be used with the main
        # parser, which is created with `add_help=False` - the help is added only later. Hence
        # the newly created parser won't have help options added to its (main) root parser. The
        # subparsers in the main parser will eventually have help activated, which is enough for its
        # use in TVMC.
super().__init__(parents=[parent], add_help=False, usage=argparse.SUPPRESS, **kwargs)
def exit(self, status=0, message=None):
# Don't exit on error when parsing the command line.
        # This won't catch all the errors generated during parsing, though. For instance, it won't
        # catch errors due to missing required arguments. But it will catch "error: invalid choice",
        # which is what is needed for its use in TVMC.
raise TVMCException()
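# Example usage (a minimal sketch; 'parent' mirrors how TVMC builds its main parser):
#
#     parent = argparse.ArgumentParser(add_help=False)
#     parent.add_argument("--target")
#     silent = TVMCSuppressedArgumentParser(parent)
#     try:
#         known_args, _ = silent.parse_known_args(["--target", "llvm"])
#     except TVMCException:
#         known_args = None   # e.g. an invalid choice was given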
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/autotuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support for auto-tuning networks using AutoTVM.
"""
import os.path
import logging
import time
from copy import deepcopy
from typing import Any, Optional, Dict, List, Union
from urllib.parse import urlparse
import tvm
from tvm import autotvm, auto_scheduler
from tvm.auto_scheduler.search_task import HardwareParams
from tvm.autotvm.tuner import GATuner
from tvm.autotvm.tuner import GridSearchTuner
from tvm.autotvm.tuner import RandomTuner
from tvm.autotvm.tuner import XGBTuner
from tvm.target import Target
from . import TVMCException, composite_target, frontends
from .main import register_parser
from .model import TVMCModel
from .target import target_from_cli, generate_target_args, reconstruct_target_args
from .shape_parser import parse_shape_string
from .transform import convert_graph_layout
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
@register_parser
def add_tune_parser(subparsers, _, json_params):
"""Include parser for 'tune' subcommand"""
parser = subparsers.add_parser("tune", help="auto-tune a model")
parser.set_defaults(func=drive_tune)
parser.add_argument(
"--early-stopping",
type=int,
help="minimum number of trials before early stopping",
)
# There is some extra processing required to define the actual default value
# for --min-repeat-ms. This is done in `tune_model`.
parser.add_argument(
"--min-repeat-ms",
default=None,
type=int,
help="minimum time to run each trial, in milliseconds. "
"Defaults to 0 on x86 and 1000 on all other targets",
)
parser.add_argument(
"--model-format",
choices=frontends.get_frontend_names(),
help="specify input model format",
)
parser.add_argument(
"--number",
default=10,
type=int,
help="number of runs a single repeat is made of. "
"The final number of tuning executions is: "
"(1 + number * repeat)",
)
parser.add_argument(
"-o",
"--output",
required=True,
help="output file to store the tuning records for the tuning process",
)
parser.add_argument(
"--parallel",
default=4,
type=int,
help="the maximum number of parallel devices to use when tuning",
)
parser.add_argument(
"--repeat",
type=int,
default=1,
help="how many times to repeat each measurement",
)
parser.add_argument(
"--rpc-key",
help="the RPC tracker key of the target device. "
"Required when --rpc-tracker is provided.",
)
parser.add_argument(
"--rpc-tracker",
help="hostname (required) and port (optional, defaults to 9090) of the RPC tracker, "
"e.g. '192.168.0.100:9999'",
)
generate_target_args(parser)
parser.add_argument(
"--target-host",
help="the host compilation target, defaults to 'llvm'",
default="llvm",
)
parser.add_argument("--timeout", type=int, default=10, help="compilation timeout, in seconds")
parser.add_argument(
"--trials",
type=int,
default=1000,
help="the maximum number of tuning trials to perform",
)
parser.add_argument(
"--tuning-records",
metavar="PATH",
help="path to an auto-tuning log file by AutoTVM.",
)
parser.add_argument(
"--desired-layout",
choices=["NCHW", "NHWC"],
default=None,
help="change the data layout of the whole graph",
)
parser.add_argument(
"--enable-autoscheduler",
help="enable tuning the graph through the AutoScheduler tuner",
action="store_true",
)
auto_scheduler_group = parser.add_argument_group(
"AutoScheduler options",
"AutoScheduler options, used when --enable-autoscheduler is provided",
)
auto_scheduler_group.add_argument(
"--cache-line-bytes",
type=int,
help="the size of cache line in bytes. "
"If not specified, it will be autoset for the current machine.",
)
auto_scheduler_group.add_argument(
"--num-cores",
type=int,
help="the number of device cores. "
"If not specified, it will be autoset for the current machine.",
)
auto_scheduler_group.add_argument(
"--vector-unit-bytes",
type=int,
help="the width of vector units in bytes. "
"If not specified, it will be autoset for the current machine.",
)
auto_scheduler_group.add_argument(
"--max-shared-memory-per-block",
type=int,
help="the max shared memory per block in bytes. "
"If not specified, it will be autoset for the current machine.",
)
auto_scheduler_group.add_argument(
"--max-local-memory-per-block",
type=int,
help="the max local memory per block in bytes. "
"If not specified, it will be autoset for the current machine.",
)
auto_scheduler_group.add_argument(
"--max-threads-per-block",
type=int,
help="the max number of threads per block. "
"If not specified, it will be autoset for the current machine.",
)
auto_scheduler_group.add_argument(
"--max-vthread-extent",
type=int,
help="the max vthread extent. "
"If not specified, it will be autoset for the current machine.",
)
auto_scheduler_group.add_argument(
"--warp-size",
type=int,
help="the thread numbers of a warp. "
"If not specified, it will be autoset for the current machine.",
)
auto_scheduler_group.add_argument(
"--include-simple-tasks",
help="whether to extract simple tasks that do not include complicated ops",
action="store_true",
)
auto_scheduler_group.add_argument(
"--log-estimated-latency",
help="whether to log the estimated latency to the file after tuning a task",
action="store_true",
)
autotvm_group = parser.add_argument_group(
"AutoTVM options",
"AutoTVM options, used when the AutoScheduler is not enabled",
)
autotvm_group.add_argument(
"--tuner",
choices=["ga", "gridsearch", "random", "xgb", "xgb_knob", "xgb-rank"],
default="xgb",
help="type of tuner to use when tuning with autotvm.",
)
# TODO (@leandron) This is a path to a physical file, but
# can be improved in future to add integration with a modelzoo
# or URL, for example.
parser.add_argument("FILE", help="path to the input model file")
parser.add_argument(
"--input-shapes",
help="specify non-generic shapes for model to run, format is "
'"input_name:[dim1,dim2,...,dimn] input_name2:[dim1,dim2]"',
type=parse_shape_string,
)
for one_entry in json_params:
parser.set_defaults(**one_entry)
def drive_tune(args):
"""Invoke auto-tuning with command line arguments
Parameters
----------
args: argparse.Namespace
Arguments from command line parser.
"""
if not os.path.isfile(args.FILE):
raise TVMCException(
f"Input file '{args.FILE}' doesn't exist, is a broken symbolic link, or a directory."
)
tvmc_model = frontends.load_model(args.FILE, args.model_format, shape_dict=args.input_shapes)
# Specify hardware parameters, although they'll only be used if autoscheduling.
hardware_params = auto_scheduler.HardwareParams(
num_cores=args.num_cores,
vector_unit_bytes=args.vector_unit_bytes,
cache_line_bytes=args.cache_line_bytes,
max_shared_memory_per_block=args.max_shared_memory_per_block,
max_local_memory_per_block=args.max_local_memory_per_block,
max_threads_per_block=args.max_threads_per_block,
max_vthread_extent=args.max_vthread_extent,
warp_size=args.warp_size,
target=args.target,
target_host=args.target_host,
)
if args.rpc_tracker:
parsed_url = urlparse("//%s" % args.rpc_tracker)
rpc_hostname = parsed_url.hostname
rpc_port = parsed_url.port or 9090
logger.info("RPC tracker hostname: %s", rpc_hostname)
logger.info("RPC tracker port: %s", rpc_port)
if not args.rpc_key:
raise TVMCException("need to provide an RPC tracker key (--rpc-key) for remote tuning")
else:
rpc_hostname = None
rpc_port = None
tune_model(
tvmc_model,
args.target,
tuning_records=args.output,
prior_records=args.tuning_records,
enable_autoscheduler=args.enable_autoscheduler,
rpc_key=args.rpc_key,
hostname=rpc_hostname,
port=rpc_port,
trials=args.trials,
target_host=args.target_host,
tuner=args.tuner,
min_repeat_ms=args.min_repeat_ms,
early_stopping=args.early_stopping,
desired_layout=args.desired_layout,
timeout=args.timeout,
repeat=args.repeat,
number=args.number,
parallel=args.parallel,
hardware_params=hardware_params,
include_simple_tasks=args.include_simple_tasks,
log_estimated_latency=args.log_estimated_latency,
additional_target_options=reconstruct_target_args(args),
)
def tune_model(
tvmc_model: TVMCModel,
target: str,
tuning_records: Optional[str] = None,
prior_records: Optional[str] = None,
enable_autoscheduler: bool = False,
rpc_key: Optional[str] = None,
hostname: Optional[str] = None,
port: Optional[Union[int, str]] = 9090,
trials: int = 10000,
target_host: Optional[str] = None,
tuner: str = "xgb",
min_repeat_ms: Optional[int] = None,
early_stopping: Optional[int] = None,
desired_layout: Optional[str] = None,
timeout: int = 10,
repeat: int = 1,
number: int = 10,
parallel: int = 4,
hardware_params: Optional[HardwareParams] = None,
include_simple_tasks: bool = False,
log_estimated_latency: bool = False,
additional_target_options: Optional[Dict[str, Dict[str, Any]]] = None,
):
"""Use tuning to automatically optimize the functions in a model.
Parameters
----------
tvmc_model : TVMCModel
The model to be optimized.
target : str
Compilation target as plain string, inline JSON or path to a JSON file.
tuning_records: str, optional
The path to a file that tuning results will be saved to. If not specified,
a temporary file will be used.
prior_records: str, optional
A path to previous tuning results that will be used to hot-start the tuning
cost model if provided.
enable_autoscheduler : bool, optional
When true, use autoscheduling rather than autotvm. This should produce
faster kernels for compatible model-target pairs.
rpc_key : str, optional
The RPC tracker key of the target device. Required when rpc_tracker is provided.
hostname : str, optional
The IP address of an RPC tracker, used when benchmarking remotely.
port : int or str, optional
The port of the RPC tracker to connect to. Defaults to 9090.
trials : int, optional
The number of schedules to try out for the entire model. Note that the default
value is chosen as a decent average for most models, but larger models may need
more trials to reach a good result while smaller models will converge with fewer
trials.
tuner : str, optional
The type of tuner to use when tuning with autotvm. Can be one of
"ga", "gridsearch", "random", "xgb", "xgb_knob", and "xgb-rank".
min_repeat_ms : int, optional
Minimum time to run each trial. Defaults to 0 on x86 and 1000 on other targets.
early_stopping : int, optional
When specified, stop tuning after this number of trials if results aren't improving.
desired_layout : str, optional
Can be one of "NCHW" or "NHWC". When specified, compatible operations in the graph
will have their layout set to this format. Tasks will then be tuned using this
specified layout.
timeout : int, optional,
If a kernel trial lasts longer than this duration in seconds, it will be
considered a failure.
repeat : int, optional
How many times each measurement should be repeated.
number : int, optional
The number of runs a single repeat is made of.
parallel : int, optional
The maximum number of parallel devices to use when tuning.
hardware_params : auto_scheduler.HardwareParams, optional
When using the autoscheduler, this object defines the configuration of the target hardware.
include_simple_tasks : bool, optional
Whether to extract simple operations or only computationally intensive ones when using
the autoscheduler.
log_estimated_latency : bool, optional
If using the autoscheduler, write the estimated latency at each step of tuning to file.
additional_target_options: Optional[Dict[str, Dict[str, Any]]]
Additional target options in a dictionary to combine with initial Target arguments
Returns
-------
tuning_records : str
The path to the produced tuning log file.
"""
target, extra_targets = target_from_cli(target, additional_target_options)
target, target_host = Target.canon_target_and_host(target, target_host)
# TODO(jwfromm) Remove this deepcopy once AlterOpLayout bug that mutates source
# model is fixed. For now, creating a clone avoids the issue.
mod = deepcopy(tvmc_model.mod)
params = tvmc_model.params
with tvm.transform.PassContext(opt_level=3):
if tuning_records is None:
tuning_records = tvmc_model.default_tuning_records_path()
for codegen_from_cli in extra_targets:
codegen = composite_target.get_codegen_by_target(codegen_from_cli["name"])
partition_function = codegen["pass_pipeline"]
mod = partition_function(mod, params, **codegen_from_cli["opts"])
# min_repeat_ms should be:
# a. the value provided by the user, if any, or
# b. 0ms in case target is "cpu"; otherwise 1000ms
if min_repeat_ms is None:
min_repeat_ms = 0 if target.keys[0] == "cpu" else 1000
logger.info("Default --min-repeat-ms for this target is %s", min_repeat_ms)
if rpc_key:
if hostname is None or port is None:
raise TVMCException(
"You must provide a hostname and port to connect to a remote RPC device."
)
if isinstance(port, str):
port = int(port)
logger.info("Tuning will be performed on device %s at %s:%d.", rpc_key, hostname, port)
runner_ctor = auto_scheduler.RPCRunner if enable_autoscheduler else autotvm.RPCRunner
runner = runner_ctor(
key=rpc_key,
host=hostname,
port=port,
number=number,
repeat=repeat,
n_parallel=parallel,
timeout=timeout,
min_repeat_ms=min_repeat_ms,
)
else:
logger.info("Starting localhost tuning.")
runner_ctor = (
auto_scheduler.LocalRPCMeasureContext
if enable_autoscheduler
else autotvm.LocalRunner
)
local_server = runner_ctor(
number=number,
repeat=repeat,
timeout=timeout,
min_repeat_ms=min_repeat_ms,
)
# For autoscheduling on some devices, we need to maintain a
# LocalRPCMeasureContext object.
if enable_autoscheduler:
runner = local_server.runner
else:
runner = local_server
if enable_autoscheduler:
tasks, weights = autoscheduler_get_tuning_tasks(
mod=mod,
params=params,
target=target,
alter_layout=desired_layout,
hardware_params=hardware_params,
include_simple_tasks=include_simple_tasks,
)
# Create the autoscheduler tuning options
tuning_options = auto_scheduler.TuningOptions(
num_measure_trials=trials,
measure_callbacks=[auto_scheduler.RecordToFile(tuning_records)],
runner=runner,
early_stopping=early_stopping,
)
logger.info("Autoscheduling with configuration: %s", tuning_options)
# Schedule the tasks (i.e., produce a schedule for each task)
schedule_tasks(tasks, weights, tuning_options, prior_records, log_estimated_latency)
else:
tasks = autotvm_get_tuning_tasks(
mod=mod,
params=params,
target=target,
alter_layout=desired_layout,
)
# In autotvm, trials is specified per task. We can convert the per-model input
# provided to per-task trials by dividing by the number of tasks.
trials = int(trials / max(len(tasks), 1))
logger.info("Autotuning with %d trials per task.", trials)
tuning_options = {
"tuner": tuner,
"trials": trials,
"early_stopping": early_stopping,
"measure_option": autotvm.measure_option(
builder=autotvm.LocalBuilder(build_func="default"), runner=runner
),
"tuning_records": prior_records,
}
logger.info("Autotuning with configuration: %s", tuning_options)
tune_tasks(tasks, tuning_records, **tuning_options)
return tuning_records
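# Example usage (a minimal sketch; the model file name and trial count are illustrative):
#
#     tvmc_model = frontends.load_model("resnet50-v2-7.onnx")
#     records = tune_model(tvmc_model, "llvm", trials=64)
#     print("tuning records written to", records)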
def autotvm_get_tuning_tasks(
mod: tvm.IRModule,
params: Dict[str, tvm.nd.NDArray],
target: str,
target_host: Optional[str] = None,
alter_layout: Optional[str] = None,
):
"""Get the autotvm tuning tasks for a given relay module.
Parameters
----------
mod : tvm.IRModule
The relay module from which to extract tuning tasks.
params : dict
The params for the relay module.
target : tvm.target.Target
The compilation target.
target_host : str, optional
The compilation target for the host.
alter_layout : str, optional
The layout to convert the graph to. Note, the convert layout
pass doesn't currently guarantee the whole of the graph will
be converted to the chosen layout.
Returns
-------
tasks : list of autotvm.Tasks
list of tasks to be tuned
"""
target, target_host = Target.canon_target_and_host(target, target_host)
if alter_layout:
mod = convert_graph_layout(mod, alter_layout)
tasks = autotvm.task.extract_from_program(
mod["main"],
target=target,
params=params,
)
return tasks
def autoscheduler_get_tuning_tasks(
mod: tvm.IRModule,
params: Dict[str, tvm.nd.NDArray],
target: str,
target_host: Optional[str] = None,
alter_layout: Optional[str] = None,
hardware_params: Optional[HardwareParams] = None,
include_simple_tasks: bool = False,
):
"""Get the autoscheduler tuning tasks for a given relay module.
Parameters
----------
mod : tvm.IRModule
The relay module from which to extract tuning tasks.
params : dict
The params for the relay module.
target : tvm.target.Target
The compilation target.
target_host : str, optional
The compilation target for the host.
alter_layout : str, optional
The layout to convert the graph to. Note, the convert layout
pass doesn't currently guarantee the whole of the graph will
be converted to the chosen layout.
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
Returns
-------
tasks : list of autotvm.Tasks
list of tasks to be tuned
weights : List[int]
the weight (i.e. the number of appearance) of extracted tasks
"""
target, target_host = Target.canon_target_and_host(target, target_host)
if alter_layout:
mod = convert_graph_layout(mod, alter_layout)
# Extract the tasks
tasks, task_weights = auto_scheduler.extract_tasks(
mod["main"],
params,
target=target,
hardware_params=hardware_params,
include_simple_tasks=include_simple_tasks,
)
return tasks, task_weights
def schedule_tasks(
tasks: List[auto_scheduler.SearchTask],
task_weights: List[float],
tuning_options: auto_scheduler.TuningOptions,
prior_records: Optional[str] = None,
log_estimated_latency: bool = False,
):
"""Generate the schedules for the different tasks (i.e., subgraphs) contained in the module.
Store the schedules in a json file that will be used later by the compiler.
Parameters
----------
tasks : list
A list of auto_scheduler.SearchTask to tune.
task_weights : list
The weight (i.e. the number of appearance) of extracted tasks
tuning_options: auto_scheduler.TuningOptions
The options of tuning
prior_records : str, optional
The json file used to preload the autoscheduler
log_estimated_latency : bool, optional
If true, writes the estimated runtime of the model during each step of tuning to file.
"""
if not log_estimated_latency:
callbacks = [auto_scheduler.task_scheduler.PrintTableInfo()]
else:
callbacks = [
auto_scheduler.task_scheduler.PrintTableInfo(),
auto_scheduler.task_scheduler.LogEstimatedLatency(("total_latency.tsv")),
]
# Create the scheduler
tuner = auto_scheduler.TaskScheduler(
tasks, task_weights, load_log_file=prior_records, callbacks=callbacks
)
# Tune the tasks
tuner.tune(tuning_options)
def tune_tasks(
tasks: List[autotvm.task.Task],
log_file: str,
measure_option: autotvm.measure_option,
tuner: str,
trials: int,
early_stopping: Optional[int] = None,
tuning_records: Optional[str] = None,
):
"""Tune a list of tasks and output the history to a log file.
Parameters
----------
tasks : list
A list of autotvm.Tasks to tune.
log_file : str
A file to output the tuning history, in JSON.
measure_option : autotvm.measure_option
Options to build and run a tuning task.
tuner : str
Which tuner to use.
trials : int
The maximum number of tuning trials to perform.
early_stopping : int, optional
The minimum number of tuning trials to perform.
This will be equal to 'trials' if not specified.
tuning_records: str, optional
Path to the file produced by the tuning, to be used during
tuning.
"""
if not tasks:
logger.warning("there were no tasks found to be tuned")
return
if not early_stopping:
early_stopping = trials
for i, tsk in enumerate(tasks):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
# Create a tuner
if tuner in ("xgb", "xgb-rank"):
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "xgb_knob":
tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
else:
raise TVMCException("invalid tuner: %s " % tuner)
# If transfer learning is being used, load the existing results
if tuning_records and os.path.exists(tuning_records):
logger.info("loading tuning records from %s", tuning_records)
start_time = time.time()
tuner_obj.load_history(autotvm.record.load_from_file(tuning_records))
logging.info("loaded history in %.2f sec(s)", time.time() - start_time)
tuner_obj.tune(
n_trial=min(trials, len(tsk.config_space)),
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(trials, prefix=prefix),
autotvm.callback.log_to_file(log_file),
],
)
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/compiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support for compiling networks both AOT and JIT.
"""
import logging
import os.path
from typing import Any, Optional, Dict, List, Union, Callable, Sequence
from pathlib import Path
import tvm
from tvm import autotvm, auto_scheduler
from tvm import relay
from tvm.driver.tvmc.registry import generate_registry_args, reconstruct_registry_entity
from tvm.ir.instrument import PassInstrument
from tvm.ir.memory_pools import WorkspaceMemoryPools
from tvm.target import Target
from tvm.relay.backend import Executor, Runtime
from . import composite_target, frontends, TVMCException
from .model import TVMCModel, TVMCPackage
from .main import register_parser
from .target import target_from_cli, generate_target_args, reconstruct_target_args
from .pass_config import parse_configs
from .pass_list import parse_pass_list_str
from .transform import convert_graph_layout
from .shape_parser import parse_shape_string
from .workspace_pools import generate_workspace_pools_args, workspace_pools_recombobulate
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
@register_parser
def add_compile_parser(subparsers, _, json_params):
"""Include parser for 'compile' subcommand"""
parser = subparsers.add_parser("compile", help="compile a model.")
parser.set_defaults(func=drive_compile)
parser.add_argument(
"--cross-compiler",
default="",
help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'.",
)
parser.add_argument(
"--cross-compiler-options",
default="",
help="the cross compiler options to generate target libraries, e.g. '-mfpu=neon-vfpv4'.",
)
parser.add_argument(
"--desired-layout",
choices=["NCHW", "NHWC"],
default=None,
help="change the data layout of the whole graph.",
)
parser.add_argument(
"--dump-code",
metavar="FORMAT",
default="",
help="comma separated list of formats to export the input model, e.g. 'asm,ll,relay'.",
)
parser.add_argument(
"--model-format",
choices=frontends.get_frontend_names(),
help="specify input model format.",
)
parser.add_argument(
"-o",
"--output",
default="module.tar",
help="output the compiled module to a specified archive. Defaults to 'module.tar'.",
)
parser.add_argument(
"-f",
"--output-format",
choices=["so", "mlf"],
default="so",
help="output format. Use 'so' for shared object or 'mlf' for Model Library Format "
"(only for microTVM targets). Defaults to 'so'.",
)
parser.add_argument(
"--pass-config",
action="append",
metavar=("name=value"),
help="configurations to be used at compile time. This option can be provided multiple "
"times, each one to set one configuration value, "
"e.g. '--pass-config relay.backend.use_auto_scheduler=0', "
"e.g. '--pass-config tir.add_lower_pass=opt_level1,pass1,opt_level2,pass2'.",
)
generate_target_args(parser)
parser.add_argument(
"--tuning-records",
metavar="PATH",
default="",
help="path to an auto-tuning log file by AutoTVM. If not presented, "
"the fallback/tophub configs will be used.",
)
generate_registry_args(parser, Executor, "graph")
generate_registry_args(parser, Runtime, "cpp")
parser.add_argument("-v", "--verbose", action="count", default=0, help="increase verbosity.")
# TODO (@leandron) This is a path to a physical file, but
# can be improved in future to add integration with a modelzoo
# or URL, for example.
parser.add_argument("FILE", help="path to the input model file.")
parser.add_argument(
"-O",
"--opt-level",
default=3,
type=int,
choices=range(0, 4),
metavar="[0-3]",
help="specify which optimization level to use. Defaults to '3'.",
)
parser.add_argument(
"--input-shapes",
help="specify non-generic shapes for model to run, format is "
'"input_name:[dim1,dim2,...,dimn] input_name2:[dim1,dim2]".',
type=parse_shape_string,
default=None,
)
parser.add_argument(
"--disabled-pass",
help="disable specific passes, comma-separated list of pass names.",
type=parse_pass_list_str,
default="",
)
parser.add_argument(
"--module-name",
default="default",
help="The output module name. Defaults to 'default'.",
)
for one_entry in json_params:
parser.set_defaults(**one_entry)
generate_workspace_pools_args(parser)
def drive_compile(args):
"""Invoke tvmc.compiler module with command line arguments
Parameters
----------
args: argparse.Namespace
Arguments from command line parser.
Returns
-------
int
Zero if successfully completed
"""
if not os.path.isfile(args.FILE):
raise TVMCException(
f"Input file '{args.FILE}' doesn't exist, is a broken symbolic link, or a directory."
)
tvmc_model = frontends.load_model(args.FILE, args.model_format, args.input_shapes)
dump_code = [x.strip() for x in args.dump_code.split(",")] if args.dump_code else None
additional_targets = reconstruct_target_args(args)
workspace_pools_target, extra_targets = target_from_cli(args.target, additional_targets)
compile_model(
tvmc_model,
args.target,
opt_level=args.opt_level,
executor=reconstruct_registry_entity(args, Executor),
runtime=reconstruct_registry_entity(args, Runtime),
tuning_records=args.tuning_records,
package_path=args.output,
cross=args.cross_compiler,
cross_options=args.cross_compiler_options,
output_format=args.output_format,
dump_code=dump_code,
target_host=None,
desired_layout=args.desired_layout,
disabled_pass=args.disabled_pass,
pass_context_configs=args.pass_config,
mod_name=args.module_name,
additional_target_options=additional_targets,
workspace_pools=(
workspace_pools_recombobulate(args, [workspace_pools_target], extra_targets)
),
)
return 0
def compile_model(
tvmc_model: TVMCModel,
target: str,
opt_level: int = 3,
executor: Optional[Executor] = Executor("graph"),
runtime: Optional[Runtime] = Runtime("cpp"),
tuning_records: Optional[str] = None,
package_path: Optional[str] = None,
cross: Optional[Union[str, Callable]] = None,
cross_options: Optional[str] = None,
output_format: str = "so",
dump_code: Optional[List[str]] = None,
target_host: Optional[str] = None,
desired_layout: Optional[str] = None,
disabled_pass: Optional[str] = None,
pass_context_configs: Optional[List[str]] = None,
additional_target_options: Optional[Dict[str, Dict[str, Any]]] = None,
use_vm: bool = False,
mod_name: Optional[str] = "default",
workspace_pools: Optional[WorkspaceMemoryPools] = None,
instruments: Optional[Sequence[PassInstrument]] = None,
):
"""Compile a model from a supported framework into a TVM module.
This function takes a union of the arguments of both frontends.load_model
and compiler.compile_relay. The resulting TVM module can be executed using
the graph executor.
Parameters
----------
tvmc_model : TVMCModel
The model object that should be compiled.
target : str
The target for which to compile. Can be a plain string or
a path.
opt_level : int
The option that controls various sorts of optimizations.
tuning_records : str
A path to tuning records produced using tvmc.tune. When provided,
compilation will use more optimized kernels leading to better results.
package_path : str, optional
The path to export the compiled model to. If not provided it will
be saved in a temporary directory.
cross : str or callable object, optional
Function that performs the actual compilation
cross_options : str, optional
Command line options to be passed to the cross compiler.
output_format : str
What format to use when saving the function library. Must be one of "so" or "tar".
When compiling for a remote device without a cross compiler, "tar" will likely work better.
dump_code : list, optional
Dump the generated code for the specified source types, on
the requested target.
target_host : str, optional
The target of the host machine if host-side code
needs to be generated.
desired_layout: str, optional
The layout to convert the graph to. Note, the convert layout
pass doesn't currently guarantee the whole of the graph will
be converted to the chosen layout.
disabled_pass: str, optional
Comma-separated list of passes which needs to be disabled
during compilation
pass_context_configs: list[str], optional
List of strings containing a set of configurations to be passed to the
PassContext.
additional_target_options: Optional[Dict[str, Dict[str, Any]]]
Additional target options in a dictionary to combine with initial Target arguments
use_vm: bool
Whether to use the VM to compile the model as opposed to the graph executor
mod_name: str, optional
The module name
workspace_pools: WorkspaceMemoryPools, optional
Specification of WorkspacePoolInfo objects to be used as workspace memory in the
compilation.
instruments: Optional[Sequence[PassInstrument]]
The list of pass instrument implementations.
Returns
-------
compiled_model : TVMCPackage
The compiled TVMCModel ready to be run.
"""
mod, params = tvmc_model.mod, tvmc_model.params
config = parse_configs(pass_context_configs)
tvm_target, extra_targets = target_from_cli(target, additional_target_options)
tvm_target, target_host = Target.canon_target_and_host(tvm_target, target_host)
partition_functions = []
partition_opts = []
for codegen_from_cli in extra_targets:
codegen = composite_target.get_codegen_by_target(codegen_from_cli["name"])
partition_functions.append(codegen["pass_pipeline"])
partition_opts.append(codegen_from_cli["opts"])
if codegen["config_key"] is not None:
config[codegen["config_key"]] = codegen_from_cli["opts"]
with tvm.transform.PassContext(
opt_level=opt_level,
config=config,
disabled_pass=disabled_pass,
instruments=instruments,
):
if desired_layout:
mod = convert_graph_layout(mod, desired_layout)
for partition_function, opts in zip(partition_functions, partition_opts):
mod = partition_function(mod, params, mod_name=mod_name, **opts)
if tuning_records and os.path.exists(tuning_records):
logger.debug("tuning records file provided: %s", tuning_records)
use_autoscheduler = True
try:
auto_scheduler.load_records(tuning_records)
except tvm._ffi.base.TVMError:
use_autoscheduler = False
if use_autoscheduler:
with auto_scheduler.ApplyHistoryBest(tuning_records):
config["relay.backend.use_auto_scheduler"] = True
logger.debug("building relay graph with autoscheduler")
graph_module = build(
mod,
tvm_target=tvm_target,
executor=executor,
runtime=runtime,
params=params,
use_vm=use_vm,
mod_name=mod_name,
workspace_pools=workspace_pools,
)
else:
with autotvm.apply_history_best(tuning_records):
logger.debug("building relay graph with tuning records")
graph_module = build(
mod,
tvm_target=tvm_target,
executor=executor,
runtime=runtime,
params=params,
use_vm=use_vm,
mod_name=mod_name,
workspace_pools=workspace_pools,
)
else:
logger.debug("building relay graph (no tuning records provided)")
graph_module = build(
mod,
tvm_target=tvm_target,
executor=executor,
runtime=runtime,
params=params,
use_vm=use_vm,
mod_name=mod_name,
workspace_pools=workspace_pools,
)
# Generate output dump files with sources
if dump_code is None:
dump_code = []
if not isinstance(dump_code, list):
dump_code = [dump_code]
dumps = {}
for source_type in dump_code:
if use_vm:
lib = graph_module.lib
else:
lib = graph_module.get_lib()
        # TODO(@leandron): lib.get_source calls have inconsistent behavior for
        # unsupported formats.
source = str(mod) if source_type == "relay" else lib.get_source(source_type)
dumps[source_type] = source
# Create a new tvmc model package object from the graph definition.
package_path = tvmc_model.export_package(
graph_module, package_path, cross, cross_options, output_format
)
# Write dumps to file.
if dumps:
save_dumps(package_path, dumps)
return TVMCPackage(package_path)
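# Example usage (a minimal sketch; the model file and output path are illustrative):
#
#     tvmc_model = frontends.load_model("resnet50-v2-7.onnx")
#     package = compile_model(tvmc_model, "llvm", package_path="resnet50.tar")
#     print(package.package_path)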
def build(
mod: tvm.IRModule,
tvm_target: str,
executor: Executor,
runtime: Runtime,
params: Dict[str, tvm.nd.NDArray],
use_vm: bool,
mod_name: str,
workspace_pools: Optional[WorkspaceMemoryPools],
):
"""
Builds the model with the provided executor.
Parameters
----------
mod : tvm.IRModule
The relay module corresponding to this model.
tvm_target : str
The target for which to compile. Can be a plain string or
a path.
executor : Executor
The graph executor to build the model if use_vm is not True
runtime : Runtime
The runtime configuration.
params : dict
A parameter dictionary for the model.
use_vm: bool
Whether to use the VM to compile the model as opposed to the graph executor
    mod_name: str
        The module name
    workspace_pools: WorkspaceMemoryPools, optional
        Specification of WorkspacePoolInfo objects to be used as workspace
        memory in the compilation.
    """
if use_vm:
logger.debug("building with vm compile")
return relay.vm.compile(mod, target=tvm_target, params=params)
logger.debug("building with relay build")
return relay.build(
mod,
target=tvm_target,
executor=executor,
runtime=runtime,
params=params,
mod_name=mod_name,
workspace_memory_pools=workspace_pools,
)
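# A minimal usage sketch, not part of the original file: compile a one-op
# Relay module for the host CPU. The "llvm" target and the "graph"/"cpp"
# executor/runtime names are assumptions about the local TVM build, and the
# demo assumes this file's imports resolve when it is executed directly.
if __name__ == "__main__":
    x = relay.var("x", shape=(1, 4), dtype="float32")
    demo_mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))
    demo_lib = build(
        demo_mod,
        tvm_target="llvm",
        executor=Executor("graph"),
        runtime=Runtime("cpp"),
        params={},
        use_vm=False,
        mod_name="default",
        workspace_pools=None,
    )
    # relay.build returns a GraphExecutorFactoryModule when use_vm is False.
    print(type(demo_lib))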
def save_dumps(module_name: str, dumps: Dict[str, str], dump_root: str = "."):
"""
Serialize dump files to the disk.
Parameters
----------
module_name : str
File name, referring to the module that generated
the dump contents
dumps : dict
The output contents to be saved into the files
dump_root : str, optional
Path in which dump files will be created
"""
for dump_format in dumps:
dump_name = module_name + "." + dump_format
with open(Path(dump_root, dump_name), "w") as f:
f.write(dumps[dump_format])
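# Usage sketch, not part of the original file: write one dump file into the
# current directory (assumed writable); this creates "demo_module.relay".
if __name__ == "__main__":
    save_dumps("demo_module", {"relay": "def @main() {}"}, dump_root=".")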
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/composite_target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support for composite targets in TVMC.
"""
import logging
# Make sure Vitis AI codegen is registered
import tvm.contrib.target.vitis_ai # pylint: disable=unused-import
from tvm.relay.op.contrib.arm_compute_lib import partition_for_arm_compute_lib
from tvm.relay.op.contrib.ethosn import partition_for_ethosn
from tvm.relay.op.contrib.cmsisnn import partition_for_cmsisnn
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from tvm.relay.op.contrib.bnns import partition_for_bnns
from tvm.relay.op.contrib.vitis_ai import partition_for_vitis_ai
from tvm.driver.tvmc import TVMCException
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
# Global dictionary to map targets
#
# Options
# -------
# config_key : str
# The configuration key to be used in the PassContext (if any).
# pass_pipeline : Callable
# A function to transform a Module before compilation, mainly used
# for partitioning for the target currently.
REGISTERED_CODEGEN = {
"compute-library": {
"config_key": None,
"pass_pipeline": partition_for_arm_compute_lib,
},
"cmsis-nn": {
"config_key": "relay.ext.cmsisnn.options",
"pass_pipeline": partition_for_cmsisnn,
},
"ethos-n": {
"config_key": "relay.ext.ethos-n.options",
"pass_pipeline": partition_for_ethosn,
},
"ethos-u": {
"config_key": "relay.ext.ethos-u.options",
"pass_pipeline": partition_for_ethosu,
},
"bnns": {
"config_key": None,
"pass_pipeline": partition_for_bnns,
},
"vitis-ai": {
"config_key": "relay.ext.vitis_ai.options",
"pass_pipeline": partition_for_vitis_ai,
},
}
def get_codegen_names():
"""Return a list of all registered codegens.
Returns
-------
list of str
all registered targets
"""
return list(REGISTERED_CODEGEN.keys())
def get_codegen_by_target(name):
"""Return a codegen entry by name.
Parameters
----------
name : str
The name of the target for which the codegen info should be retrieved.
Returns
-------
dict
requested target codegen information
"""
try:
return REGISTERED_CODEGEN[name]
except KeyError:
raise TVMCException("Composite target %s is not defined in TVMC." % name)
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/config_options.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Manipulate JSON config files to work with TVMC.
"""
import os
import json
from tvm._ffi import libinfo
from tvm.driver.tvmc import TVMCException
CONFIGS_JSON_DIR = None
class ConfigsJsonNotFoundError(TVMCException):
"""Raised when the JSON configs dirtree cannot be found."""
def get_configs_json_dir() -> str:
"""Find the 'configs' directory, containing the JSON files used to configure tvmc
with persistent argument settings.
Returns
-------
str :
The path to the 'configs' directory
"""
global CONFIGS_JSON_DIR
if CONFIGS_JSON_DIR is None:
# If a non-default location for the build directory is used, e.g. set via
# TVM_LIBRARY_PATH, we need to provide the user a way to override
# CONFIGS_JSON_DIR as well.
if os.environ.get("TVM_CONFIGS_JSON_DIR", None):
user_config_dir = os.environ["TVM_CONFIGS_JSON_DIR"]
if os.path.isdir(user_config_dir):
CONFIGS_JSON_DIR = user_config_dir
return CONFIGS_JSON_DIR
candidate_paths = []
candidate_paths.extend(libinfo.find_lib_path())
# When running from source, the configs directory will be located one directory above the
# native libraries, so covering that case.
candidate_paths.extend(
[os.path.abspath(os.path.join(lib_path, "..")) for lib_path in libinfo.find_lib_path()]
)
for path in candidate_paths:
configs_path = os.path.join(os.path.dirname(path), "configs")
if os.path.isdir(configs_path):
CONFIGS_JSON_DIR = configs_path
break
else:
raise ConfigsJsonNotFoundError()
return CONFIGS_JSON_DIR
def find_json_file(name, path):
"""search for json file given file name a path
Parameters
----------
name: string
the file name need to be searched
path: string
path to search at
Returns
-------
string
the full path to that file
"""
match = ""
for root, _dirs, files in os.walk(path):
if name in files:
match = os.path.join(root, name)
break
return match
def read_and_convert_json_into_dict(config_args):
"""Read json configuration file and return a dictionary with all parameters
Parameters
----------
config_args: argparse.Namespace
Arguments from command line parser holding the json file path.
Returns
-------
dictionary
dictionary with all the json arguments keys and values
"""
config_dir = None
try:
if ".json" not in config_args.config:
config_args.config = config_args.config.strip() + ".json"
if os.path.isfile(config_args.config):
json_config_file = config_args.config
else:
config_dir = get_configs_json_dir()
json_config_file = find_json_file(config_args.config, config_dir)
# Use a context manager so the file handle is always closed.
with open(json_config_file, "rb") as json_file:
return json.load(json_file)
except FileNotFoundError:
raise TVMCException(
f"File {config_args.config} does not exist at {config_dir} or is wrong format."
)
def parse_target_from_json(one_target, command_line_list):
"""parse the targets out of the json file struct
Parameters
----------
one_target: dict
dictionary with all target's details
command_line_list: list
list to update with target parameters
"""
# Split off the "kind" entry from the remaining (key, value) attribute
# pairs. Note: this relies on "kind" being the first key of the target dict.
target_kind, *sub_type = [
one_target[key] if key == "kind" else (key, one_target[key]) for key in one_target
]
internal_dict = {}
if sub_type:
sub_target_type = sub_type[0][0]
target_value = sub_type[0][1]
internal_dict[f"target_{target_kind}_{sub_target_type}"] = target_value
command_line_list.append(internal_dict)
return target_kind
def convert_config_json_to_cli(json_params):
"""convert all configuration keys & values from dictionary to cli format
Parameters
----------
args: dictionary
dictionary with all configuration keys & values.
Returns
-------
int
list of configuration values in cli format
"""
command_line_list = []
for param_key in json_params:
if param_key == "targets":
target_list = [
parse_target_from_json(one_target, command_line_list)
for one_target in json_params[param_key]
]
internal_dict = {}
internal_dict["target"] = ", ".join(map(str, target_list))
command_line_list.append(internal_dict)
elif param_key in ("executor", "runtime"):
for key, value in json_params[param_key].items():
if key == "kind":
kind = f"{value}_"
new_dict_key = param_key
else:
new_dict_key = f"{param_key}_{kind}{key}"
internal_dict = {}
internal_dict[new_dict_key.replace("-", "_")] = value
command_line_list.append(internal_dict)
elif isinstance(json_params[param_key], dict):
internal_dict = {}
modify_param_key = param_key.replace("-", "_")
internal_dict[modify_param_key] = []
for key, value in json_params[param_key].items():
internal_dict[modify_param_key].append(f"{key}={value}")
command_line_list.append(internal_dict)
else:
internal_dict = {}
internal_dict[param_key.replace("-", "_")] = json_params[param_key]
command_line_list.append(internal_dict)
return command_line_list
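# Usage sketch, not part of the original file: convert an in-memory JSON
# config into the CLI-style list of dictionaries. The keys below are
# illustrative assumptions, not a complete schema.
if __name__ == "__main__":
    demo_config = {
        "targets": [{"kind": "llvm"}],
        "executor": {"kind": "graph", "link-params": True},
        "verbose": 2,
    }
    print(convert_config_json_to_cli(demo_config))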
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/fmtopt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Utils to format help text for project options.
"""
from textwrap import TextWrapper
# Maximum column length for accommodating option name and its choices.
# Help text is placed after it in a new line.
MAX_OPTNAME_CHOICES_TEXT_COL_LEN = 80
# Maximum column length for accommodating help text.
# 0 turns off formatting for the help text.
MAX_HELP_TEXT_COL_LEN = 0
# Justification of help text placed below option name + choices text.
HELP_TEXT_JUST = 2
def format_option(option_text, help_text, default_text, required=True):
"""Format option name, choices, and default text into a single help text.
Parameters
----------
option_text: str
String containing the option name and option's choices formatted as:
optname={opt0, opt1, ...}
help_text: str
Help text string.
default_text: str
Default text string.
required: bool
Flag that controls if a "(required)" text mark needs to be added to the final help text to
inform if the option is a required one.
Returns
-------
help_text_just: str
Single justified help text formatted as:
optname={opt0, opt1, ... }
HELP_TEXT. "(required)" | "Defaults to 'DEFAULT'."
"""
optname, choices_text = option_text.split("=", 1)
# Prepare the optname + choices text chunk.
optname_len = len(optname)
wrapper = TextWrapper(width=MAX_OPTNAME_CHOICES_TEXT_COL_LEN - optname_len)
choices_lines = wrapper.wrap(choices_text)
# Set first choices line which merely appends to optname string.
# No justification is necessary for the first line since first
# line was wrapped based on MAX_OPTNAME_CHOICES_TEXT_COL_LEN - optname_len,
# i.e. considering optname_len, hence only append justified choices_lines[0] line.
choices_just_lines = [optname + "=" + choices_lines[0]]
# Justify the remaining lines based on first optname + '='.
for line in choices_lines[1:]:
line_len = len(line)
line_just = line.rjust(
optname_len + 1 + line_len
) # add 1 to align after '{' in the line above
choices_just_lines.append(line_just)
choices_text_just_chunk = "\n".join(choices_just_lines)
# Prepare the help text chunk.
help_text = help_text[0].lower() + help_text[1:]
if MAX_HELP_TEXT_COL_LEN > 0:
wrapper = TextWrapper(width=MAX_HELP_TEXT_COL_LEN)
help_text_lines = wrapper.wrap(help_text)
else:
# Don't format help text.
help_text_lines = [help_text]
help_text_just_lines = []
for line in help_text_lines:
line_len = len(line)
line_just = line.rjust(HELP_TEXT_JUST + line_len)
help_text_just_lines.append(line_just)
help_text_just_chunk = "\n".join(help_text_just_lines)
# An option might be required for one method but optional for another one.
# If the option is required for one method it means there is no default for
# it when used in that method, hence suppress default text in that case.
if default_text and not required:
help_text_just_chunk += " " + default_text
if required:
help_text_just_chunk += " (required)"
help_text_just = choices_text_just_chunk + "\n" + help_text_just_chunk
return help_text_just
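# Usage sketch, not part of the original file: format one project option the
# way the micro/project help output does. The option name, choices, and
# default below are made-up examples.
if __name__ == "__main__":
    print(
        format_option(
            "verbose={true, false}",
            "Run with verbose output.",
            "Defaults to 'false'.",
            required=False,
        )
    )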
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/frontends.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support to parse models from different frameworks into Relay networks.
Frontend classes do lazy-loading of modules on purpose, to reduce time spent on
loading the tool.
"""
import logging
import os
import sys
import re
import importlib
from abc import ABC
from abc import abstractmethod
from typing import Optional, List, Dict
from pathlib import Path
import numpy as np
from tvm import relay
from tvm import parser
from tvm.driver.tvmc import TVMCException, TVMCImportError
from tvm.driver.tvmc.model import TVMCModel
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
class Frontend(ABC):
"""Abstract class for command line driver frontend.
Provide a unified way to import models (as files), and deal
with any required preprocessing to create a TVM module from it."""
@staticmethod
@abstractmethod
def name():
"""Frontend name"""
@staticmethod
@abstractmethod
def suffixes():
"""File suffixes (extensions) used by this frontend"""
@abstractmethod
def load(self, path, shape_dict=None, **kwargs):
"""Load a model from a given path.
Parameters
----------
path: str
Path to a file
shape_dict: dict, optional
Mapping from input names to their shapes.
Returns
-------
mod : tvm.IRModule
The produced relay module.
params : dict
The parameters (weights) for the relay module.
"""
def lazy_import(pkg_name, from_pkg_name=None, hide_stderr=False):
"""Lazy import a frontend package or subpackage"""
# Optionally silence anything the imported package writes to stderr while
# loading, restoring the original stream afterwards.
if hide_stderr:
stderr = sys.stderr
sys.stderr = open(os.devnull, "w")
try:
return importlib.import_module(pkg_name, package=from_pkg_name)
except ImportError as error:
raise TVMCImportError(pkg_name) from error
finally:
if hide_stderr:
sys.stderr.close()
sys.stderr = stderr
class KerasFrontend(Frontend):
"""Keras frontend for TVMC"""
@staticmethod
def name():
return "keras"
@staticmethod
def suffixes():
return ["h5"]
def load(self, path, shape_dict=None, **kwargs):
# pylint: disable=C0103
tf = lazy_import("tensorflow")
keras = lazy_import("keras", from_pkg_name="tensorflow")
# tvm build currently imports keras directly instead of tensorflow.keras
try:
model = keras.models.load_model(path)
except ValueError as err:
raise TVMCException(str(err))
# There are two flavours of keras model, sequential and
# functional, TVM expects a functional model, so convert
# if required:
if self.is_sequential_p(model):
model = self.sequential_to_functional(model)
in_shapes = []
for layer in model._input_layers:
if tf.executing_eagerly():
in_shapes.append(tuple(dim if dim is not None else 1 for dim in layer.input.shape))
else:
in_shapes.append(
tuple(dim.value if dim.value is not None else 1 for dim in layer.input.shape)
)
inputs = [np.random.uniform(size=shape, low=-1.0, high=1.0) for shape in in_shapes]
input_shapes = {name: x.shape for (name, x) in zip(model.input_names, inputs)}
if shape_dict is not None:
input_shapes.update(shape_dict)
kwargs.setdefault("layout", "NHWC")
return relay.frontend.from_keras(model, input_shapes, **kwargs)
def is_sequential_p(self, model):
keras = lazy_import("keras", from_pkg_name="tensorflow")
return isinstance(model, keras.models.Sequential)
def sequential_to_functional(self, model):
keras = lazy_import("keras", from_pkg_name="tensorflow")
assert self.is_sequential_p(model)
input_layer = keras.layers.Input(batch_shape=model.layers[0].input_shape)
prev_layer = input_layer
for layer in model.layers:
prev_layer = layer(prev_layer)
model = keras.models.Model([input_layer], [prev_layer])
return model
class OnnxFrontend(Frontend):
"""ONNX frontend for TVMC"""
@staticmethod
def name():
return "onnx"
@staticmethod
def suffixes():
return ["onnx"]
def load(self, path, shape_dict=None, **kwargs):
onnx = lazy_import("onnx")
# pylint: disable=E1101
model = onnx.load(path)
return relay.frontend.from_onnx(model, shape=shape_dict, **kwargs)
class TensorflowFrontend(Frontend):
"""TensorFlow frontend for TVMC"""
@staticmethod
def name():
return "pb"
@staticmethod
def suffixes():
return ["pb"]
def load(self, path, shape_dict=None, **kwargs):
tf = lazy_import("tensorflow")
tf_testing = lazy_import("tvm.relay.testing.tf")
with tf.io.gfile.GFile(path, "rb") as tf_graph:
content = tf_graph.read()
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(content)
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
logger.debug("parse TensorFlow model and convert into Relay computation graph")
return relay.frontend.from_tensorflow(graph_def, shape=shape_dict, **kwargs)
class TFLiteFrontend(Frontend):
"""TFLite frontend for TVMC"""
@staticmethod
def name():
return "tflite"
@staticmethod
def suffixes():
return ["tflite"]
def load(self, path, shape_dict=None, **kwargs):
model = lazy_import("tflite.Model")
with open(path, "rb") as tf_graph:
content = tf_graph.read()
# tflite.Model.Model is tflite.Model in 1.14 and 2.1.0
try:
tflite_model = model.Model.GetRootAsModel(content, 0)
except AttributeError:
tflite_model = model.GetRootAsModel(content, 0)
try:
version = tflite_model.Version()
logger.debug("tflite version %s", version)
except Exception:
raise TVMCException("input file not tflite")
if version != 3:
raise TVMCException("input file not tflite version 3")
logger.debug("parse TFLite model and convert into Relay computation graph")
mod, params = relay.frontend.from_tflite(tflite_model, shape_dict=shape_dict, **kwargs)
return mod, params
class PyTorchFrontend(Frontend):
"""PyTorch frontend for TVMC"""
@staticmethod
def name():
return "pytorch"
@staticmethod
def suffixes():
# Torch Script is a zip file, but can be named pth
return ["pth", "zip"]
def load(self, path, shape_dict=None, **kwargs):
torch = lazy_import("torch")
if shape_dict is None:
raise TVMCException("--input-shapes must be specified for %s" % self.name())
traced_model = torch.jit.load(path)
traced_model.eval() # Switch to inference mode
# Convert shape dictionary to list for Pytorch frontend compatibility
input_shapes = list(shape_dict.items())
logger.debug("parse Torch model and convert into Relay computation graph")
return relay.frontend.from_pytorch(
traced_model, input_shapes, keep_quantized_weight=True, **kwargs
)
class PaddleFrontend(Frontend):
"""PaddlePaddle frontend for TVMC"""
@staticmethod
def name():
return "paddle"
@staticmethod
def suffixes():
return ["pdmodel"]
def load(self, path, shape_dict=None, **kwargs):
# pylint: disable=C0415
import paddle
paddle.enable_static()
paddle.disable_signal_handler()
if not os.path.exists(path):
raise TVMCException("File {} is not exist.".format(path))
if not path.endswith(".pdmodel"):
raise TVMCException("Path of model file should be endwith suffixes '.pdmodel'.")
prefix = "".join(path.strip().split(".")[:-1])
params_file_path = prefix + ".pdiparams"
if not os.path.exists(params_file_path):
raise TVMCException("File {} is not exist.".format(params_file_path))
# pylint: disable=E1101
exe = paddle.static.Executor(paddle.CPUPlace())
prog, _, _ = paddle.static.load_inference_model(prefix, exe)
return relay.frontend.from_paddle(prog, shape_dict=shape_dict, **kwargs)
class RelayFrontend(Frontend):
"""Relay frontend for TVMC"""
@staticmethod
def name():
return "relay"
@staticmethod
def suffixes():
return ["relay"]
def load(self, path, shape_dict=None, **kwargs):
with open(path, "r", encoding="utf-8") as relay_text:
text = relay_text.read()
if shape_dict is None:
logger.warning(
"Specify --input-shapes to ensure that model inputs "
"will not be considered as constants."
)
def _validate_text(text):
"""Check the provided file contents.
The relay.txt artifact contained in the MLF is missing the version header and
the metadata which is required to use meta[relay.Constant]."""
if re.compile(r".*\#\[version\.*").match(text) is None:
raise TVMCException(
"The relay model does not include the required version information."
)
if re.compile(r".*meta\[.+\].*", re.DOTALL).match(text):
if "#[metadata]" not in text:
raise TVMCException(
"The relay model does not include the required #[metadata] section. "
"Use ir_mod.astext(show_meta_data=True) to export compatible code."
)
_validate_text(text)
ir_mod = parser.fromtext(text)
if shape_dict:
input_names = shape_dict.keys()
else:
input_names = []
def _gen_params(ir_mod, skip_names=None):
"""Populate the all the params in the mode with ones."""
main_func = ir_mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
params = {}
for name, shape in shape_dict.items():
if skip_names and name in skip_names:
continue
if "int" in type_dict[name]:
data = np.random.randint(128, size=shape, dtype=type_dict[name])
else:
data = np.random.uniform(-1, 1, size=shape).astype(type_dict[name])
params[name] = data
return params
params = _gen_params(ir_mod, skip_names=input_names)
return ir_mod, params
ALL_FRONTENDS = [
KerasFrontend,
OnnxFrontend,
TensorflowFrontend,
TFLiteFrontend,
PyTorchFrontend,
PaddleFrontend,
RelayFrontend,
]
def get_frontend_names():
"""Return the names of all supported frontends
Returns
-------
list : list of str
A list of frontend names as strings
"""
return [frontend.name() for frontend in ALL_FRONTENDS]
def get_frontend_by_name(name: str):
"""
This function will try to get a frontend instance, based
on the name provided.
Parameters
----------
name : str
the name of a given frontend
Returns
-------
frontend : tvm.driver.tvmc.Frontend
An instance of the frontend that matches the
given name.
"""
for frontend in ALL_FRONTENDS:
if name == frontend.name():
return frontend()
raise TVMCException(
"unrecognized frontend '{0}'. Choose from: {1}".format(name, get_frontend_names())
)
def guess_frontend(path: str):
"""
This function will try to infer which framework is being used,
based on the extension of the file provided in the path parameter.
Parameters
----------
path : str
The path to the model file.
Returns
-------
frontend : tvm.driver.tvmc.Frontend
An instance of the frontend that matches with
the file extension provided in `path`.
"""
suffix = Path(path).suffix.lower()
if suffix.startswith("."):
suffix = suffix[1:]
for frontend in ALL_FRONTENDS:
if suffix in frontend.suffixes():
return frontend()
raise TVMCException("failed to infer the model format. Please specify --model-format")
def load_model(
path: str,
model_format: Optional[str] = None,
shape_dict: Optional[Dict[str, List[int]]] = None,
**kwargs,
):
"""Load a model from a supported framework and convert it
into an equivalent relay representation.
Parameters
----------
path : str
The path to the model file.
model_format : str, optional
The underlying framework used to create the model.
If not specified, this will be inferred from the file type.
shape_dict : dict, optional
Mapping from input names to their shapes.
Returns
-------
tvmc_model : TVMCModel
The loaded model, wrapped as a TVMCModel.
"""
if model_format is not None:
frontend = get_frontend_by_name(model_format)
else:
frontend = guess_frontend(path)
mod, params = frontend.load(path, shape_dict, **kwargs)
return TVMCModel(mod, params)
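# Usage sketch, not part of the original file: frontend resolution is cheap
# because heavy packages are only imported inside load(). The file name is a
# made-up example; no file is read here.
if __name__ == "__main__":
    print(get_frontend_names())
    print(type(guess_frontend("model.onnx")).__name__)  # OnnxFrontend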
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/main.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC - TVM driver command-line interface
"""
import argparse
import logging
import sys
import tvm
from tvm.driver.tvmc import TVMCException, TVMCImportError
from tvm.driver.tvmc.config_options import (
read_and_convert_json_into_dict,
convert_config_json_to_cli,
)
REGISTERED_PARSER = []
def register_parser(make_subparser):
"""
Utility function to register a subparser for tvmc.
Functions decorated with `tvm.driver.tvmc.main.register_parser` will be invoked
with a parameter containing the subparser instance they need to add themselves
to as a parser.
Example
-------
@register_parser
def _example_parser(main_subparser):
subparser = main_subparser.add_parser('example', help='...')
...
"""
REGISTERED_PARSER.append(make_subparser)
return make_subparser
def _main(argv):
"""TVM command line interface."""
parser = argparse.ArgumentParser(
prog="tvmc",
formatter_class=argparse.RawDescriptionHelpFormatter,
description="TVM compiler driver",
epilog=__doc__,
# Help action will be added later, after all subparsers are created,
# so it doesn't interfere with the creation of the dynamic subparsers.
add_help=False,
)
parser.add_argument("--config", default="default", help="configuration json file")
config_arg, argv = parser.parse_known_args(argv)
json_param_dict = read_and_convert_json_into_dict(config_arg)
json_config_values = convert_config_json_to_cli(json_param_dict)
parser.add_argument("-v", "--verbose", action="count", default=0, help="increase verbosity")
parser.add_argument("--version", action="store_true", help="print the version and exit")
subparser = parser.add_subparsers(title="commands")
for make_subparser in REGISTERED_PARSER:
make_subparser(subparser, parser, json_config_values)
# Finally, add help for the main parser.
parser.add_argument("-h", "--help", action="help", help="show this help message and exit.")
args = parser.parse_args(argv)
if args.verbose > 4:
args.verbose = 4
# Map the verbosity count onto logging levels: 0 -> ERROR (40), with each
# extra -v lowering the threshold by 10 (one level).
logging.getLogger("TVMC").setLevel(40 - args.verbose * 10)
if args.version:
sys.stdout.write("%s\n" % tvm.__version__)
return 0
if not hasattr(args, "func"):
# In case no valid subcommand is provided, show usage and exit
parser.print_help(sys.stderr)
return 1
try:
return args.func(args)
except TVMCImportError as err:
sys.stderr.write(
f'Package "{err}" is not installed. ' f'Hint: "pip install tlcpack[tvmc]".'
)
return 5
except TVMCException as err:
sys.stderr.write("Error: %s\n" % err)
return 4
def main():
sys.exit(_main(sys.argv[1:]))
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/micro.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support for micro targets (microTVM).
"""
import argparse
import os
from pathlib import Path
import shutil
import sys
from . import TVMCException
from .main import register_parser
from .arguments import TVMCSuppressedArgumentParser
from .project import (
get_project_options,
get_and_check_options,
get_project_dir,
)
try:
import tvm.micro.project as project
from tvm.micro import get_microtvm_template_projects
from tvm.micro.build import MicroTVMTemplateProjectNotFoundError
from tvm.micro.project_api.server import ServerError
from tvm.micro.project_api.client import ProjectAPIServerNotFoundError
SUPPORT_MICRO = True
except (ImportError, NameError):
SUPPORT_MICRO = False
@register_parser
def add_micro_parser(subparsers, main_parser, json_params):
"""Includes parser for 'micro' context and associated subcommands:
create-project (create), build, and flash.
"""
if SUPPORT_MICRO is False:
# Don't create 'tvmc micro' parser.
return
# Probe available default platform templates.
templates = {}
for p in ("zephyr", "arduino"):
try:
templates[p] = get_microtvm_template_projects(p)
except MicroTVMTemplateProjectNotFoundError:
pass
micro = subparsers.add_parser("micro", help="select micro context.")
micro.set_defaults(func=drive_micro)
micro_parser = micro.add_subparsers(title="subcommands")
# Selecting a subcommand under 'micro' is mandatory
micro_parser.required = True
micro_parser.dest = "subcommand"
# 'create_project' subcommand
create_project_parser = micro_parser.add_parser(
"create-project",
aliases=["create"],
help="create a project template of a given type or given a template dir.",
)
create_project_parser.set_defaults(subcommand_handler=create_project_handler)
create_project_parser.add_argument(
"project_dir",
help="project dir where the new project based on the template dir will be created.",
)
create_project_parser.add_argument("MLF", help="Model Library Format (MLF) .tar archive.")
create_project_parser.add_argument(
"-f",
"--force",
action="store_true",
help="force project creating even if the specified project directory already exists.",
)
# 'build' subcommand
build_parser = micro_parser.add_parser(
"build",
help="build a project dir, generally creating an image to be flashed, e.g. zephyr.elf.",
)
build_parser.set_defaults(subcommand_handler=build_handler)
build_parser.add_argument("project_dir", help="project dir to build.")
build_parser.add_argument("-f", "--force", action="store_true", help="Force rebuild.")
# 'flash' subcommand
flash_parser = micro_parser.add_parser(
"flash", help="flash the built image on a given micro target."
)
flash_parser.set_defaults(subcommand_handler=flash_handler)
flash_parser.add_argument("project_dir", help="project dir where the built image is.")
# For each platform add arguments detected automatically using Project API info query.
# Create subparsers for the platforms under 'create-project', 'build', and 'flash' subcommands.
help_msg = (
"you must select a platform from the list. You can pass '-h' for a selected "
"platform to list its options."
)
create_project_platforms_parser = create_project_parser.add_subparsers(
title="platforms", help=help_msg, dest="platform"
)
build_platforms_parser = build_parser.add_subparsers(
title="platforms", help=help_msg, dest="platform"
)
flash_platforms_parser = flash_parser.add_subparsers(
title="platforms", help=help_msg, dest="platform"
)
subcmds = {
# API method name Parser associated to method Handler func to call after parsing
"generate_project": [create_project_platforms_parser, create_project_handler],
"build": [build_platforms_parser, build_handler],
"flash": [flash_platforms_parser, flash_handler],
}
# Helper to add a platform parser to a subcmd parser.
def _add_parser(parser, platform):
platform_name = platform[0].upper() + platform[1:] + " platform"
platform_parser = parser.add_parser(
platform, add_help=False, help=f"select {platform_name}."
)
platform_parser.set_defaults(platform=platform)
return platform_parser
parser_by_subcmd = {}
for subcmd, subcmd_parser_handler in subcmds.items():
subcmd_parser = subcmd_parser_handler[0]
subcmd_parser.required = True # Selecting a platform or template is mandatory
parser_by_platform = {}
for platform in templates:
new_parser = _add_parser(subcmd_parser, platform)
parser_by_platform[platform] = new_parser
# Besides adding the parsers for each default platform (like Zephyr and Arduino), add a
# parser for 'template' to deal with adhoc projects/platforms.
new_parser = subcmd_parser.add_parser(
"template", add_help=False, help="select an adhoc template."
)
new_parser.add_argument(
"--template-dir", required=True, help="Project API template directory."
)
new_parser.set_defaults(platform="template")
parser_by_platform["template"] = new_parser
parser_by_subcmd[subcmd] = parser_by_platform
disposable_parser = TVMCSuppressedArgumentParser(main_parser)
try:
known_args, _ = disposable_parser.parse_known_args()
except TVMCException:
return
try:
subcmd = known_args.subcommand
platform = known_args.platform
except AttributeError:
# No subcommand or platform, hence no need to augment the parser for micro targets.
return
# Augment parser with project options.
if platform == "template":
# adhoc template
template_dir = str(Path(known_args.template_dir).resolve())
else:
# default template
template_dir = templates[platform]
try:
template = project.TemplateProject.from_directory(template_dir)
except ProjectAPIServerNotFoundError:
sys.exit(f"Error: Project API server not found in {template_dir}!")
template_info = template.info()
options_by_method = get_project_options(template_info)
# TODO(gromero): refactor to remove this map.
subcmd_to_method = {
"create-project": "generate_project",
"create": "generate_project",
"build": "build",
"flash": "flash",
}
method = subcmd_to_method[subcmd]
parser_by_subcmd_n_platform = parser_by_subcmd[method][platform]
_, handler = subcmds[method]
parser_by_subcmd_n_platform.formatter_class = (
# Set raw help text so help_text format works
argparse.RawTextHelpFormatter
)
parser_by_subcmd_n_platform.set_defaults(
subcommand_handler=handler,
valid_options=options_by_method[method],
template_dir=template_dir,
)
required = any([opt["required"] for opt in options_by_method[method]])
nargs = "+" if required else "*"
help_text_by_option = [opt["help_text"] for opt in options_by_method[method]]
help_text = "\n\n".join(help_text_by_option) + "\n\n"
parser_by_subcmd_n_platform.add_argument(
"--project-option", required=required, metavar="OPTION=VALUE", nargs=nargs, help=help_text
)
parser_by_subcmd_n_platform.add_argument(
"-h",
"--help",
"--list-options",
action="help",
help="show this help message which includes platform-specific options and exit.",
)
for one_entry in json_params:
micro.set_defaults(**one_entry)
def drive_micro(args):
# Call proper handler based on subcommand parsed.
args.subcommand_handler(args)
def create_project_handler(args):
"""Creates a new project dir."""
project_dir = get_project_dir(args.project_dir)
if os.path.exists(project_dir):
if args.force:
shutil.rmtree(project_dir)
else:
raise TVMCException(
"The specified project dir already exists. "
"To force overwriting it use '-f' or '--force'."
)
template_dir = str(Path(args.template_dir).resolve())
if not os.path.exists(template_dir):
raise TVMCException(f"Template directory {template_dir} does not exist!")
mlf_path = str(Path(args.MLF).resolve())
if not os.path.exists(mlf_path):
raise TVMCException(f"MLF file {mlf_path} does not exist!")
options = get_and_check_options(args.project_option, args.valid_options)
try:
project.generate_project_from_mlf(template_dir, project_dir, mlf_path, options)
except ServerError as error:
print("The following error occurred on the Project API server side: \n", error)
sys.exit(1)
def build_handler(args):
"""Builds a firmware image given a project dir."""
project_dir = get_project_dir(args.project_dir)
if not os.path.exists(project_dir):
raise TVMCException(f"{project_dir} doesn't exist.")
if os.path.exists(project_dir + "/build"):
if args.force:
shutil.rmtree(project_dir + "/build")
else:
raise TVMCException(
f"There is already a build in {project_dir}. "
"To force rebuild it use '-f' or '--force'."
)
options = get_and_check_options(args.project_option, args.valid_options)
try:
prj = project.GeneratedProject.from_directory(project_dir, options=options)
prj.build()
except ServerError as error:
print("The following error occurred on the Project API server side: ", error)
sys.exit(1)
def flash_handler(args):
"""Flashes a firmware image to a target device given a project dir."""
project_dir = get_project_dir(args.project_dir)
if not os.path.exists(project_dir + "/build"):
raise TVMCException(f"Could not find a build in {project_dir}")
options = get_and_check_options(args.project_option, args.valid_options)
try:
prj = project.GeneratedProject.from_directory(project_dir, options=options)
prj.flash()
except ServerError as error:
print("The following error occurred on the Project API server side: ", error)
sys.exit(1)
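# Command-line usage sketch, not part of the original file. The board name
# and paths are assumptions; run 'tvmc micro <subcommand> <platform> --help'
# for the real per-platform options.
#
#   tvmc micro create-project /tmp/prj model.tar zephyr --project-option board=qemu_x86
#   tvmc micro build /tmp/prj zephyr
#   tvmc micro flash /tmp/prj zephyr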
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file contains the definition of a set of classes that wrap the outputs
of TVMC functions to create a simpler and more intuitive API.
There is one class for each required stage of a TVM workflow.
The TVMCModel represents the result of importing a model into TVM; it
contains the precompiled graph definition and parameters that define
what the model does.
Compiling a TVMCModel produces a TVMCPackage, which contains the generated
artifacts that allow the model to be run on the target hardware.
Running a TVMCPackage produces a TVMCResult, which contains the outputs of
the model and the measured runtime.
Examples
--------
The following code shows a full lifecycle for a model using tvmc, first the
model is imported from an exterior framework, in this case onnx, then it
is tuned to find the best schedules on CPU, then compiled into a TVMCPackage,
and finally run.
.. code-block:: python
tvmc_model = tvmc.load("my_model.onnx")
tuning_records = tvmc.tune(tvmc_model, target="llvm")
tvmc_package = tvmc.compile(tvmc_model, target="llvm", tuning_records=tuning_records)
result = tvmc.run(tvmc_package, device="cpu")
print(result)
"""
import os
import tarfile
import json
from typing import Optional, Union, Dict, Callable, TextIO
from pathlib import Path
import numpy as np
import tvm
import tvm.contrib.cc
from tvm import relay
from tvm.contrib import utils
from tvm.driver.tvmc import TVMCException
from tvm.relay.backend.executor_factory import GraphExecutorFactoryModule
from tvm.runtime.module import BenchmarkResult
from tvm.runtime.vm import Executable
try:
from tvm.micro import export_model_library_format
except ImportError:
export_model_library_format = None
class TVMCModel(object):
"""Initialize a TVMC model from a relay model definition or a saved file.
Parameters
----------
mod : tvm.IRModule, optional
The relay module corresponding to this model.
params : dict, optional
A parameter dictionary for the model.
model_path: str, optional
An alternative way to load a TVMCModel, the path to a previously
saved model.
"""
def __init__(
self,
mod: Optional[tvm.IRModule] = None,
params: Optional[Dict[str, tvm.nd.NDArray]] = None,
model_path: Optional[str] = None,
):
if (mod is None or params is None) and (model_path is None):
raise TVMCException(
"Either mod and params must be provided "
"or a path to a previously saved TVMCModel"
)
self._tmp_dir = utils.tempdir()
if model_path is not None:
self.load(model_path)
else:
self.mod = mod
self.params = params if params else {}
def save(self, model_path: str):
"""Save the TVMCModel to disk.
Note that this saves the graph representation,
the parameters, and the tuning records if applicable. It will not save any
compiled artifacts.
Parameters
----------
model_path : str
A full path to save this TVMCModel to including the output file name.
The file will be saved as a tar file so using a ".tar" extension is advised.
"""
temp = self._tmp_dir
# Save relay graph
relay_name = "model.json"
relay_path = temp.relpath(relay_name)
with open(relay_path, "w") as relay_file:
relay_file.write(tvm.ir.save_json(self.mod))
# Save params
params_name = "model.params"
params_path = temp.relpath(params_name)
with open(params_path, "wb") as params_file:
params_file.write(relay.save_param_dict(self.params))
# Create a tar file.
with tarfile.open(model_path, "w") as tar:
tar.add(relay_path, relay_name)
tar.add(params_path, params_name)
# If default tuning records exist, save them as well.
if os.path.exists(self.default_tuning_records_path()):
tar.add(self.default_tuning_records_path(), "tuning_records")
# Also save the compiled package if it can be found.
if os.path.exists(self.default_package_path()):
tar.add(self.default_package_path(), "model_package.tar")
def load(self, model_path: str):
"""Load a TVMCModel from disk.
Parameters
----------
model_path : str
A path to load the TVMCModel from.
"""
temp = self._tmp_dir
t = tarfile.open(model_path)
t.extractall(temp.relpath("."))
# Load relay IR.
relay_path = temp.relpath("model.json")
with open(relay_path, "r") as relay_file:
self.mod = tvm.ir.load_json(relay_file.read())
# Load parameter dictionary.
params_path = temp.relpath("model.params")
with open(params_path, "rb") as params_file:
self.params = relay.load_param_dict(params_file.read())
def default_tuning_records_path(self):
"""Get a full path for storing tuning records in this model's temporary direcotry
Note that when this path is used, the tuning records will be saved and loaded
when calling `save` and `load`.
Returns
-------
records_path: str
A path to the default location for tuning records.
"""
return self._tmp_dir.relpath("tuning_records")
def default_package_path(self):
"""Get a full path for storing a compiled package in this model's temporary direcotry
Note that when this path is used, the package will be saved and loaded
when calling `save` and `load`.
Returns
-------
package_path: str
A path to the default location for the compiled package.
"""
return self._tmp_dir.relpath("model_package.tar")
def export_vm_format(
self,
vm_exec: Executable,
package_path: Optional[str] = None,
lib_format: str = "so",
):
"""Save this TVMCModel compiled via vm to file.
Parameters
----------
vm_exec : vm.Executable
The VM Executable containing the compiled artifacts needed to run this model.
package_path : str, None
Where the model should be saved. Note that it will be packaged as a .tar file.
If not provided, the package will be saved to a generically named file in tmp.
lib_format : str
How to export the module's function library. Must be one of "so" or "tar".
Returns
-------
package_path : str
The path that the package was saved to.
"""
lib_name = "lib." + lib_format
temp = self._tmp_dir
if package_path is None:
package_path = self.default_package_path()
path_lib = temp.relpath(lib_name)
vm_exec.mod.export_library(path_lib)
self.lib_path = path_lib
# Package up all the temp files into a tar file.
with tarfile.open(package_path, "w") as tar:
tar.add(path_lib, lib_name)
return package_path
def export_classic_format(
self,
executor_factory: GraphExecutorFactoryModule,
package_path: Optional[str] = None,
cross: Optional[Union[str, Callable]] = None,
cross_options: Optional[str] = None,
lib_format: str = "so",
):
"""Save this TVMCModel to file.
Parameters
----------
executor_factory : GraphExecutorFactoryModule
The factory containing the compiled artifacts needed to run this model.
package_path : str, None
Where the model should be saved. Note that it will be packaged as a .tar file.
If not provided, the package will be saved to a generically named file in tmp.
cross : str or callable object, optional
Function that performs the actual compilation.
cross_options : str, optional
Command line options to be passed to the cross compiler.
lib_format : str
How to export the module's function library. Must be one of "so" or "tar".
Returns
-------
package_path : str
The path that the package was saved to.
"""
lib_name = "mod." + lib_format
graph_name = "mod.json"
param_name = "mod.params"
temp = self._tmp_dir
if package_path is None:
package_path = self.default_package_path()
path_lib = temp.relpath(lib_name)
if not cross:
executor_factory.get_lib().export_library(path_lib)
else:
if not cross_options:
executor_factory.get_lib().export_library(
path_lib, tvm.contrib.cc.cross_compiler(cross)
)
else:
executor_factory.get_lib().export_library(
path_lib, tvm.contrib.cc.cross_compiler(cross, options=cross_options.split(" "))
)
self.lib_path = path_lib
with open(temp.relpath(graph_name), "w") as graph_file:
graph_file.write(executor_factory.get_graph_json())
with open(temp.relpath(param_name), "wb") as params_file:
params_file.write(relay.save_param_dict(executor_factory.get_params()))
# Package up all the temp files into a tar file.
with tarfile.open(package_path, "w") as tar:
tar.add(path_lib, lib_name)
tar.add(temp.relpath(graph_name), graph_name)
tar.add(temp.relpath(param_name), param_name)
return package_path
def export_package(
self,
executor_factory: Union[GraphExecutorFactoryModule, Executable],
package_path: Optional[str] = None,
cross: Optional[Union[str, Callable]] = None,
cross_options: Optional[str] = None,
output_format: str = "so",
):
"""Save this TVMCModel to file.
Parameters
----------
executor_factory : GraphExecutorFactoryModule
The factory containing the compiled artifacts needed to run this model.
package_path : str, None
Where the model should be saved. Note that it will be packaged as a .tar file.
If not provided, the package will be saved to a generically named file in tmp.
cross : str or callable object, optional
Function that performs the actual compilation.
cross_options : str, optional
Command line options to be passed to the cross compiler.
output_format : str
How to save the module's function library. Must be one of "so" or "tar" to save
using the classic format or "mlf" to save using the Model Library Format.
Returns
-------
package_path : str
The path that the package was saved to.
"""
if output_format not in ["so", "tar", "mlf"]:
raise TVMCException("Only 'so', 'tar', and 'mlf' output formats are supported.")
if output_format == "mlf" and cross:
raise TVMCException("Specifying the MLF output and a cross compiler is not supported.")
if isinstance(executor_factory, Executable):
package_path = self.export_vm_format(executor_factory, package_path, output_format)
elif output_format in ["so", "tar"]:
package_path = self.export_classic_format(
executor_factory, package_path, cross, cross_options, output_format
)
elif output_format == "mlf":
if export_model_library_format:
package_path = export_model_library_format(executor_factory, package_path)
else:
raise Exception("micro tvm is not enabled. Set USE_MICRO to ON in config.cmake")
return package_path
def summary(self, file: TextIO = None):
"""Print the IR corressponding to this model.
Arguments
---------
file: Writable, optional
If specified, the summary will be written to this file.
"""
print(self.mod, file=file)
class TVMCPackage(object):
"""Load a saved TVMCPackage from disk.
Parameters
----------
package_path : str
The path to the saved TVMCPackage that will be loaded.
project_dir : Path, str
If given and loading a MLF file, the path to the project directory that contains the file.
"""
def __init__(
self,
package_path: str,
project_dir: Optional[Union[Path, str]] = None,
):
self._tmp_dir = utils.tempdir()
self.package_path = package_path
self.import_package(self.package_path)
if project_dir and self.type != "mlf":
raise TVMCException("Setting 'project_dir' is only allowed when importing a MLF.!")
self.project_dir = project_dir
def import_package(self, package_path: str):
"""Load a TVMCPackage from a previously exported TVMCModel.
Parameters
----------
package_path : str
The path to the saved TVMCPackage.
"""
temp = self._tmp_dir
t = tarfile.open(package_path)
t.extractall(temp.relpath("."))
if os.path.exists(temp.relpath("metadata.json")):
# Model Library Format (MLF)
self.lib_name = None
self.lib_path = None
with open(temp.relpath("metadata.json")) as metadata_json:
metadata = json.load(metadata_json)
all_module_names = []
for name in metadata["modules"].keys():
all_module_names.append(name)
assert len(all_module_names) == 1, "Multiple modules in MLF is not supported."
module_name = all_module_names[0]
module_metadata = metadata["modules"][module_name]
has_graph_executor = "graph" in module_metadata["executors"]
graph = (
temp.relpath(f"executor-config/graph/{module_name}.graph")
if has_graph_executor
else None
)
params = temp.relpath(f"parameters/{module_name}.params")
self.type = "mlf"
else:
# Classic format
classic_lib_name_so = "mod.so"
classic_lib_name_tar = "mod.tar"
# VM format
vm_lib_name_so = "lib.so"
vm_lib_name_tar = "lib.tar"
if os.path.exists(temp.relpath(classic_lib_name_so)):
self.lib_name = classic_lib_name_so
self.type = "classic"
elif os.path.exists(temp.relpath(classic_lib_name_tar)):
self.lib_name = classic_lib_name_tar
self.type = "classic"
elif os.path.exists(temp.relpath(vm_lib_name_so)):
self.lib_name = vm_lib_name_so
self.type = "vm"
elif os.path.exists(temp.relpath(vm_lib_name_tar)):
self.lib_name = vm_lib_name_tar
self.type = "vm"
else:
raise TVMCException("Couldn't find exported library in the package.")
self.lib_path = temp.relpath(self.lib_name)
graph, params = None, None
if self.type == "classic":
graph = temp.relpath("mod.json")
params = temp.relpath("mod.params")
if params is not None:
with open(params, "rb") as param_file:
self.params = bytearray(param_file.read())
else:
self.params = None
if graph is not None:
with open(graph) as graph_file:
self.graph = graph_file.read()
else:
self.graph = None
class TVMCResult(object):
"""A class that stores the results of tvmc.run and provides helper utilities."""
def __init__(self, outputs: Dict[str, np.ndarray], times: BenchmarkResult):
"""Create a convenience wrapper around the output of tvmc.run
Parameters
----------
outputs : dict
Outputs dictionary mapping the name of the output to its numpy value.
times : BenchmarkResult
The execution times, in seconds, measured by the time evaluator while producing the outputs.
"""
self.outputs = outputs
self.times = times
def format_times(self):
"""Format the mean, max, min and std of the execution times.
This has the effect of producing a small table that looks like:
.. code-block::
Execution time summary:
mean (ms) median (ms) max (ms) min (ms) std (ms)
0.14310 0.14310 0.16161 0.12933 0.01004
Returns
-------
str
A formatted string containing the statistics.
"""
return str(self.times)
def get_output(self, name: str):
"""A helper function to grab one of the outputs by name.
Parameters
----------
name : str
The name of the output to return
Returns
-------
output : np.ndarray
The output corresponding to name.
"""
return self.outputs[name]
def save(self, output_path: str):
"""Save the numpy outputs to disk as a .npz file.
Parameters
----------
output_path : str
The path to save the numpy results to.
"""
np.savez(output_path, **self.outputs)
def __str__(self):
stat_table = self.format_times()
output_keys = f"Output Names:\n {list(self.outputs.keys())}"
return stat_table + "\n" + output_keys
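# Round-trip sketch, not part of the original file: save and reload a tiny
# identity model. The /tmp path is an assumption about the local filesystem.
if __name__ == "__main__":
    x = relay.var("x", shape=(1, 3), dtype="float32")
    demo = TVMCModel(tvm.IRModule.from_expr(relay.Function([x], x)), {})
    demo.save("/tmp/demo_tvmc_model.tar")
    TVMCModel(model_path="/tmp/demo_tvmc_model.tar").summary()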
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/pass_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC PassContext Interface
"""
import importlib
import tvm
from tvm.driver.tvmc import TVMCException
def load_function(full_name):
"""Dynamic loading a function by the full name.
Parameters
----------
full_name: str
The name of a PackedFunc or a string of the form "path.to.module.func"
that indicates the module that can be imported.
Note the lookup order: it first tries to resolve the name as a TVM global
function; if that fails, it tries to import it via "importlib.import_module".
Returns
-------
func: function or PackedFunc
The loaded function.
"""
global_func = tvm.get_global_func(full_name, allow_missing=True)
if global_func is not None:
return global_func
# split full name "path.to.module.func" into two parts ["path.to.module", "func"]
module_name, func_name = full_name.rsplit(".", 1)
# import module and find the function
module = importlib.import_module(module_name)
if hasattr(module, func_name):
return getattr(module, func_name)
raise TVMCException(f"No function '{func_name}' found in module '{module_name}'.")
def get_pass_config_value(name, value, config_type):
"""Get a PassContext configuration value, based on its config data type.
Parameters
----------
name: str
config identifier name.
value: str
value assigned to the config, provided via command line.
config_type: str
data type defined to the config, as string.
Returns
-------
parsed_value: bool, int or str
a representation of the input value, converted to the type
specified by config_type.
"""
parsed_value = None
if config_type == "IntImm":
# "Bool" configurations in the PassContext are recognized as
# IntImm, so deal with this case here
mapping_values = {
"false": False,
"true": True,
}
if value.isdigit():
parsed_value = int(value)
else:
# if not an int, accept only values on the mapping table, case insensitive
parsed_value = mapping_values.get(value.lower(), None)
if parsed_value is None:
raise TVMCException(f"Invalid value '{value}' for configuration '{name}'.")
elif config_type == "runtime.String":
parsed_value = value
elif config_type == "Array":
if name == "tir.add_lower_pass":
pass_list = value.split(",")
if len(pass_list) % 2 != 0:
raise TVMCException(
f"The configuration of '{name}' must be of the form "
"'tir.add_lower_pass=opt_level1,pass1,opt_evel2,pass2'"
)
parsed_value = []
for i in range(0, len(pass_list), 2):
level, pass_func = pass_list[i].strip(), pass_list[i + 1].strip()
try:
level = int(level)
except ValueError:
raise TVMCException(f"Only integer is allow for configuration '{name}'.")
# TODO (@leeexyz) We should parse configurations of each tir Pass.
# For now, we only use the defaults. Currently, There are four config nodes:
# `tir.transform.LoopPartitionConfig`
# `tir.transform.UnrollLoopConfig`
# `tir.transform.HoistIfThenElseConfig`
# `tir.transform.InjectDoubleBufferConfig`
# loading pass func and calling it to get the Pass
pass_func = load_function(pass_func)()
parsed_value.append((level, pass_func))
else:
raise TVMCException(f"Unsupported configuration '{name}' for '{config_type}' type.")
else:
# do not raise here because the type was already checked before calling this function
pass
return parsed_value
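# Example (sketch): how the "IntImm" branch parses common CLI strings. The
# config name below is real in upstream TVM but used here only for illustration:
#
#   get_pass_config_value("tir.disable_vectorize", "true", "IntImm")   # -> True
#   get_pass_config_value("tir.disable_vectorize", "3", "IntImm")      # -> 3
#   get_pass_config_value("tir.disable_vectorize", "maybe", "IntImm")  # raises TVMCException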
def parse_configs(input_configs):
"""Parse configuration values set via command line.
Parameters
----------
input_configs: list of str
list of configurations provided via command line.
Returns
-------
pass_context_configs: dict
a dict containing key-value configs to be used in the PassContext.
"""
if not input_configs:
return {}
all_configs = tvm.ir.transform.PassContext.list_configs()
supported_config_types = ("IntImm", "runtime.String", "Array")
supported_configs = [
name for name in all_configs.keys() if all_configs[name]["type"] in supported_config_types
]
pass_context_configs = {}
for config in input_configs:
if not config:
raise TVMCException(
f"Invalid format for configuration '{config}', use <config>=<value>"
)
# Each config is expected to be provided as "name=value"
try:
name, value = config.split("=")
name = name.strip()
value = value.strip()
except ValueError:
raise TVMCException(
f"Invalid format for configuration '{config}', use <config>=<value>"
)
if name not in all_configs:
raise TVMCException(
f"Configuration '{name}' is not defined in TVM. "
f"These are the existing configurations: {', '.join(all_configs)}"
)
if name not in supported_configs:
raise TVMCException(
f"Configuration '{name}' uses a data type not supported by TVMC. "
f"The following configurations are supported: {', '.join(supported_configs)}"
)
config_type = all_configs[name]["type"]
parsed_value = get_pass_config_value(name, value, config_type)
if config_type == "Array" and name in pass_context_configs:
# merge configs if the configuration exists
pass_context_configs[name].extend(parsed_value)
else:
pass_context_configs[name] = parsed_value
return pass_context_configs
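# Example (sketch, assuming the config is registered in the running TVM build):
#
#   parse_configs(["tir.disable_vectorize=true"])
#   # -> {"tir.disable_vectorize": True}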
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/pass_list.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC Pass List Management
"""
import argparse
import tvm
from tvm._ffi import registry
def parse_pass_list_str(input_string):
"""Parse an input string for existing passes
Parameters
----------
input_string: str
Possibly comma-separated string with the names of passes
Returns
-------
list: a list of existing passes.
"""
_prefix = "relay._transform."
pass_list = input_string.split(",")
missing_list = [
p.strip()
for p in pass_list
if len(p.strip()) > 0 and tvm.get_global_func(_prefix + p.strip(), True) is None
]
if len(missing_list) > 0:
available_list = [
n[len(_prefix) :] for n in registry.list_global_func_names() if n.startswith(_prefix)
]
raise argparse.ArgumentTypeError(
"Following passes are not registered within tvm: {}. Available: {}.".format(
", ".join(missing_list), ", ".join(sorted(available_list))
)
)
return pass_list
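# Example (sketch): pass names are validated against globals registered under
# the "relay._transform." prefix, e.g.:
#
#   parse_pass_list_str("FoldConstant,EliminateCommonSubexpr")
#   # -> ["FoldConstant", "EliminateCommonSubexpr"]
#   parse_pass_list_str("NoSuchPass")  # raises argparse.ArgumentTypeError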
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/project.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC Project Generation Functions
"""
import os
import pathlib
from collections import defaultdict
from typing import Union
from . import TVMCException
from .fmtopt import format_option
def get_project_options(project_info):
"""Get all project options as returned by Project API 'server_info_query'
and return them in a dict indexed by the API method they belong to.
Parameters
----------
project_info: dict of list
a dict of lists as returned by Project API 'server_info_query' among
which there is a list called 'project_options' containing all the
project options available for a given project/platform.
Returns
-------
options_by_method: dict of list
a dict indexed by the API method names (e.g. "generate_project",
"build", "flash", or "open_transport") of lists containing all the
options (plus associated metadata and formatted help text) that belong
to a method.
The metadata associated to the options include the field 'choices' and
'required' which are convenient for parsers.
The formatted help text field 'help_text' is a string that contains the
name of the option, the choices for the option, and the option's default
value.
"""
options = project_info["project_options"]
options_by_method = defaultdict(list)
for opt in options:
# Get list of methods associated with an option based on the
# existence of 'required' or 'optional' lists. The API specification
# guarantees at least one of these lists will exist. If a list does
# not exist it's returned as None by the API.
metadata = ["required", "optional"]
option_methods = [(opt[md], bool(md == "required")) for md in metadata if opt[md]]
for methods, is_opt_required in option_methods:
for method in methods:
name = opt["name"]
# For boolean options only, set 'choices' according to the
# option type. The API returns 'choices' for these options
# as None, but the choices can be deduced from 'type' in this case.
if opt["type"] == "bool":
opt["choices"] = ["true", "false"]
if opt["choices"]:
choices = "{" + ", ".join(opt["choices"]) + "}"
else:
choices = opt["name"].upper()
option_choices_text = f"{name}={choices}"
help_text = opt["help"][0].lower() + opt["help"][1:]
if opt["default"]:
default_text = f"Defaults to '{opt['default']}'."
else:
default_text = None
formatted_help_text = format_option(
option_choices_text, help_text, default_text, is_opt_required
)
option = {
"name": opt["name"],
"choices": opt["choices"],
"help_text": formatted_help_text,
"required": is_opt_required,
}
options_by_method[method].append(option)
return options_by_method
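# Example (hypothetical option entry, to illustrate the expected shapes):
#
#   project_info = {"project_options": [{
#       "name": "port", "type": "str", "required": ["open_transport"],
#       "optional": None, "choices": None, "default": "/dev/ttyUSB0",
#       "help": "Serial port to use."}]}
#   opts = get_project_options(project_info)
#   opts["open_transport"][0]["required"]  # -> True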
def get_options(options):
"""Get option and option value from the list options returned by the parser.
Parameters
----------
options: list of str
list of strings of the form "option=value" as returned by the parser.
Returns
-------
opts: dict
dict indexed by option names and associated values.
"""
opts = {}
for option in options:
try:
k, v = option.split("=")
opts[k] = v
except ValueError:
raise TVMCException(f"Invalid option format: {option}. Please use OPTION=VALUE.")
return opts
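# Example (sketch):
#
#   get_options(["port=/dev/ttyUSB0", "baud=115200"])
#   # -> {"port": "/dev/ttyUSB0", "baud": "115200"}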
def check_options(options, valid_options):
"""Check if an option (required or optional) is valid. i.e. in the list of valid options.
Parameters
----------
options: dict
dict indexed by option name of options and options values to be checked.
valid_options: list of dict
list of all valid options and choices for a platform.
Returns
-------
None. Raise TVMCException if check fails, i.e. if an option is not in the list of valid options.
"""
required_options = [opt["name"] for opt in valid_options if opt["required"]]
for required_option in required_options:
if required_option not in options:
raise TVMCException(
f"Option '{required_option}' is required but was not specified. Use --list-options "
"to see all required options."
)
remaining_options = set(options) - set(required_options)
optional_options = [opt["name"] for opt in valid_options if not opt["required"]]
for option in remaining_options:
if option not in optional_options:
raise TVMCException(
f"Option '{option}' is invalid. Use --list-options to see all available options."
)
def check_options_choices(options, valid_options):
"""Check if an option value is among the option's choices, when choices exist.
Parameters
----------
options: dict
dict indexed by option name of options and options values to be checked.
valid_options: list of dict
list of all valid options and choices for a platform.
Returns
-------
None. Raise TVMCException if check fails, i.e. if an option value is not valid.
"""
# Dict of all valid options and associated valid choices.
# Options with no choices are excluded from the dict.
valid_options_choices = {
opt["name"]: opt["choices"] for opt in valid_options if opt["choices"] is not None
}
for option in options:
if option in valid_options_choices:
if options[option] not in valid_options_choices[option]:
raise TVMCException(
f"Choice '{options[option]}' for option '{option}' is invalid. "
"Use --list-options to see all available choices for that option."
)
def get_and_check_options(passed_options, valid_options):
"""Get options and check if they are valid. If choices exist for them, check values against it.
Parameters
----------
passed_options: list of str
list of strings in the "key=value" form as captured by argparse.
valid_options: list
list with all options available for a given API method / project as returned by
get_project_options().
Returns
-------
opts: dict
dict indexed by option names and associated values.
Or None if passed_options is None.
"""
if passed_options is None:
# No options to check
return None
# From a list of k=v strings, make a dict options[k]=v
opts = get_options(passed_options)
# Check if passed options are valid
check_options(opts, valid_options)
# Check (when a list of choices exists) if the passed values are valid
check_options_choices(opts, valid_options)
return opts
def get_project_dir(project_dir: Union[pathlib.Path, str]) -> str:
"""Get project directory path"""
if not os.path.isabs(project_dir):
return os.path.abspath(project_dir)
return project_dir
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/registry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file contains functions for processing registry based inputs for the TVMC CLI
"""
from tvm.driver.tvmc import TVMCException
# We can't tell the type inside an Array but all current options are strings so
# it can default to that. Bool is used alongside Integer but aren't distinguished
# between as both are represented by IntImm
INTERNAL_TO_NATIVE_TYPE = {"runtime.String": str, "IntImm": int, "Array": str}
INTERNAL_TO_HELP = {"runtime.String": " string", "IntImm": "", "Array": " options"}
def _generate_registry_option_args(parser, registry, name):
target_group = parser.add_argument_group(f"{registry.flag_registry_name} {name}")
for option_name, option_type in registry.list_registered_options(name).items():
if option_type in INTERNAL_TO_NATIVE_TYPE:
target_group.add_argument(
f"--{registry.flag_registry_name}-{name}-{option_name}",
type=INTERNAL_TO_NATIVE_TYPE[option_type],
help=(
f"{registry.flag_registry_name.title()} "
+ "{name} {option_name}{INTERNAL_TO_HELP[option_type]}"
),
)
def generate_registry_args(parser, registry, default=None):
"""Walks through the given registry and generates arguments for each of the available options"""
parser.add_argument(
f"--{registry.flag_registry_name}",
help=f"{registry.flag_registry_name.title()} to compile the model with",
required=False,
default=default,
)
names = registry.list_registered()
for name in names:
_generate_registry_option_args(parser, registry, name)
def _reconstruct_registry_options(args, registry, name):
options = {}
for option, option_type in registry.list_registered_options(name).items():
if option_type in INTERNAL_TO_NATIVE_TYPE:
var_name = f"{registry.flag_registry_name}_{name}_{option.replace('-', '_')}"
option_value = getattr(args, var_name)
if option_value is not None:
options[option] = option_value
return options
def reconstruct_registry_entity(args, registry):
"""Reconstructs an entity from arguments generated from a registry"""
possible_names = registry.list_registered()
name = getattr(args, registry.flag_registry_name)
if name is None:
return None
if name not in possible_names:
raise TVMCException(f'{registry.flag_registry_name.title()} "{name}" is not defined')
reconstructed = {
possible_name: _reconstruct_registry_options(args, registry, possible_name)
for possible_name in possible_names
}
for possible_name in possible_names:
if possible_name != name and reconstructed[possible_name]:
first_option = list(reconstructed[possible_name])[0]
raise TVMCException(
f"Passed --{registry.flag_registry_name}-{possible_name}-{first_option} "
f"but did not specify {possible_name} executor"
)
return registry(name, reconstructed[name])
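# Example (sketch, using TVM's Executor registry for illustration): flags such
# as "--executor graph --executor-graph-link-params 1" are reconstructed into
# Executor("graph", {"link-params": 1}); passing an option for an executor
# that was not selected raises a TVMCException.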
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/result_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file contains utility functions for processing the outputs
of TVMC models. These utilities are likely to be task specific;
over time more will be added to support additional machine learning tasks.
Examples
--------
The following code shows how one might postprocess
the output of a classification model.
.. code-block:: python
result = tvmc.run(tvmc_package, device="cpu")
top_results = result_utils.get_top_results(result, max_results=5)
"""
import numpy as np
from .model import TVMCResult
def get_top_results(result: TVMCResult, max_results: int):
"""Return the top n results from the output tensor.
This function is primarily for image classification and will
not necessarily generalize.
Parameters
----------
result : TVMCResult
The output of a TVMCModel
max_results : int
Number of results to return
Returns
-------
top_results : np.array
Results array of shape (2, n).
The first row is the indices and the second is the values.
"""
output = np.copy(result.outputs["output_0"])
sorted_labels = output.argsort()[0][-max_results:][::-1]
output.sort()
sorted_values = output[0][-max_results:][::-1]
top_results = np.array([sorted_labels, sorted_values])
return top_results
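# Example (sketch): for a classifier whose "output_0" has shape (1, 1000),
#
#   top = get_top_results(result, max_results=3)
#   # top[0] -> the three highest-scoring class indices
#   # top[1] -> the corresponding scores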
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support to run compiled networks both locally and remotely.
"""
from contextlib import ExitStack
import logging
import pathlib
from typing import Dict, Optional, Union
from tarfile import ReadError
import argparse
import sys
import json
import numpy as np
import tvm
from tvm import rpc
from tvm.runtime import vm
from tvm.autotvm.measure import request_remote
from tvm.contrib import graph_executor as executor
from tvm.contrib.debugger import debug_executor
from tvm.runtime import profiler_vm
from tvm.relay.param_dict import load_param_dict
from . import TVMCException
from .arguments import TVMCSuppressedArgumentParser
from .project import (
get_project_options,
get_and_check_options,
get_project_dir,
)
from .main import register_parser
from .model import TVMCPackage, TVMCResult
from .result_utils import get_top_results
from .tracker import tracker_host_port_from_cli
try:
import tvm.micro.project as project
from tvm.micro.project import TemplateProjectError
from tvm.micro.project_api.client import ProjectAPIServerNotFoundError
SUPPORT_MICRO = True
except (ImportError, AttributeError) as exception:
SUPPORT_MICRO = False
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
@register_parser
def add_run_parser(subparsers, main_parser, json_params):
"""Include parser for 'run' subcommand"""
# Use conflict_handler='resolve' to allow the '--list-options' option to be properly overridden when
# augmenting the parser with the micro device options (i.e. when '--device micro').
parser = subparsers.add_parser("run", help="run a compiled module", conflict_handler="resolve")
parser.set_defaults(func=drive_run)
# TODO --device needs to be extended and tested to support other targets,
# like 'webgpu', etc (@leandron)
parser.add_argument(
"--device",
choices=["cpu", "cuda", "cl", "metal", "vulkan", "rocm", "micro"],
default="cpu",
help="target device to run the compiled module. Defaults to 'cpu'",
)
parser.add_argument(
"--fill-mode",
choices=["zeros", "ones", "random"],
default="random",
help="fill all input tensors with values. In case --inputs/-i is provided, "
"they will take precedence over --fill-mode. Any remaining inputs will be "
"filled using the chosen fill mode. Defaults to 'random'",
)
parser.add_argument("-i", "--inputs", help="path to the .npz input file")
parser.add_argument("-o", "--outputs", help="path to the .npz output file")
parser.add_argument(
"--print-time",
action="store_true",
help="record and print the execution time(s). Enabling print-time will result "
" in (1 + repeat * number) executions of the model. (non-micro devices only)",
)
parser.add_argument(
"--print-top",
metavar="N",
type=int,
help="print the top n values and indices of the output tensor",
)
parser.add_argument(
"--profile",
action="store_true",
help="generate profiling data from the runtime execution. "
"Using --profile requires the Graph Executor Debug enabled on TVM. "
"Profiling may also have an impact on inference time, "
"making it take longer to be generated. (non-micro devices only)",
)
parser.add_argument(
"--end-to-end",
action="store_true",
help="Measure data transfers as well as model execution. This can provide a "
"more realistic performance measurement in many cases. Requires "
"'--print-time' to be specified.",
)
parser.add_argument(
"--repeat",
metavar="N",
type=int,
default=1,
help="How many times to repeat the run. Requires '--print-time' to be "
"specified. Defaults to '1'",
)
parser.add_argument(
"--number",
metavar="N",
type=int,
default=1,
help="The number of runs to measure within each repeat. Requires "
"'--print-time' to be specified. Defaults to '1'",
)
parser.add_argument(
"--rpc-key",
help="the RPC tracker key of the target device. (non-micro devices only)",
)
parser.add_argument(
"--rpc-tracker",
help="hostname (required) and port (optional, defaults to 9090) of the RPC tracker, "
"e.g. '192.168.0.100:9999'. (non-micro devices only)",
)
parser.add_argument(
"PATH",
help="path to the compiled module file or to the project directory if '--device micro' "
"is selected.",
)
parser.add_argument(
"--list-options",
action="store_true",
help="show all run options and option choices when '--device micro' is selected. "
"(micro devices only)",
)
disposable_parser = TVMCSuppressedArgumentParser(main_parser)
try:
known_args, _ = disposable_parser.parse_known_args()
except TVMCException:
return
if vars(known_args).get("device") != "micro":
# No need to augment the parser for micro targets.
return
if SUPPORT_MICRO is False:
sys.exit(
"'--device micro' is not supported. "
"Please build TVM with micro support (USE_MICRO ON)!"
)
project_dir = get_project_dir(known_args.PATH)
try:
project_ = project.GeneratedProject.from_directory(project_dir, None)
except ProjectAPIServerNotFoundError:
sys.exit(f"Error: Project API server not found in {project_dir}!")
except TemplateProjectError:
sys.exit(
f"Error: Project directory error. That usually happens when model.tar is not found."
)
project_info = project_.info()
options_by_method = get_project_options(project_info)
mlf_path = project_info["model_library_format_path"]
parser.formatter_class = (
argparse.RawTextHelpFormatter
) # Set raw help text so customized help_text format works
parser.set_defaults(valid_options=options_by_method["open_transport"], mlf_path=mlf_path)
required = any([opt["required"] for opt in options_by_method["open_transport"]])
nargs = "+" if required else "*"
help_text_by_option = [opt["help_text"] for opt in options_by_method["open_transport"]]
help_text = "\n\n".join(help_text_by_option) + "\n\n"
parser.add_argument(
"--project-option", required=required, metavar="OPTION=VALUE", nargs=nargs, help=help_text
)
parser.add_argument(
"--list-options",
action="help",
help="show this help message with platform-specific options and exit.",
)
for one_entry in json_params:
parser.set_defaults(**one_entry)
def drive_run(args):
"""Invoke runner module with command line arguments
Parameters
----------
args: argparse.Namespace
Arguments from command line parser.
"""
path = pathlib.Path(args.PATH)
options = None
project_dir = None
if args.device == "micro":
# If it's a micro device, then grab the model.tar path from Project API instead.
# args.PATH will be used too since it points to the project directory. N.B.: there is no
# way to determine the model.tar path from the project dir or vice versa (each platform
# is free to put model.tar wherever it's convenient).
project_dir = path
path = pathlib.Path(args.mlf_path)
# Check for options unavailable for micro targets.
if args.rpc_key or args.rpc_tracker:
raise TVMCException(
"--rpc-key and/or --rpc-tracker can't be specified for micro targets."
)
if args.device != "micro":
raise TVMCException(
f"Device '{args.device}' not supported. "
"Only device 'micro' is supported to run a model in MLF, "
"i.e. when '--device micro'."
)
if args.profile:
raise TVMCException("--profile is not currently supported for micro devices.")
if args.print_time:
raise TVMCException("--print-time is not currently supported for micro devices.")
# Get and check options for micro targets.
options = get_and_check_options(args.project_option, args.valid_options)
else:
# Check for options only available for micro targets.
if args.list_options:
raise TVMCException(
"--list-options is only availabe on micro targets, i.e. when '--device micro'."
)
try:
tvmc_package = TVMCPackage(package_path=path, project_dir=project_dir)
except IsADirectoryError:
raise TVMCException(f"File {path} must be an archive, not a directory.")
except FileNotFoundError:
raise TVMCException(f"File {path} does not exist.")
except ReadError:
raise TVMCException(f"Could not read model from archive {path}!")
rpc_hostname, rpc_port = tracker_host_port_from_cli(args.rpc_tracker)
try:
inputs = np.load(args.inputs) if args.inputs else {}
except IOError as ex:
raise TVMCException("Error loading inputs file: %s" % ex)
result = run_module(
tvmc_package,
args.device,
hostname=rpc_hostname,
port=rpc_port,
rpc_key=args.rpc_key,
inputs=inputs,
fill_mode=args.fill_mode,
benchmark=args.print_time,
repeat=args.repeat,
number=args.number,
profile=args.profile,
end_to_end=args.end_to_end,
options=options,
)
if args.print_time:
stat_table = result.format_times()
# print here is intentional
print(stat_table)
if args.print_top:
top_results = get_top_results(result, args.print_top)
# print here is intentional
print(top_results)
if args.outputs:
# Save the outputs
result.save(args.outputs)
def get_input_info(graph_str: str, params: Dict[str, tvm.nd.NDArray]):
"""Return the 'shape' and 'dtype' dictionaries for the input
tensors of a compiled module.
.. note::
We can't simply get the input tensors from a TVM graph
because weight tensors are treated equivalently. Therefore, to
find the input tensors we look at the 'arg_nodes' in the graph
(which are either weights or inputs) and check which ones don't
appear in the params (where the weights are stored). These nodes
are therefore inferred to be input tensors.
.. note::
There exists a more recent API to retrieve the input information
directly from the module. However, this isn't supported when used
with RPC due to a lack of support for Array and Map datatypes.
Therefore, this function exists only as a fallback when RPC is in
use. If RPC isn't being used, please use the more recent API.
Parameters
----------
graph_str : str
JSON graph of the module serialized as a string.
params : dict
Parameter dictionary mapping name to value.
Returns
-------
shape_dict : dict
Shape dictionary - {input_name: tuple}.
dtype_dict : dict
dtype dictionary - {input_name: dtype}.
"""
shape_dict = {}
dtype_dict = {}
params_dict = load_param_dict(params)
param_names = [k for (k, v) in params_dict.items()]
graph = json.loads(graph_str)
for node_id in graph["arg_nodes"]:
node = graph["nodes"][node_id]
# If a node is not in the params, infer it to be an input node
name = node["name"]
if name not in param_names:
shape_dict[name] = graph["attrs"]["shape"][1][node_id]
dtype_dict[name] = graph["attrs"]["dltype"][1][node_id]
return shape_dict, dtype_dict
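# Sketch of the graph JSON fields this function relies on (abbreviated,
# hypothetical values):
#
#   {"arg_nodes": [0, 1],
#    "nodes": [{"name": "data"}, {"name": "weight"}],
#    "attrs": {"shape": ["list_shape", [[1, 3, 224, 224], [64, 3, 7, 7]]],
#              "dltype": ["list_str", ["float32", "float32"]]}}
#
# If "weight" appears in params, only "data" is reported as an input.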
def generate_tensor_data(shape: tuple, dtype: str, fill_mode: str):
"""Generate data to produce a tensor of given shape and dtype.
Random data generation depends on the dtype. For int8 types,
random integers in the range 0->127 are generated. For all other
types, random floats are generated in the range -1->1 and then
cast to the appropriate dtype.
This is used to quickly generate some input data for the models, as
a way to check that the compiled module is sane to run.
Parameters
----------
shape : tuple
The shape of the tensor.
dtype : str
The dtype of the tensor.
fill_mode : str
The fill-mode to use, either "zeros", "ones" or "random".
Returns
-------
tensor : np.array
The generated tensor as a np.array.
"""
if fill_mode == "zeros":
tensor = np.zeros(shape=shape, dtype=dtype)
elif fill_mode == "ones":
tensor = np.ones(shape=shape, dtype=dtype)
elif fill_mode == "random":
if "int8" in dtype:
tensor = np.random.randint(128, size=shape, dtype=dtype)
else:
tensor = np.random.uniform(-1, 1, size=shape).astype(dtype)
else:
raise TVMCException("unknown fill-mode: {}".format(fill_mode))
return tensor
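# Example (sketch):
#
#   generate_tensor_data((1, 3), "float32", "ones")  # -> array([[1., 1., 1.]], dtype=float32)
#   generate_tensor_data((2, 2), "int8", "random")   # random ints in [0, 127]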
def make_inputs_dict(
shape_dict: tvm.container.Map,
dtype_dict: tvm.container.Map,
inputs: Optional[Dict[str, np.ndarray]] = None,
fill_mode: str = "random",
):
"""Make the inputs dictionary for a graph.
Use data from 'inputs' where specified. For input tensors
where no data has been given, generate data according to the
chosen fill-mode.
Parameters
----------
shape_dict : Map
Shape dictionary - {input_name: tuple}.
dtype_dict : Map
dtype dictionary - {input_name: dtype}.
inputs : dict, optional
A dictionary that maps input names to numpy values.
fill_mode : str, optional
The fill-mode to use when generating tensor data.
Can be either "zeros", "ones" or "random".
Returns
-------
inputs_dict : dict
Complete inputs dictionary - {input_name: np.array}.
"""
logger.debug("creating inputs dict")
if inputs is None:
inputs = {}
# First check all the keys in inputs exist in the graph
for input_name in inputs:
if input_name not in shape_dict.keys():
raise TVMCException(
"the input tensor '{}' is not in the graph. Expected inputs: '{}'".format(
input_name, list(shape_dict.keys())
)
)
# Now construct the input dict, generating tensors where no
# data already exists in 'inputs'
inputs_dict = {}
for input_name in shape_dict:
if input_name in inputs.keys():
logger.debug("setting input '%s' with user input data", input_name)
inputs_dict[input_name] = inputs[input_name]
else:
# container.ShapeTuple -> tuple
shape = tuple(shape_dict[input_name])
# container.String -> str
dtype = str(dtype_dict[input_name])
logger.debug(
"generating data for input '%s' (shape: %s, dtype: %s), using fill-mode '%s'",
input_name,
shape,
dtype,
fill_mode,
)
data = generate_tensor_data(shape, dtype, fill_mode)
inputs_dict[input_name] = data
return inputs_dict
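# Example (sketch, plain dicts work as well as TVM Maps):
#
#   make_inputs_dict({"data": (1, 3)}, {"data": "float32"}, fill_mode="zeros")
#   # -> {"data": np.zeros((1, 3), dtype="float32")}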
def run_module(
tvmc_package: TVMCPackage,
device: str,
hostname: Optional[str] = None,
port: Union[int, str] = 9090,
rpc_key: Optional[str] = None,
inputs: Optional[Dict[str, np.ndarray]] = None,
fill_mode: str = "random",
benchmark: bool = False,
repeat: int = 10,
number: int = 10,
profile: bool = False,
end_to_end: bool = False,
options: dict = None,
):
"""Run a compiled graph executor module locally or remotely with
optional input values.
If input tensors are not specified explicitly, they can be filled
with zeroes, ones or random data.
Parameters
----------
tvmc_package: TVMCPackage
The compiled model package object that will be run.
device: str
the device (e.g. "cpu" or "cuda") to be targeted by the RPC
session (local or remote).
hostname : str, optional
The hostname of the target device on which to run.
port : int, optional
The port of the target device on which to run.
rpc_key : str, optional
The tracker key of the target device. If this is set, it
will be assumed that remote points to a tracker.
inputs : dict, optional
A dictionary that maps input names to numpy values. If not provided,
inputs will be generated using the fill_mode argument.
fill_mode : str, optional
The fill-mode to use when generating data for input tensors.
Valid options are "zeros", "ones" and "random".
Defaults to "random".
benchmark : bool, optional
Whether to benchmark the execution of the module. Enabling benchmark will
result in (1 + repeat * number) executions of the model.
repeat : int, optional
How many times to repeat the run. Requires `benchmark` to be set to True.
number : int, optional
The number of runs to measure within each repeat.
Requires `benchmark` to be set to True.
profile : bool
Whether to profile the run with the debug executor.
end_to_end : bool
Whether to measure the time of memory copies as well as model
execution. Turning this on can provide a more realistic estimate
of how long running the model in production would take.
Requires `benchmark` to be set to True.
Returns
-------
TVMCResult
The results of the run, including the output data.
"""
if not isinstance(tvmc_package, TVMCPackage):
raise TVMCException(
"This model doesn't seem to have been compiled yet. "
"Try calling tvmc.compile on the model before running it."
)
with ExitStack() as stack:
# Currently only two package formats are supported: "classic" and
# "mlf". The later can only be used for micro targets, i.e. with microTVM.
if device == "micro":
if tvmc_package.type != "mlf":
raise TVMCException(f"Model {tvmc_package.package_path} is not a MLF archive.")
project_dir = get_project_dir(tvmc_package.project_dir)
# This is guaranteed to work since project_dir was already checked when
# building the dynamic parser to accommodate the project options, so no
# checks are in place when calling GeneratedProject.
project_ = project.GeneratedProject.from_directory(project_dir, options)
else:
if tvmc_package.type == "mlf":
raise TVMCException(
"You're trying to run a model saved using the Model Library Format (MLF). "
"MLF can only be used to run micro device ('--device micro')."
)
if hostname:
if isinstance(port, str):
port = int(port)
# Remote RPC
if rpc_key:
logger.debug("Running on remote RPC tracker with key %s.", rpc_key)
session = request_remote(rpc_key, hostname, port, timeout=1000)
else:
logger.debug("Running on remote RPC with no key.")
session = rpc.connect(hostname, port)
elif device == "micro":
# Remote RPC (running on a micro target)
logger.debug("Running on remote RPC (micro target).")
try:
session = tvm.micro.Session(project_.transport())
stack.enter_context(session)
except Exception:
raise TVMCException("Could not open a session with the micro target.")
else:
# Local
logger.debug("Running a local session.")
session = rpc.LocalSession()
# Micro targets don't support uploading a model. The model to be run
# must be already flashed into the micro target before one tries
# to run it. Hence skip model upload for micro targets.
if device != "micro":
session.upload(tvmc_package.lib_path)
lib = session.load_module(tvmc_package.lib_name)
# TODO expand to other supported devices, as listed in tvm.rpc.client (@leandron)
logger.debug("Device is %s.", device)
if device == "cuda":
dev = session.cuda()
elif device == "cl":
dev = session.cl()
elif device == "metal":
dev = session.metal()
elif device == "vulkan":
dev = session.vulkan()
elif device == "rocm":
dev = session.rocm()
elif device == "micro":
dev = session.device
lib = session.get_system_lib()
else:
assert device == "cpu"
dev = session.cpu()
if tvmc_package.type == "vm":
assert inputs is not None, "vm runner requires inputs to be provided as a dict"
input_tensor = {}
for e, i in inputs.items():
input_tensor[e] = tvm.nd.array(i, dev)
if profile:
logger.debug("Creating vm with profile enabled.")
exe = profiler_vm.VirtualMachineProfiler(lib, dev)
res = exe.profile(**input_tensor, func_name="main")
# This print is intentional
print(res)
else:
exe = vm.VirtualMachine(lib, dev)
exe_outputs = exe.invoke("main", **input_tensor)
if benchmark:
times = exe.benchmark(
dev,
**input_tensor,
func_name="main",
repeat=repeat,
number=number,
end_to_end=end_to_end,
)
else:
exe.run(**input_tensor)
times = []
# Special handling if the output only has a single value
if not isinstance(exe_outputs, list):
exe_outputs = [exe_outputs]
outputs = {}
for i, val in enumerate(exe_outputs):
output_name = "output_{}".format(i)
outputs[output_name] = val.numpy()
else:
# TODO(gromero): Adjust for micro targets.
if profile:
logger.debug("Creating runtime with profiling enabled.")
module = debug_executor.create(tvmc_package.graph, lib, dev, dump_root="./prof")
else:
if device == "micro":
logger.debug("Creating runtime (micro) with profiling disabled.")
module = tvm.micro.create_local_graph_executor(tvmc_package.graph, lib, dev)
else:
logger.debug("Creating runtime with profiling disabled.")
module = executor.create(tvmc_package.graph, lib, dev)
logger.debug("Loading params into the runtime module.")
module.load_params(tvmc_package.params)
logger.debug("Collecting graph input shape and type:")
if isinstance(session, tvm.rpc.client.RPCSession):
# RPC does not support datatypes such as Array and Map,
# fallback to obtaining input information from graph json.
shape_dict, dtype_dict = get_input_info(tvmc_package.graph, tvmc_package.params)
else:
shape_dict, dtype_dict = module.get_input_info()
logger.debug("Graph input shape: %s", shape_dict)
logger.debug("Graph input type: %s", dtype_dict)
inputs_dict = make_inputs_dict(shape_dict, dtype_dict, inputs, fill_mode)
logger.debug("Setting inputs to the module.")
module.set_input(**inputs_dict)
# Run must be called explicitly if profiling
if profile:
logger.info("Running the module with profiling enabled.")
report = module.profile()
# This print is intentional
print(report)
if not benchmark or device == "micro":
# TODO(gromero): Fix time_evaluator() for micro targets. Once it's
# fixed module.benchmark() can be used instead and this if/else can
# be removed.
module.run()
times = []
else:
# Call the benchmarking function of the executor.
# Optionally measure e2e data transfers from the
# CPU to device memory overheads (e.g. PCIE
# overheads if the device is a discrete GPU).
if end_to_end:
dev = session.cpu()
times = module.benchmark(dev, number=number, repeat=repeat, end_to_end=end_to_end)
logger.debug("Collecting the output tensors.")
num_outputs = module.get_num_outputs()
outputs = {}
for i in range(num_outputs):
output_name = "output_{}".format(i)
outputs[output_name] = module.get_output(i).numpy()
return TVMCResult(outputs, times)
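# Example (sketch): running a compiled package locally on the CPU and
# benchmarking it. "module.tar" is an illustrative path:
#
#   tvmc_package = TVMCPackage(package_path="module.tar")
#   result = run_module(tvmc_package, "cpu", benchmark=True, repeat=3, number=10)
#   print(result.format_times())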
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/shape_parser.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC Shape Parsing
"""
import argparse
import re
from tvm import relay
def parse_shape_string(inputs_string):
"""Parse an input shape dictionary string to a usable dictionary.
Parameters
----------
inputs_string: str
A string of the form "input_name:[dim1,dim2,...,dimn] input_name2:[dim1,dim2]" that
indicates the desired shape for specific model inputs. Colons, forward slashes and dots
within input_names are supported. Spaces are supported inside of dimension arrays.
Returns
-------
shape_dict: dict
A dictionary mapping input names to their shape for use in relay frontend converters.
"""
# Create a regex pattern that extracts each separate input mapping.
# We want to be able to handle:
# * Spaces inside arrays
# * forward slashes inside names (but not at the beginning or end)
# * colons inside names (but not at the beginning or end)
# * dots inside names
pattern = r"(?:\w+\/)?[:\w.]+\:\s*\[\-?\d+(?:\,\s*\-?\d+)*\]"
input_mappings = re.findall(pattern, inputs_string)
if not input_mappings:
raise argparse.ArgumentTypeError(
"--input-shapes argument must be of the form "
'"input_name:[dim1,dim2,...,dimn] input_name2:[dim1,dim2]"'
)
shape_dict = {}
for mapping in input_mappings:
# Remove whitespace.
mapping = mapping.replace(" ", "")
# Split mapping into name and shape.
name, shape_string = mapping.rsplit(":", 1)
# Convert shape string into a list of integers or Anys if negative.
shape = [int(x) if int(x) > 0 else relay.Any() for x in shape_string.strip("][").split(",")]
# Add parsed mapping to shape dictionary.
shape_dict[name] = shape
return shape_dict
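# Example (sketch): negative dimensions become relay.Any() for dynamic shapes.
#
#   parse_shape_string("data:[1,3,224,224]")  # -> {"data": [1, 3, 224, 224]}
#   parse_shape_string("tokens:[1,-1]")       # -> {"tokens": [1, Any]}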
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file contains functions for processing target inputs for the TVMC CLI
"""
import os
import logging
import json
import re
import tvm
from tvm.driver import tvmc
from tvm.driver.tvmc import TVMCException
from tvm.driver.tvmc.composite_target import get_codegen_by_target, get_codegen_names
from tvm.ir.attrs import make_node, _ffi_api as attrs_api
from tvm.ir.transform import PassContext
from tvm.target import Target, TargetKind
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
# We can't tell the type inside an Array but all current options are strings so
# it can default to that. Bool is used alongside Integer but aren't distinguished
# between as both are represented by IntImm
INTERNAL_TO_NATIVE_TYPE = {"runtime.String": str, "IntImm": int, "Array": str}
INTERNAL_TO_HELP = {"runtime.String": " string", "IntImm": "", "Array": " options"}
def _valid_target_kinds():
codegen_names = tvmc.composite_target.get_codegen_names()
return filter(lambda target: target not in codegen_names, Target.list_kinds())
def _generate_target_kind_args(parser, kind_name):
target_group = parser.add_argument_group(f"target {kind_name}")
for target_option, target_type in TargetKind.options_from_name(kind_name).items():
if target_type in INTERNAL_TO_NATIVE_TYPE:
target_group.add_argument(
f"--target-{kind_name}-{target_option}",
type=INTERNAL_TO_NATIVE_TYPE[target_type],
help=f"target {kind_name} {target_option}{INTERNAL_TO_HELP[target_type]}",
)
def _generate_codegen_args(parser, codegen_name):
codegen = get_codegen_by_target(codegen_name)
pass_configs = PassContext.list_configs()
if codegen["config_key"] is not None and codegen["config_key"] in pass_configs:
target_group = parser.add_argument_group(f"target {codegen_name}")
attrs = make_node(pass_configs[codegen["config_key"]]["type"])
fields = attrs_api.AttrsListFieldInfo(attrs)
for field in fields:
for tvm_type, python_type in INTERNAL_TO_NATIVE_TYPE.items():
if field.type_info.startswith(tvm_type):
target_option = field.name
target_group.add_argument(
f"--target-{codegen_name}-{target_option}",
type=python_type,
help=f"target {codegen_name} {target_option}{python_type}",
)
def generate_target_args(parser):
"""Walks through the TargetKind registry and generates arguments for each Target's options"""
parser.add_argument(
"--target",
help="compilation target as plain string, inline JSON or path to a JSON file",
required=False,
)
for target_kind in _valid_target_kinds():
_generate_target_kind_args(parser, target_kind)
for codegen_name in get_codegen_names():
_generate_codegen_args(parser, codegen_name)
def _reconstruct_target_kind_args(args, kind_name):
kind_options = {}
for target_option, target_type in TargetKind.options_from_name(kind_name).items():
if target_type in INTERNAL_TO_NATIVE_TYPE:
var_name = f"target_{kind_name.replace('-', '_')}_{target_option.replace('-', '_')}"
option_value = getattr(args, var_name)
if option_value is not None:
kind_options[target_option] = getattr(args, var_name)
return kind_options
def _reconstruct_codegen_args(args, codegen_name):
codegen = get_codegen_by_target(codegen_name)
pass_configs = PassContext.list_configs()
codegen_options = {}
if codegen["config_key"] is not None and codegen["config_key"] in pass_configs:
attrs = make_node(pass_configs[codegen["config_key"]]["type"])
fields = attrs_api.AttrsListFieldInfo(attrs)
for field in fields:
for tvm_type in INTERNAL_TO_NATIVE_TYPE:
if field.type_info.startswith(tvm_type):
target_option = field.name
var_name = (
f"target_{codegen_name.replace('-', '_')}_{target_option.replace('-', '_')}"
)
option_value = getattr(args, var_name)
if option_value is not None:
codegen_options[target_option] = option_value
return codegen_options
def reconstruct_target_args(args):
"""Reconstructs the target options from the arguments"""
reconstructed = {}
for target_kind in _valid_target_kinds():
kind_options = _reconstruct_target_kind_args(args, target_kind)
if kind_options:
reconstructed[target_kind] = kind_options
for codegen_name in get_codegen_names():
codegen_options = _reconstruct_codegen_args(args, codegen_name)
if codegen_options:
reconstructed[codegen_name] = codegen_options
return reconstructed
def validate_targets(parse_targets, additional_target_options=None):
"""
Apply a series of validations in the targets provided via CLI.
"""
tvm_target_kinds = tvm.target.Target.list_kinds()
targets = [t["name"] for t in parse_targets]
if len(targets) > len(set(targets)):
raise TVMCException("Duplicate target definitions are not allowed")
if targets[-1] not in tvm_target_kinds:
tvm_target_names = ", ".join(tvm_target_kinds)
raise TVMCException(
f"The last target needs to be a TVM target. Choices: {tvm_target_names}"
)
tvm_targets = [t for t in targets if t in _valid_target_kinds()]
if len(tvm_targets) > 2:
verbose_tvm_targets = ", ".join(tvm_targets)
raise TVMCException(
"Only two of the following targets can be used at a time. "
f"Found: {verbose_tvm_targets}."
)
if additional_target_options is not None:
for target_name in additional_target_options:
if not any([target for target in parse_targets if target["name"] == target_name]):
first_option = list(additional_target_options[target_name].keys())[0]
raise TVMCException(
f"Passed --target-{target_name}-{first_option}"
f" but did not specify {target_name} target"
)
def tokenize_target(target):
"""
Extract a list of tokens from a target specification text.
It covers some corner-cases that are not covered by the built-in
module 'shlex', such as the use of "+" as a punctuation character.
Example
-------
For the input `foo -op1=v1 -op2="v ,2", bar -op3=v-4` we
should obtain:
["foo", "-op1=v1", "-op2="v ,2"", ",", "bar", "-op3=v-4"]
Parameters
----------
target : str
Target options sent via CLI arguments
Returns
-------
list of str
a list of parsed tokens extracted from the target string
"""
# Regex to tokenize the "--target" value. It is split into five parts
# to match with:
# 1. target and option names e.g. llvm, -mattr=, -mcpu=
# 2. option values, all together, without quotes e.g. -mattr=+foo,+opt
# 3. option values, when single quotes are used e.g. -mattr='+foo, +opt'
# 4. option values, when double quotes are used e.g. -mattr="+foo ,+opt"
# 5. commas that separate different targets e.g. "my-target, llvm"
target_pattern = (
r"(\-{0,2}[\w\-]+\=?"
r"(?:[\w\+\-\.]+(?:,[\w\+\-\.])*"
r"|[\'][\w\+\-,\s\.]+[\']"
r"|[\"][\w\+\-,\s\.]+[\"])*"
r"|,)"
)
return re.findall(target_pattern, target)
def parse_target(target):
"""
Parse a plain string of targets provided via a command-line
argument.
To send more than one codegen, a comma-separated list
is expected. Options start with -<option_name>=<value>.
We use python standard library 'shlex' to parse the argument in
a POSIX compatible way, so that if options are defined as
strings with spaces or commas, for example, this is considered
and parsed accordingly.
Example
-------
For the input `--target="foo -op1=v1 -op2="v ,2", bar -op3=v-4"` we
should obtain:
[
{
name: "foo",
opts: {"op1":"v1", "op2":"v ,2"},
raw: 'foo -op1=v1 -op2="v ,2"'
},
{
name: "bar",
opts: {"op3":"v-4"},
raw: 'bar -op3=v-4'
}
]
Parameters
----------
target : str
Target options sent via CLI arguments
Returns
-------
codegens : list of dict
This list preserves the order in which codegens were
provided via command line. Each Dict contains three keys:
'name', containing the name of the codegen; 'opts' containing
a key-value for all options passed via CLI; 'raw',
containing the plain string for this codegen
"""
codegen_names = tvmc.composite_target.get_codegen_names()
codegens = []
tvm_target_kinds = tvm.target.Target.list_kinds()
parsed_tokens = tokenize_target(target)
split_codegens = []
current_codegen = []
split_codegens.append(current_codegen)
for token in parsed_tokens:
# every time there is a comma separating
# two codegen definitions, prepare for
# a new codegen
if token == ",":
current_codegen = []
split_codegens.append(current_codegen)
else:
# collect a new token for the current
# codegen being parsed
current_codegen.append(token)
# at this point we have a list of lists,
# each item on the first list is a codegen definition
# in the comma-separated values
for codegen_def in split_codegens:
# the first is expected to be the name
name = codegen_def[0]
is_tvm_target = name in tvm_target_kinds and name not in codegen_names
raw_target = " ".join(codegen_def)
all_opts = codegen_def[1:] if len(codegen_def) > 1 else []
opts = {}
for opt in all_opts:
try:
# deal with -- prefixed flags
if opt.startswith("--"):
opt_name = opt[2:]
opt_value = True
else:
opt = opt[1:] if opt.startswith("-") else opt
opt_name, opt_value = opt.split("=", maxsplit=1)
# remove quotes from the value: quotes are only parsed if they match,
# so it is safe to assume that if the string starts with quote, it ends
# with quote.
opt_value = opt_value[1:-1] if opt_value[0] in ('"', "'") else opt_value
except ValueError:
raise ValueError(f"Error when parsing '{opt}'")
opts[opt_name] = opt_value
codegens.append(
{"name": name, "opts": opts, "raw": raw_target, "is_tvm_target": is_tvm_target}
)
return codegens
def is_inline_json(target):
try:
json.loads(target)
return True
except json.decoder.JSONDecodeError:
return False
def _combine_target_options(target, additional_target_options=None):
if additional_target_options is None:
return target
if target["name"] in additional_target_options:
target["opts"].update(additional_target_options[target["name"]])
return target
def _recombobulate_target(target):
name = target["name"]
opts = " ".join([f"-{key}={value}" for key, value in target["opts"].items()])
return f"{name} {opts}"
def target_from_cli(target, additional_target_options=None):
"""
Create a tvm.target.Target instance from a
command line interface (CLI) string.
Parameters
----------
target : str
compilation target as plain string,
inline JSON or path to a JSON file
additional_target_options: Optional[Dict[str, Dict[str,str]]]
dictionary of additional target options to be
combined with parsed targets
Returns
-------
tvm.target.Target
an instance of target device information
extra_targets : list of dict
This list preserves the order in which extra targets were
provided via command line. Each Dict contains three keys:
'name', containing the name of the codegen; 'opts' containing
a key-value for all options passed via CLI; 'raw',
containing the plain string for this codegen
"""
extra_targets = []
if os.path.isfile(target):
with open(target) as target_file:
logger.debug("target input is a path: %s", target)
target = "".join(target_file.readlines())
elif is_inline_json(target):
logger.debug("target input is inline JSON: %s", target)
else:
logger.debug("target input is plain text: %s", target)
try:
parsed_targets = parse_target(target)
except ValueError as error:
raise TVMCException(f"Error parsing target string '{target}'.\nThe error was: {error}")
validate_targets(parsed_targets, additional_target_options)
tvm_targets = [
_combine_target_options(t, additional_target_options)
for t in parsed_targets
if t["is_tvm_target"]
]
# Validated target strings have 1 or 2 tvm targets, otherwise
# `validate_targets` above will fail.
if len(tvm_targets) == 1:
target = _recombobulate_target(tvm_targets[0])
target_host = None
else:
assert len(tvm_targets) == 2
target = _recombobulate_target(tvm_targets[0])
target_host = _recombobulate_target(tvm_targets[1])
extra_targets = [
_combine_target_options(t, additional_target_options)
for t in parsed_targets
if not t["is_tvm_target"]
]
return tvm.target.Target(target, host=target_host), extra_targets
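# Example (sketch): with two TVM targets the second one becomes the host.
# The option string is illustrative:
#
#   target, extra_targets = target_from_cli("cuda -arch=sm_80, llvm")
#   # target.kind.name == "cuda", target.host.kind.name == "llvm", extra_targets == []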
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/tracker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC Remote Tracker
"""
import logging
from urllib.parse import urlparse
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
def tracker_host_port_from_cli(rpc_tracker_str):
"""Extract hostname and (optional) port from strings
like "1.2.3.4:9090" or "4.3.2.1".
Used as a helper function to cover --rpc-tracker
command line argument, in different subcommands.
Parameters
----------
rpc_tracker_str : str
hostname (or IP address) and port of the RPC tracker,
in the format 'hostname[:port]'.
Returns
-------
rpc_hostname : str or None
hostname or IP address, extracted from input.
rpc_port : int or None
port number extracted from input (9090 default).
"""
rpc_hostname = rpc_port = None
if rpc_tracker_str:
parsed_url = urlparse("//%s" % rpc_tracker_str)
rpc_hostname = parsed_url.hostname
rpc_port = parsed_url.port or 9090
logger.info("RPC tracker hostname: %s", rpc_hostname)
logger.info("RPC tracker port: %s", rpc_port)
return rpc_hostname, rpc_port
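# Example (sketch):
#
#   tracker_host_port_from_cli("192.168.0.10:9191")  # -> ("192.168.0.10", 9191)
#   tracker_host_port_from_cli("192.168.0.10")       # -> ("192.168.0.10", 9090)
#   tracker_host_port_from_cli(None)                 # -> (None, None)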
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC Graph Transforms
"""
from tvm import relay, transform
from tvm.driver.tvmc import TVMCException
def convert_graph_layout(mod, desired_layout):
"""Alter the layout of the input graph.
Parameters
----------
mod : tvm.IRModule
The relay module to convert.
desired_layout : str
The layout to convert to.
Returns
-------
mod : tvm.IRModule
The converted module.
"""
# Assume for the time being that graphs only have
# conv2d as heavily layout-sensitive operators.
desired_layouts = {
"nn.conv2d": [desired_layout, "default"],
"nn.conv2d_transpose": [desired_layout, "default"],
"qnn.conv2d": [desired_layout, "default"],
}
# Convert the layout of the graph where possible.
seq = transform.Sequential(
[
relay.transform.RemoveUnusedFunctions(),
relay.transform.ConvertLayout(desired_layouts),
relay.transform.FoldConstant(),
]
)
try:
return seq(mod)
except Exception as err:
raise TVMCException("Error converting layout to {0}: {1}".format(desired_layout, str(err)))
| https://github.com/zk-ml/tachikoma |
python/tvm/driver/tvmc/workspace_pools.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions for processing dynamic workspace pool TVMC args
"""
import logging
import re
from tvm.driver.tvmc import TVMCException
from tvm.target import Target
from tvm.ir.memory_pools import PoolInfoProperties, WorkspaceMemoryPools, WorkspacePoolInfo
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
def generate_workspace_pools_args(parser):
"""Generates arguments for each Workspace Pools's options"""
parser.add_argument(
"--workspace-pools",
help="""The name of the memory pool
Example usage: --workspace-pools=flash""",
)
parser.add_argument(
"--workspace-pools-targets",
help="""The name of the targets specified for the memory pool
Example usage: --workspace-pools-targets=flash:llvm""",
action="append",
)
parser.add_argument(
"--workspace-pools-size-hint-bytes",
nargs="?",
help="""The expected size hint to be used by the allocator.
Example usage: --workspace-pools-size-hint-bytes=flash:8""",
action="append",
)
parser.add_argument(
"--workspace-pools-clock-frequency-hz",
nargs="?",
help="""The clock frequency that the memory pool runs at in Hz.
Example usage: --workspace-pools-clock-frequency-hz=flash:70000000""",
action="append",
)
parser.add_argument(
"--workspace-pools-read-bandwidth-bytes-per-cycle",
nargs="?",
help="""The read bandwidth of the memory pool in bytes/cycle.
Example usage: --workspace-pools-read-bandwidth-bytes-per-cycle=flash:4""",
action="append",
)
parser.add_argument(
"--workspace-pools-write-bandwidth-bytes-per-cycle",
nargs="?",
help="""The write bandwidth of the memory pool in bytes/cycle.
Example usage: --workspace-pools-write-bandwidth-bytes-per-cycle=flash:8""",
action="append",
)
parser.add_argument(
"--workspace-pools-read-latency-cycles",
nargs="?",
help="""The read latency of the memory pool in cycles.
Example usage: --workspace-pools-read-latency-cycles=flash:4""",
action="append",
)
parser.add_argument(
"--workspace-pools-write-latency-cycles",
nargs="?",
help="""The write latency of the memory pool in cycles.
Example usage: --workspace-pools-write-latency-cycles=flash:8""",
action="append",
)
parser.add_argument(
"--workspace-pools-target-burst-bytes",
help="""The burst length of the memory pool in bytes per target.
Example usage: --workspace-pools-target-burst-bytes=flash:accel:1""",
action="append",
)
def _parse_target_burst(attr_str, pool_name):
if pool_name not in attr_str:
return {}
return {target: int(attr_str[pool_name][target]) for target in attr_str[pool_name]}
def _parse_target_string(attr_str, targets, pool_name):
if attr_str is None:
raise TVMCException(f'No target specified for Workspace Pool "{pool_name}"')
    target_names = re.split(",", attr_str)
    matched_targets = [
        target
        for target in targets
        if any(target.kind.name in target_string_match for target_string_match in target_names)
    ]
    if not matched_targets:
        raise TVMCException(f'Workspace Pool "{pool_name}" using undefined Target "{target_names}"')
return matched_targets
def _split_pools_to_pool_names(attr_str):
return re.split(",", attr_str) if attr_str else []
def _parse_target_attributes_of_pool_name(attr_str, targets):
if not targets or attr_str is None:
return {}
target_attributes = {}
for pool_values in attr_str:
pool_name, target_name, target_value = re.split(":", pool_values)
if pool_name not in target_attributes:
target_attributes[pool_name] = {}
matched_targets = [target for target in targets if target_name == target.kind.name]
if matched_targets:
target_attributes[pool_name][matched_targets[0]] = target_value
else:
raise TVMCException(
"The workspace pool target specification "
"needs to contain a subset of the same TVM "
"targets as when specifying targets to use."
)
return target_attributes
def _parse_attribute_of_pool_name(attr_str):
return dict(pool.split(":", maxsplit=1) for pool in attr_str) if attr_str else {}
def workspace_pools_recombobulate(parsed, targets, extra_target):
"""Reconstructs the Workspace Pools args and returns a WorkspaceMemoryPool object"""
WORKSPACE_POOL_PARAMS = [
"workspace_pools_size_hint_bytes",
"workspace_pools_targets",
"workspace_pools_clock_frequency_hz",
"workspace_pools_read_bandwidth_bytes_per_cycle",
"workspace_pools_write_bandwidth_bytes_per_cycle",
"workspace_pools_read_latency_cycles",
"workspace_pools_write_latency_cycles",
]
WORKSPACE_POOL_TARGET_PARAMS = [
"workspace_pools_target_burst_bytes",
]
workspace_pools = _split_pools_to_pool_names(parsed.workspace_pools)
if not workspace_pools:
return None
parse_attribute_to_pool_name = {
workspace_pool_param: _parse_attribute_of_pool_name(getattr(parsed, workspace_pool_param))
for workspace_pool_param in WORKSPACE_POOL_PARAMS
}
parse_target_burst_bytes_to_pool = {
workspace_pool_param: _parse_target_attributes_of_pool_name(
getattr(parsed, workspace_pool_param), targets
)
for workspace_pool_param in WORKSPACE_POOL_TARGET_PARAMS
}
# Load extra targets from CLI
additional_targets = []
for t in extra_target:
additional_targets.append(Target(t["raw"], host=targets[0].host or targets[0]))
target = targets + additional_targets
if targets[0].host:
target.append(targets[0].host)
return WorkspaceMemoryPools(
[
WorkspacePoolInfo(
pool_name,
targets=_parse_target_string(
parse_attribute_to_pool_name["workspace_pools_targets"].get(pool_name),
target,
pool_name,
),
pool_info_properties=PoolInfoProperties(
size_hint_bytes=int(
parse_attribute_to_pool_name["workspace_pools_size_hint_bytes"].get(
pool_name, -1
)
),
clock_frequency_hz=int(
parse_attribute_to_pool_name["workspace_pools_clock_frequency_hz"].get(
pool_name, -1
)
),
read_bandwidth_bytes_per_cycle=int(
parse_attribute_to_pool_name[
"workspace_pools_read_bandwidth_bytes_per_cycle"
].get(pool_name, -1)
),
write_bandwidth_bytes_per_cycle=int(
parse_attribute_to_pool_name[
"workspace_pools_write_bandwidth_bytes_per_cycle"
].get(pool_name, -1)
),
read_latency_cycles=int(
parse_attribute_to_pool_name["workspace_pools_read_latency_cycles"].get(
pool_name, 0
)
),
write_latency_cycles=int(
parse_attribute_to_pool_name["workspace_pools_write_latency_cycles"].get(
pool_name, 0
)
),
target_burst_bytes=_parse_target_burst(
parse_target_burst_bytes_to_pool["workspace_pools_target_burst_bytes"],
pool_name,
),
),
)
for pool_name in workspace_pools
]
)
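# Hedged end-to-end sketch, not part of the original module: wire the generated
# CLI flags through to workspace_pools_recombobulate. Pool/target names here
# ("sram", "llvm") are invented for illustration.
if __name__ == "__main__":
    import argparse

    _parser = argparse.ArgumentParser()
    generate_workspace_pools_args(_parser)
    _parsed = _parser.parse_args(
        [
            "--workspace-pools=sram",
            "--workspace-pools-targets=sram:llvm",
            "--workspace-pools-size-hint-bytes=sram:2048",
        ]
    )
    _pools = workspace_pools_recombobulate(_parsed, [Target("llvm")], extra_target=[])
    print(_pools)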
| https://github.com/zk-ml/tachikoma |
python/tvm/error.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Structured error classes in TVM.
Each error class takes an error message as its input.
See the example sections for suggested message conventions.
To make the code more readable, we recommend that developers copy
the examples and raise errors with the same message convention.
.. note::
Please also refer to :ref:`error-handling-guide`.
"""
from tvm._ffi.base import register_error, TVMError
@register_error
class InternalError(TVMError):
"""Internal error in the system.
Examples
--------
.. code:: c++
// Example code C++
LOG(FATAL) << "InternalError: internal error detail.";
.. code:: python
# Example code in python
raise InternalError("internal error detail")
"""
def __init__(self, msg):
# Patch up additional hint message.
if "TVM hint:" not in msg:
msg += (
"\nTVM hint: You hit an internal error. "
+ "Please open a thread on https://discuss.tvm.apache.org/ to report it."
)
super(InternalError, self).__init__(msg)
register_error("ValueError", ValueError)
register_error("TypeError", TypeError)
register_error("AttributeError", AttributeError)
register_error("KeyError", KeyError)
register_error("IndexError", IndexError)
@register_error
class RPCError(TVMError):
"""Error thrown by the remote server handling the RPC call."""
@register_error
class OpError(TVMError):
"""Base class of all operator errors in frontends."""
@register_error
class OpNotImplemented(OpError, NotImplementedError):
"""Operator is not implemented.
Example
-------
.. code:: python
raise OpNotImplemented(
"Operator {} is not supported in {} frontend".format(
missing_op, frontend_name))
"""
@register_error
class OpAttributeRequired(OpError, AttributeError):
"""Required attribute is not found.
Example
-------
.. code:: python
raise OpAttributeRequired(
"Required attribute {} not found in operator {}".format(
attr_name, op_name))
"""
@register_error
class OpAttributeInvalid(OpError, AttributeError):
"""Attribute value is invalid when taking in a frontend operator.
Example
-------
.. code:: python
raise OpAttributeInvalid(
"Value {} in attribute {} of operator {} is not valid".format(
value, attr_name, op_name))
"""
@register_error
class OpAttributeUnImplemented(OpError, NotImplementedError):
"""Attribute is not supported in a certain frontend.
Example
-------
.. code:: python
raise OpAttributeUnImplemented(
"Attribute {} is not supported in operator {}".format(
attr_name, op_name))
"""
@register_error
class DiagnosticError(TVMError):
"""Error diagnostics were reported during the execution of a pass.
See the configured diagnostic renderer for detailed error information.
"""
| https://github.com/zk-ml/tachikoma |
python/tvm/exec/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace of executables python files that directly run throw cmd"""
| https://github.com/zk-ml/tachikoma |
python/tvm/exec/autotvm_log_editor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Pick best log entries from a large file and store them to a small file"""
import argparse
import os
import logging
import warnings
from .. import autotvm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--act", type=str, choices=["pick-best"], required=True, help="The action")
parser.add_argument("--i", type=str, help="The input file or directory", required=True)
parser.add_argument("--o", type=str, help="The output file")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.act == "pick-best":
if os.path.isfile(args.i):
args.o = args.o or args.i + ".best.log"
autotvm.record.pick_best(args.i, args.o)
elif os.path.isdir(args.i):
args.o = args.o or "best.log"
tmp_filename = args.o + ".tmp"
with open(tmp_filename, "w") as tmp_fout:
for filename in os.listdir(args.i):
if filename.endswith(".log"):
try:
                            autotvm.record.pick_best(os.path.join(args.i, filename), tmp_fout)
except Exception: # pylint: disable=broad-except
warnings.warn("Ignore invalid file %s" % filename)
logging.info("Run final filter...")
autotvm.record.pick_best(tmp_filename, args.o)
os.remove(tmp_filename)
logging.info("Output to %s ...", args.o)
else:
raise ValueError("Invalid input file: " + args.i)
else:
raise ValueError("Invalid action " + args.act)
| https://github.com/zk-ml/tachikoma |
python/tvm/exec/measure_peak.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""measure bandwidth and compute peak
e.g.
python3 -m tvm.exec.measure_peak --target cuda --rpc-host 127.0.0.1 --rpc-port 9090
python3 -m tvm.exec.measure_peak --target opencl --target-host "llvm -mtriple=aarch64-linux-gnu" \
--rpc-host $TVM_OPENCL_DEVICE_HOST --rpc-port 9090
"""
import argparse
import logging
from tvm.target import Target
from ..contrib.peak import measure_peak_all
def main():
"""Main function"""
parser = argparse.ArgumentParser()
parser.add_argument("--target", type=str, default="llvm", help="The build target")
parser.add_argument(
"--target-host", type=str, default=None, help="The host code compilation target"
)
parser.add_argument(
"--rpc-host", type=str, default="127.0.0.1", help="the hostname of the server"
)
parser.add_argument("--rpc-port", type=int, default=9090, help="The port of the RPC")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
args.target, args.target_host = Target.canon_target_and_host(args.target, args.target_host)
measure_peak_all(args.target, args.target_host, args.rpc_host, args.rpc_port)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/exec/microtvm_debug_shell.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name, invalid-name
"""Start an RPC server intended for use as a microTVM debugger.
microTVM aims to be runtime-agnostic, and to that end, frameworks often define command-line tools
used to launch a debug flow. These tools often manage the process of connecting to an attached
device using a hardware debugger, exposing a GDB server, and launching GDB connected to that
server with a source file attached. Such a debugger typically cannot be executed concurrently
with any flash tool, so this integration point is provided to allow TVM to launch and
terminate any debuggers integrated with the larger microTVM compilation/autotuning flow.
To use this tool, first launch this script in a separate terminal window. Then, provide the hostport
to your compiler's Flasher instance.
"""
import argparse
import importlib
import logging
import socket
import struct
import tvm.micro.debugger as _ # NOTE: imported to expose global PackedFuncs over RPC.
from .._ffi.base import py_str
from ..rpc import base
from ..rpc import _ffi_api
_LOG = logging.getLogger(__name__)
def parse_args():
"""Parse command line arguments to this script."""
parser = argparse.ArgumentParser(description="microTVM debug-tool runner")
parser.add_argument("--host", default="0.0.0.0", help="hostname to listen on")
parser.add_argument("--port", type=int, default=9090, help="hostname to listen on")
parser.add_argument(
"--impl",
help=(
"If given, name of a module underneath tvm.micro.contrib "
"which contains the Debugger implementation to use. For example, to enable a "
"debugger named BarDebugger in python/tvm/micro/contrib/foo.py, specify either "
"'tvm.micro.contrib.foo' or 'foo' here. To enable a debugger named BazDebugger in "
"a third-party module ext_package.debugger, specify 'ext_package.debugger' here. "
"NOTE: the module cannot be in a sub-package of tvm.micro.contrib."
),
)
return parser.parse_args()
class ConnectionClosedError(Exception):
"""Raised when the connection is closed."""
def handle_conn(conn, rpc_key):
"""Handle a single connection that has just been accept'd()."""
def send(data):
conn.sendall(data)
return len(data)
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
return
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:"
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
_LOG.warning("mismatch key from %s", addr)
return
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
server = _ffi_api.CreateEventDrivenServer(send, "microtvm-rpc-debugger", key)
def _readall(n):
buf = bytearray()
while len(buf) < n:
x = conn.recv(n - len(buf))
if not x:
raise ConnectionClosedError()
buf = buf + x
return buf
while True:
packet_length_bytes = _readall(8)
packet_length = struct.unpack("<q", packet_length_bytes)[0]
if not packet_length:
break
status = server(packet_length_bytes, 3)
if status == 0:
break
packet_body = _readall(packet_length)
status = server(packet_body, 3)
def main():
"""Main entry point for microTVM debug shell."""
args = parse_args()
logging.basicConfig(level=logging.INFO)
    if args.impl:
        impl_module = args.impl
        if "." not in impl_module:
            impl_module = f"tvm.micro.contrib.{impl_module}"
        importlib.import_module(impl_module)
sock = socket.socket(base.get_addr_family([args.host, args.port]), socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((args.host, args.port))
sock.listen(1)
bind_addr, bind_port = sock.getsockname()
_LOG.info("listening for connections on %s:%d", bind_addr, bind_port)
while True:
conn, peer = sock.accept()
_LOG.info("accepted connection from %s", peer)
try:
handle_conn(conn, "")
except ConnectionClosedError:
pass
finally:
conn.close()
_LOG.info("closed connection from %s", peer)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/exec/popen_worker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Internal PopenWorker for PopenPool."""
import sys
import os
import struct
import threading
import traceback
import pickle
import logging
import cloudpickle
from tvm.contrib.popen_pool import StatusKind
class TimeoutStatus:
    __slots__ = ["status"]
def __init__(self):
self.status = StatusKind.RUNNING
def main():
"""Main worker function"""
if len(sys.argv) != 3:
print("Usage: <read_fd> <write_fd>")
return
if sys.platform == "win32":
# pylint: disable=import-outside-toplevel
import msvcrt
reader = os.fdopen(msvcrt.open_osfhandle(int(sys.argv[1]), os.O_BINARY), "rb")
writer = os.fdopen(msvcrt.open_osfhandle(int(sys.argv[2]), os.O_BINARY), "wb")
else:
reader = os.fdopen(int(sys.argv[1]), "rb")
writer = os.fdopen(int(sys.argv[2]), "wb")
logging.basicConfig(level=logging.INFO)
lock = threading.Lock()
def _respond(ret_value):
"""Send data back to the client."""
data = cloudpickle.dumps(ret_value, protocol=pickle.HIGHEST_PROTOCOL)
writer.write(struct.pack("<i", len(data)))
writer.write(data)
writer.flush()
def _cancel_run(status):
lock.acquire()
if status.status == StatusKind.RUNNING:
_respond((StatusKind.TIMEOUT, TimeoutError()))
status.status = StatusKind.TIMEOUT
lock.release()
while True:
raw_bytes_size = reader.read(4)
if len(raw_bytes_size) != 4:
# the parent exited
return
bytes_size = struct.unpack("<i", raw_bytes_size)[0]
fn, args, kwargs, timeout = cloudpickle.loads(reader.read(bytes_size))
status = TimeoutStatus()
if timeout is not None:
watcher = threading.Timer(timeout, _cancel_run, [status])
watcher.daemon = True
watcher.start()
# pylint: disable=broad-except
try:
result = fn(*args, **kwargs)
ret_value = (StatusKind.COMPLETE, result)
except Exception as exception:
msg = traceback.format_exc()
ret_value = (StatusKind.EXCEPTION, type(exception)(msg))
if timeout is not None:
watcher.cancel()
lock.acquire()
if status.status == StatusKind.RUNNING:
_respond(ret_value)
status.status = StatusKind.COMPLETE
lock.release()
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, IOError):
pass
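# Wire-format sketch (for illustration; inferred from the reader/writer logic
# above): each request is struct.pack("<i", len(payload)) + payload, where
# payload = cloudpickle.dumps((fn, args, kwargs, timeout)); each response uses
# the same framing around cloudpickle.dumps((StatusKind, value)).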
| https://github.com/zk-ml/tachikoma |
python/tvm/exec/query_rpc_tracker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tool to query RPC tracker status"""
from __future__ import absolute_import
import logging
import argparse
import os
from .. import rpc
def main():
"""Main function"""
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="", help="the hostname of the tracker")
parser.add_argument("--port", type=int, default=None, help="The port of the RPC")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
# default to local host or environment variable
if not args.host:
args.host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
if not args.port:
args.port = int(os.environ.get("TVM_TRACKER_PORT", "9190"))
conn = rpc.connect_tracker(args.host, args.port)
# pylint: disable=superfluous-parens
print("Tracker address %s:%d\n" % (args.host, args.port))
print("%s" % conn.text_summary())
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/exec/rpc_proxy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name, invalid-name
"""RPC web proxy, allows redirect to websocket based RPC servers(browsers)"""
import logging
import argparse
import os
from tvm.rpc.proxy import Proxy
def find_example_resource():
"""Find resource examples."""
curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
base_path = os.path.abspath(os.path.join(curr_path, "..", "..", ".."))
index_page = os.path.join(base_path, "web", "apps", "browser", "rpc_server.html")
resource_files = [
os.path.join(base_path, "web", "dist", "tvmjs.bundle.js"),
os.path.join(base_path, "web", "dist", "wasm", "tvmjs_runtime.wasi.js"),
]
resource_base = os.path.join(base_path, "web", "dist", "www")
if os.path.isdir(resource_base):
for fname in os.listdir(resource_base):
full_name = os.path.join(resource_base, fname)
if os.path.isfile(full_name):
resource_files.append(full_name)
for fname in [index_page] + resource_files:
if not os.path.exists(fname):
raise RuntimeError("Cannot find %s" % fname)
return index_page, resource_files
def main(args):
"""Main function"""
if args.tracker:
url, port = args.tracker.split(":")
port = int(port)
tracker_addr = (url, port)
else:
tracker_addr = None
if args.example_rpc:
index, js_files = find_example_resource()
prox = Proxy(
args.host,
port=args.port,
web_port=args.web_port,
index_page=index,
resource_files=js_files,
tracker_addr=tracker_addr,
)
else:
prox = Proxy(args.host, port=args.port, web_port=args.web_port, tracker_addr=tracker_addr)
prox.proc.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="127.0.0.1", help="the hostname of the server")
parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
parser.add_argument(
"--web-port", type=int, default=8888, help="The port of the http/websocket server"
)
parser.add_argument(
"--example-rpc", type=bool, default=False, help="Whether to switch on example rpc mode"
)
parser.add_argument("--tracker", type=str, default="", help="Report to RPC tracker")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
main(args)
| https://github.com/zk-ml/tachikoma |
python/tvm/exec/rpc_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name, invalid-name
"""Start an RPC server"""
import argparse
import logging
from .. import rpc
def main(args):
"""Main function
Parameters
----------
args : argparse.Namespace
parsed args from command-line invocation
"""
if args.tracker:
url, port = args.tracker.rsplit(":", 1)
port = int(port)
tracker_addr = (url, port)
if not args.key:
raise RuntimeError("Need key to present type of resource when tracker is available")
else:
tracker_addr = None
server = rpc.Server(
args.host,
args.port,
args.port_end,
is_proxy=args.through_proxy,
key=args.key,
tracker_addr=tracker_addr,
load_library=args.load_library,
custom_addr=args.custom_addr,
silent=args.silent,
no_fork=not args.fork,
)
server.proc.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
)
parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
parser.add_argument(
"--through-proxy",
dest="through_proxy",
action="store_true",
help=(
"Whether this server provide service through a proxy. If this is true, the host and"
"port actually is the address of the proxy."
),
)
parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
parser.add_argument(
"--tracker",
type=str,
help=("The address of RPC tracker in host:port format. " "e.g. (10.77.1.234:9190)"),
)
parser.add_argument(
"--key", type=str, default="", help="The key used to identify the device type in tracker."
)
parser.add_argument("--silent", action="store_true", help="Whether run in silent mode.")
parser.add_argument("--load-library", type=str, help="Additional library to load")
parser.add_argument(
"--no-fork",
dest="fork",
action="store_false",
help="Use spawn mode to avoid fork. This option \
is able to avoid potential fork problems with Metal, OpenCL \
and ROCM compilers.",
)
parser.add_argument(
"--custom-addr", type=str, help="Custom IP Address to Report to RPC Tracker"
)
parser.set_defaults(fork=True)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
    if args.fork and not args.silent:
logging.info(
"If you are running ROCM/Metal, fork will cause "
"compiler internal error. Try to launch with arg ```--no-fork```"
)
main(args)
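# Hedged client-side sketch: once `python3 -m tvm.exec.rpc_server --port 9090`
# is running, a session can be opened with:
#   from tvm import rpc
#   remote = rpc.connect("127.0.0.1", 9090)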
| https://github.com/zk-ml/tachikoma |
python/tvm/exec/rpc_tracker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name, invalid-name
"""Tool to start RPC tracker"""
import logging
import argparse
from ..rpc.tracker import Tracker
def main(args):
"""Main function"""
tracker = Tracker(args.host, port=args.port, port_end=args.port_end, silent=args.silent)
tracker.proc.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
)
parser.add_argument("--port", type=int, default=9190, help="The port of the RPC")
parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
parser.add_argument("--silent", action="store_true", help="Whether run in silent mode.")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
main(args)
| https://github.com/zk-ml/tachikoma |
python/tvm/generic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic operators."""
# pylint:disable=unused-wildcard-import, wildcard-import
from .tir.generic import *
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""Common data structures across all IR variants."""
from .base import SourceName, Span, Node, EnvFunc, load_json, save_json
from .base import structural_equal, assert_structural_equal, structural_hash
from .type import Type, TypeKind, PrimType, PointerType, TypeVar, GlobalTypeVar, TupleType
from .type import TypeConstraint, FuncType, IncompleteType, RelayRefType
from .tensor_type import TensorType
from .affine_type import TensorAffineType, TupleAffineType
from .type_relation import TypeCall, TypeRelation
from .expr import BaseExpr, PrimExpr, RelayExpr, GlobalVar, Range
from .op import Op, register_op_attr, register_intrin_lowering
from .function import CallingConv, BaseFunc
from .adt import Constructor, TypeData
from .module import IRModule
from .attrs import Attrs, DictAttrs, make_node
from .container import Array, Map
from .memory_pools import (
PoolInfo,
WorkspacePoolInfo,
ConstantPoolInfo,
WorkspaceMemoryPools,
ConstantMemoryPools,
PoolInfoProperties,
)
from . import transform
from . import instrument
from . import diagnostics
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.ir"""
import tvm._ffi
tvm._ffi._init_api("ir", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/_ffi_instrument_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.instrument"""
import tvm._ffi
tvm._ffi._init_api("instrument", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/_ffi_transform_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.transform"""
import tvm._ffi
tvm._ffi._init_api("transform", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/adt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Algebraic data type definitions."""
import tvm._ffi
from .type import Type
from .expr import RelayExpr
from . import _ffi_api
@tvm._ffi.register_object("relay.Constructor")
class Constructor(RelayExpr):
"""Relay ADT constructor.
Parameters
----------
name_hint : str
Name of constructor (only a hint).
inputs : List[Type]
Input types.
belong_to : GlobalTypeVar
Denotes which ADT the constructor belongs to.
"""
def __init__(self, name_hint, inputs, belong_to):
self.__init_handle_by_constructor__(_ffi_api.Constructor, name_hint, inputs, belong_to)
def __call__(self, *args):
"""Call the constructor.
Parameters
----------
args: List[RelayExpr]
The arguments to the constructor.
Returns
-------
call: RelayExpr
A call to the constructor.
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
return relay.Call(self, args)
@tvm._ffi.register_object("relay.TypeData")
class TypeData(Type):
"""Stores the definition for an Algebraic Data Type (ADT) in Relay.
Note that ADT definitions are treated as type-level functions because
the type parameters need to be given for an instance of the ADT. Thus,
any global type var that is an ADT header needs to be wrapped in a
type call that passes in the type params.
Parameters
----------
header: GlobalTypeVar
The name of the ADT.
ADTs with the same constructors but different names are
treated as different types.
type_vars: List[TypeVar]
Type variables that appear in constructors.
constructors: List[Constructor]
The constructors for the ADT.
"""
def __init__(self, header, type_vars, constructors):
self.__init_handle_by_constructor__(_ffi_api.TypeData, header, type_vars, constructors)
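# Hedged sketch, not part of the original module: define a tiny Option-like
# ADT. All names ("MyOption", "MySome", ...) are invented for illustration.
if __name__ == "__main__":
    from tvm.ir import GlobalTypeVar, TypeVar

    opt = GlobalTypeVar("MyOption")
    a = TypeVar("a")
    none_ctor = Constructor("MyNone", [], opt)
    some_ctor = Constructor("MySome", [a], opt)
    # The header, its type vars, and the constructors form the ADT definition.
    print(TypeData(opt, [a], [none_ctor, some_ctor]))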
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/affine_type.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Types for quantized Tensors."""
import tvm._ffi
from .base import Node
from . import _ffi_api
class AffineType(Node):
"""The base class of Affine Types."""
def __eq__(self, other):
"""Compare two types for structural equivalence."""
return bool(tvm.ir.structural_equal(self, other))
def __ne__(self, other):
return not self.__eq__(other)
@tvm._ffi.register_object("TensorAffineType")
class TensorAffineType(AffineType):
"""The quantized type of a tensor, with scale, zero point, and datatype
The real space value is calculated as x = x_q * scale + zero_point
Parameters
----------
scale: Expr
The scale
zero_point: Expr
The zero_point
dtype : str
The content data type.
axis : int
The axis for per-channel quantization.
"""
def __init__(self, scale, zero_point, dtype, axis=-1):
self.__init_handle_by_constructor__(
_ffi_api.TensorAffineType, scale, zero_point, dtype, axis
)
@tvm._ffi.register_object("TupleAffineType")
class TupleAffineType(AffineType):
"""Affine types of a node with multiple outputs
Parameters
----------
types : List[TensorAffineType]
The shape of the Tensor
"""
def __init__(self, types):
self.__init_handle_by_constructor__(_ffi_api.TupleAffineType, types)
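# Hedged construction sketch, not part of the original module: a per-tensor
# quantized int8 affine type with invented scale/zero-point values.
if __name__ == "__main__":
    from tvm import relay

    t = TensorAffineType(relay.const(0.5, "float32"), relay.const(3, "int32"), "int8")
    print(TupleAffineType([t, t]))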
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/attrs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" TVM Attribute module, which is mainly used for defining attributes of operators."""
import tvm._ffi
from tvm.runtime import Object
import tvm.runtime._ffi_node_api
from . import _ffi_api
@tvm._ffi.register_object
class Attrs(Object):
"""Attribute node, which is mainly use for defining attributes of relay operators.
Used by function registered in python side, such as compute, schedule and alter_layout.
Attrs is passed as the first argument to these functions.
"""
def list_field_info(self):
"""Get fields information
Returns
-------
infos: list of AttrFieldInfo
List of field information
"""
return _ffi_api.AttrsListFieldInfo(self)
def keys(self):
"""Get list of names in the attribute.
Returns
-------
keys : list of str
List of keys
"""
return [field.name for field in self.list_field_info()]
def get_int_tuple(self, key):
"""Get a python int tuple of a key
Parameters
----------
key: str
Returns
-------
value: Tuple of int
"""
return tuple(x.value for x in self.__getattr__(key))
def get_int(self, key):
"""Get a python int value of a key
Parameters
----------
key: str
Returns
-------
value: int
"""
return self.__getattr__(key)
def get_str(self, key):
"""Get a python int value of a key
Parameters
----------
key: str
Returns
-------
value: int
"""
return self.__getattr__(key)
def __getitem__(self, item):
return self.__getattr__(item)
@tvm._ffi.register_object
class DictAttrs(Attrs):
"""Dictionary attributes."""
def _dict(self):
"""Get internal dict"""
return _ffi_api.DictAttrsGetDict(self)
def keys(self):
"""Get list of names in the attribute.
Returns
-------
keys : list of str
List of keys
"""
return [k for k, _ in self.items()]
def __getitem__(self, k):
return self._dict().__getitem__(k)
def __contains__(self, k):
return self._dict().__contains__(k)
def items(self):
"""Get items from the map."""
return self._dict().items()
def __len__(self):
return self._dict().__len__()
def make_node(type_key, **kwargs):
"""Make a new IR node by its type key and fields
Parameters
----------
type_key : str
The type key of the node.
**kwargs : dict
The fields of the node.
Returns
-------
node : Node
The corresponding IR Node
Note
----
If the created node is instance of AttrsNode, then
the creator function will also run bound checks and
default value setup as supported by Attrs.
Example
-------
    The following code constructs an IntImm object
.. code-block:: python
x = tvm.ir.make_node("IntImm", dtype="int32", value=10)
assert isinstance(x, tvm.tir.IntImm)
assert x.value == 10
"""
args = [type_key]
for k, v in kwargs.items():
args += [k, v]
return tvm.runtime._ffi_node_api.MakeNode(*args)
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common base structures."""
import tvm._ffi
import tvm.error
import tvm.runtime._ffi_node_api
from tvm.runtime import Object
from . import _ffi_api
from . import json_compact
class Node(Object):
"""Base class of all IR Nodes, implements astext function."""
def astext(self, show_meta_data=True, annotate=None):
"""Get the text format of the expression.
Parameters
----------
show_meta_data : bool
Whether to include meta data section in the text
if there is meta data.
annotate: Optional[Object->str]
Optionally annotate function to provide additional
information in the comment block.
Returns
-------
text : str
The text format of the expression.
Notes
-----
The meta data section is necessary to fully parse the text format.
However, it can contain dumps that are big (e.g constant weights),
so it can be helpful to skip printing the meta data section.
"""
return _ffi_api.AsText(self, show_meta_data, annotate)
def __str__(self):
return _ffi_api.PrettyPrint(self)
@tvm._ffi.register_object("SourceName")
class SourceName(Object):
"""A identifier for a source location.
Parameters
----------
name : str
The name of the source.
"""
def __init__(self, name):
self.__init_handle_by_constructor__(_ffi_api.SourceName, name)
@tvm._ffi.register_object("Span")
class Span(Object):
"""Specifies a location in a source program.
Parameters
----------
    source_name : SourceName
        The source name.
    line : int
        The starting line number.
    end_line : int
        The ending line number.
    column : int
        The starting column offset.
    end_column : int
        The ending column offset.
"""
def __init__(self, source_name, line, end_line, column, end_column):
self.__init_handle_by_constructor__(
_ffi_api.Span, source_name, line, end_line, column, end_column
)
@tvm._ffi.register_object
class EnvFunc(Object):
"""Environment function.
This is a global function object that can be serialized by its name.
"""
def __call__(self, *args):
return _ffi_api.EnvFuncCall(self, *args)
@property
def func(self):
return _ffi_api.EnvFuncGetPackedFunc(self)
@staticmethod
def get(name):
"""Get a static env function
Parameters
----------
name : str
The name of the function.
"""
return _ffi_api.EnvFuncGet(name)
def load_json(json_str):
"""Load tvm object from json_str.
Parameters
----------
json_str : str
The json string
Returns
-------
node : Object
The loaded tvm node.
"""
try:
return tvm.runtime._ffi_node_api.LoadJSON(json_str)
except tvm.error.TVMError:
json_str = json_compact.upgrade_json(json_str)
return tvm.runtime._ffi_node_api.LoadJSON(json_str)
def save_json(node):
"""Save tvm object as json string.
Parameters
----------
node : Object
A TVM object to be saved.
Returns
-------
json_str : str
Saved json string.
"""
return tvm.runtime._ffi_node_api.SaveJSON(node)
def structural_equal(lhs, rhs, map_free_vars=False):
"""Check structural equality of lhs and rhs.
The structural equality is recursively defined in the DAG of IRNodes.
There are two kinds of nodes:
- Graph node: a graph node in lhs can only be mapped as equal to
one and only one graph node in rhs.
- Normal node: equality is recursively defined without the restriction
of graph nodes.
    Vars (tir::Var, TypeVar) and non-constant relay expression nodes are graph nodes.
    For example, this means that `%1 = %x + %y; %1 + %1` is not structurally equal
    to `%1 = %x + %y; %2 = %x + %y; %1 + %2` in relay.
    A var-type node (e.g. tir::Var, TypeVar) can be mapped as equal to another var
    with the same type if one of the following conditions holds:
    - They appear at the same definition point (e.g. function argument).
    - They point to the same VarNode via the same_as relation.
    - They appear at the same usage point, and map_free_vars is set to True.
    These rules for vars are used to remap variables that occur in function
    arguments and let-bindings.
Parameters
----------
lhs : Object
The left operand.
rhs : Object
        The right operand.
map_free_vars : bool
Whether free variables (i.e. variables without a definition site) should be mapped
as equal to each other.
Return
------
result : bool
The comparison result.
See Also
--------
structural_hash
    assert_structural_equal
"""
lhs = tvm.runtime.convert(lhs)
rhs = tvm.runtime.convert(rhs)
return bool(tvm.runtime._ffi_node_api.StructuralEqual(lhs, rhs, False, map_free_vars))
def get_first_structural_mismatch(lhs, rhs, map_free_vars=False):
"""Like structural_equal(), but returns the ObjectPaths of the first detected mismatch.
Parameters
----------
lhs : Object
The left operand.
rhs : Object
        The right operand.
map_free_vars : bool
Whether free variables (i.e. variables without a definition site) should be mapped
as equal to each other.
Returns
-------
mismatch: Optional[Tuple[ObjectPath, ObjectPath]]
`None` if `lhs` and `rhs` are structurally equal.
        Otherwise, a tuple of two ObjectPath objects that point to the first detected mismatch.
"""
lhs = tvm.runtime.convert(lhs)
rhs = tvm.runtime.convert(rhs)
mismatch = tvm.runtime._ffi_node_api.GetFirstStructuralMismatch(lhs, rhs, map_free_vars)
if mismatch is None:
return None
else:
return mismatch.lhs_path, mismatch.rhs_path
def assert_structural_equal(lhs, rhs, map_free_vars=False):
"""Assert lhs and rhs are structurally equal to each other.
Parameters
----------
lhs : Object
The left operand.
rhs : Object
        The right operand.
map_free_vars : bool
        Whether free variables (i.e. variables without a definition site) should be
        mapped as equal to each other.
Raises
------
ValueError : if assertion does not hold.
See Also
--------
structural_equal
"""
lhs = tvm.runtime.convert(lhs)
rhs = tvm.runtime.convert(rhs)
tvm.runtime._ffi_node_api.StructuralEqual(lhs, rhs, True, map_free_vars)
def structural_hash(node, map_free_vars=False):
"""Compute structural hash of node
The structural hash value is recursively defined in the DAG of IRNodes.
There are two kinds of nodes:
- Normal node: the hash value is defined by its content and type only.
    - Graph node: each graph node will be assigned a unique index ordered by the
      first occurrence during the visit. The hash value of a graph node is
      combined from the hash values of its contents and the index.
    structural_hash is made to be consistent with structural_equal.
    If two nodes are structurally equal to each other,
    then their structural hash (with the same map_free_vars option)
    should be equal to each other as well.
    If the structural hash of two nodes equals to each other,
    then it is highly likely (except for rare hash value collision cases)
    that the two nodes are structurally equal to each other.
Parameters
----------
node : Object
The input to be hashed.
map_free_vars : bool
If map_free_vars is set to true, we will hash free variables
by the order of their occurrences. Otherwise, we will hash by
their in-memory pointer address.
Return
------
result : int
The hash result
See Also
--------
    structural_equal
"""
return tvm.runtime._ffi_node_api.StructuralHash(node, map_free_vars)
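# Hedged illustration of the graph-node semantics documented above, using two
# free tir variables (a minimal sketch, assuming a full TVM build):
if __name__ == "__main__":
    from tvm import tir

    x, y = tir.Var("x", "int32"), tir.Var("y", "int32")
    assert structural_equal(x + 1, x + 1)
    assert not structural_equal(x + 1, y + 1)  # distinct free vars
    assert structural_equal(x + 1, y + 1, map_free_vars=True)
    assert structural_hash(x + 1) == structural_hash(x + 1)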
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/container.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Additional container data structures used across IR variants."""
import tvm._ffi
from tvm.runtime import Object
from tvm.runtime.container import getitem_helper
from tvm.runtime import _ffi_api
@tvm._ffi.register_object("Array")
class Array(Object):
"""Array container of TVM.
You do not need to create Array explicitly.
    Normally python lists and tuples are converted automatically
    to Array during tvm function calls.
    You may get an Array in the return values of a TVM function call.
"""
def __getitem__(self, idx):
return getitem_helper(self, _ffi_api.ArrayGetItem, len(self), idx)
def __len__(self):
return _ffi_api.ArraySize(self)
def __dir__(self):
return sorted(dir(self.__class__) + ["type_key"])
def __getattr__(self, name):
if name == "handle":
raise AttributeError("handle is not set")
if name == "type_key":
return super().__getattr__(name)
raise AttributeError("%s has no attribute %s" % (str(type(self)), name))
@tvm._ffi.register_object
class Map(Object):
"""Map container of TVM.
You do not need to create Map explicitly.
    Normally a python dict is converted automatically to Map during tvm function calls.
    You can use tvm.runtime.convert to turn a dict[Object, Object] into a Map.
"""
def __getitem__(self, k):
return _ffi_api.MapGetItem(self, k)
def __contains__(self, k):
return _ffi_api.MapCount(self, k) != 0
def __iter__(self):
akvs = _ffi_api.MapItems(self)
for i in range(len(self)):
yield akvs[i * 2]
def __dir__(self):
return sorted(dir(self.__class__) + ["type_key"])
def __getattr__(self, name):
if name == "handle":
raise AttributeError("handle is not set")
if name == "type_key":
return super().__getattr__(name)
raise AttributeError("%s has no attribute %s" % (str(type(self)), name))
def keys(self):
return iter(self)
def values(self):
akvs = _ffi_api.MapItems(self)
for i in range(len(self)):
yield akvs[i * 2 + 1]
def items(self):
"""Get the items from the map"""
akvs = _ffi_api.MapItems(self)
return [(akvs[i], akvs[i + 1]) for i in range(0, len(akvs), 2)]
def __len__(self):
return _ffi_api.MapSize(self)
def get(self, key, default=None):
"""Get an element with a default value.
Parameters
----------
key : object
The attribute key.
default : object
The default object.
Returns
-------
value: object
The result value.
"""
return self[key] if key in self else default
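# Hedged sketch, not part of the original module: python lists and dicts are
# converted to Array/Map via tvm.runtime.convert.
if __name__ == "__main__":
    import tvm

    arr = tvm.runtime.convert([1, 2, 3])
    mp = tvm.runtime.convert({"a": 1})
    assert isinstance(arr, Array) and len(arr) == 3
    assert isinstance(mp, Map) and mp.get("a").value == 1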
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/diagnostics/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
The diagnostic interface to TVM, used for reporting and rendering
diagnostic information by the compiler. This module exposes
three key abstractions: a Diagnostic, the DiagnosticContext,
and the DiagnosticRenderer.
"""
import enum
import tvm._ffi
from . import _ffi_api
from ... import get_global_func, register_func, Object
def get_renderer():
"""
Get the diagnostic renderer.
Returns
-------
renderer: DiagnosticRenderer
"""
return _ffi_api.GetRenderer()
@tvm.register_func("diagnostics.override_renderer")
def override_renderer(render_func):
"""
Sets a custom renderer for diagnostics.
Parameters
----------
render_func: Optional[Callable[[DiagnosticContext], None]]
If render_func is None, the current custom renderer is removed
and the default behavior is restored.
"""
if render_func:
def _render_factory():
return DiagnosticRenderer(render_func)
register_func("diagnostics.OverrideRenderer", _render_factory, override=True)
else:
_ffi_api.ClearRenderer()
class DiagnosticLevel(enum.IntEnum):
"""The diagnostic level, see diagnostic.h for more details."""
BUG = 10
ERROR = 20
WARNING = 30
NOTE = 40
HELP = 50
@tvm._ffi.register_object("Diagnostic")
class Diagnostic(Object):
"""A single diagnostic object from TVM."""
def __init__(self, level, span, message):
self.__init_handle_by_constructor__(_ffi_api.Diagnostic, level, span, message)
@tvm._ffi.register_object("DiagnosticRenderer")
class DiagnosticRenderer(Object):
"""
A diagnostic renderer, which, given a diagnostic context, produces a "rendered"
form of the diagnostics for either human or computer consumption.
"""
def __init__(self, render_func):
self.__init_handle_by_constructor__(_ffi_api.DiagnosticRenderer, render_func)
def render(self, ctx):
"""
Render the provided context.
Parameters
----------
ctx: DiagnosticContext
The diagnostic context to render.
"""
return _ffi_api.DiagnosticRendererRender(self, ctx)
# Register the diagnostic context.
@tvm._ffi.register_object("DiagnosticContext")
class DiagnosticContext(Object):
"""
A diagnostic context which records active errors
and contains a renderer.
"""
def __init__(self, module, renderer):
self.__init_handle_by_constructor__(_ffi_api.DiagnosticContext, module, renderer)
def emit(self, diagnostic):
"""Emit a diagnostic."""
_ffi_api.Emit(self, diagnostic)
def render(self):
"""Render the current context using its renderer member."""
_ffi_api.DiagnosticContextRender(self)
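# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): installing and then
# clearing a custom renderer through override_renderer. The helper name
# `_demo_override_renderer` is illustrative only.
def _demo_override_renderer():
    def _my_render(ctx):
        print("rendering diagnostic context:", ctx)

    override_renderer(_my_render)  # install the custom renderer
    override_renderer(None)  # passing None restores the default behavior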
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/diagnostics/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for TVM diagnostics."""
import tvm._ffi
tvm._ffi._init_api("diagnostics", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/expr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common expressions data structures in the IR."""
import tvm._ffi
from .base import Node
from . import _ffi_api
from ..runtime import const, convert
class BaseExpr(Node):
"""Base class of all the expressions."""
class PrimExpr(BaseExpr):
"""Base class of all primitive expressions.
PrimExpr is used in the low-level code
optimizations and integer analysis.
"""
class RelayExpr(BaseExpr):
"""Base class of all non-primitive expressions."""
@property
def checked_type(self):
"""Get the checked type of tvm.relay.Expr.
Returns
-------
checked_type : tvm.relay.Type
The checked type.
"""
ret = self._checked_type_
if ret is None:
raise ValueError("The type checker has not populated" " the checked_type for this node")
return ret
@tvm._ffi.register_object("GlobalVar")
class GlobalVar(RelayExpr):
"""A global variable in the IR.
GlobalVar is used to refer to the global functions
stored in the IRModule.
Parameters
----------
name_hint: str
The name of the variable.
"""
def __init__(self, name_hint, type_annot=None):
self.__init_handle_by_constructor__(_ffi_api.GlobalVar, name_hint, type_annot)
def __call__(self, *args):
"""Call the global variable.
Parameters
----------
args: List[RelayExpr]
The arguments to the call.
Returns
-------
call: BaseExpr
A call taking the variable as a function.
"""
# pylint: disable=import-outside-toplevel
if all(isinstance(x, RelayExpr) for x in args):
from tvm import relay
return relay.Call(self, args)
arg_types = [type(x) for x in args]
raise RuntimeError(
"Do not know how to handle GlobalVar.__call__ for types {}".format(arg_types)
)
@tvm._ffi.register_object
class Range(Node):
"""Represent a range in TVM.
You do not need to create a Range explicitly.
Python lists and tuples will be converted automatically to a Range in API functions.
Parameters
----------
begin : PrimExpr
The begin value of the range when end is not None.
Otherwise it is treated as the length of the range `[0, begin)`.
end : Optional[PrimExpr]
The end value of the range.
span : Optional[Span]
The location of this itervar in the source code.
Note
----
The constructor creates the range `[begin, end)`
if the end argument is not None. Otherwise, it creates `[0, begin)`.
"""
def __init__(self, begin, end=None, span=None):
if end is None:
end = convert(begin)
begin = const(0, dtype=end.dtype, span=span)
self.__init_handle_by_constructor__(_ffi_api.Range, begin, end, span)
@staticmethod
def from_min_extent(min_value, extent, span=None):
"""Construct a Range by min and extent.
This constructs a range in [min_value, min_value + extent)
Parameters
----------
min_value : PrimExpr
The minimum value of the range.
extent : PrimExpr
The extent of the range.
span : Optional[Span]
The location of this itervar in the source code.
Returns
-------
rng : Range
The constructed range.
"""
return _ffi_api.Range_from_min_extent(min_value, extent, span)
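# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): the two Range constructor
# forms documented above, plus the from_min_extent helper.
def _demo_range():
    r0 = Range(8)  # single argument: [0, 8)
    r1 = Range(2, 10)  # begin/end: [2, 10)
    r2 = Range.from_min_extent(2, 8)  # min/extent: also [2, 10)
    return r0, r1, r2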
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/function.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Function defintiions."""
from enum import IntEnum
import tvm.runtime
from .expr import RelayExpr
from . import _ffi_api
class CallingConv(IntEnum):
"""Possible kinds of calling conventions."""
DEFAULT = 0
C_PACKED_FUNC = 1
DEVICE_KERNEL_LAUNCH = 2
class BaseFunc(RelayExpr):
"""Base class of all functions."""
@property
def attrs(self):
"""Return the attrs member of the function."""
return _ffi_api.BaseFunc_Attrs(self)
def with_attr(self, attr_key_or_dict, attr_value=None):
"""Create a new copy of the function and update the attribute.
Parameters
----------
attr_key_or_dict : Union[str, dict]
The attribute key to use or a dict containing multiple key value pairs.
attr_value : Object
The new attribute value.
Returns
-------
func : Function
A new copy of the function
"""
# make sure we first copy so that we can safely do copy on write
# for multiple updates.
res = _ffi_api.BaseFuncCopy(self)
if isinstance(attr_key_or_dict, dict):
for key, val in attr_key_or_dict.items():
res = _ffi_api.BaseFuncWithAttr(res._move(), key, tvm.runtime.convert(val))
return res
return _ffi_api.BaseFuncWithAttr(
res._move(), attr_key_or_dict, tvm.runtime.convert(attr_value)
)
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/instrument.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Common pass instrumentation across IR variants."""
import inspect
import functools
import tvm._ffi
import tvm.runtime
from . import _ffi_instrument_api
@tvm._ffi.register_object("instrument.PassInstrument")
class PassInstrument(tvm.runtime.Object):
"""A pass instrument implementation.
To use, a user class can either subclass from PassInstrument
directly, or can apply the :py:func:`pass_instrument` wrapper. In
either case, the `enter_pass_ctx`, `exit_pass_ctx`, `should_run`,
`run_before_pass`, and `run_after_pass` methods can be defined to
adjust the instrument's behavior. See the no-op implementations
in this class definition for more information on each.
"""
def __init__(self):
# initialize handle in case pi_cls creation failed.
self.handle = None
cls = type(self)
# If the child class declared the method, then use it.
# Otherwise, pass None to avoid a C++ -> Python round trip for
# a no-op.
def get_child_method(name):
if getattr(cls, name) is getattr(PassInstrument, name):
return None
return getattr(self, name)
# Create runtime pass instrument object.
# Register the instance's enter_pass_ctx, exit_pass_ctx, should_run,
# run_before_pass and run_after_pass methods to it if present.
self.__init_handle_by_constructor__(
_ffi_instrument_api.PassInstrument,
cls.__name__,
get_child_method("enter_pass_ctx"),
get_child_method("exit_pass_ctx"),
get_child_method("should_run"),
get_child_method("run_before_pass"),
get_child_method("run_after_pass"),
)
def enter_pass_ctx(self):
"""Called when entering the instrumented context.
Returns
-------
None
"""
def exit_pass_ctx(self):
"""Called when exiting the instrumented context.
Returns
-------
None
"""
def should_run(self, mod, info):
"""Determine whether to run the pass or not.
Called once for each pass that is run while the instrumented
context is active.
Parameters
----------
mod : tvm.ir.module.IRModule
The module on which an optimization pass is being run.
info : tvm.transform.PassInfo
The pass information.
Returns
-------
should_run : bool
True to run the pass, or False to skip the pass.
"""
def run_before_pass(self, mod, info):
"""Instrument before the pass runs.
Called once for each pass that is run while the instrumented
context is active.
Parameters
----------
mod : tvm.ir.module.IRModule
The module on which an optimization pass is being run.
info : tvm.transform.PassInfo
The pass information.
Returns
-------
None
"""
def run_after_pass(self, mod, info):
"""Instrument after the pass runs.
Called once for each pass that is run while the instrumented
context is active.
Parameters
----------
mod : tvm.ir.module.IRModule
The module on which an optimization pass is being run.
info : tvm.transform.PassInfo
The pass information.
Returns
-------
None
"""
def _wrap_class_pass_instrument(pi_cls):
"""Wrap a python class as pass instrument"""
# No additional wrapping needed if the user class already
# inherits.
if issubclass(pi_cls, PassInstrument):
return pi_cls
class PyPassInstrument(pi_cls, PassInstrument):
"""Internal wrapper class to create a class instance."""
def __init__(self, *args, **kwargs):
# initialize handle in case pi_cls creation failed.
self.handle = None
pi_cls.__init__(self, *args, **kwargs)
PassInstrument.__init__(self)
functools.update_wrapper(PyPassInstrument.__init__, pi_cls.__init__)
PyPassInstrument.__name__ = pi_cls.__name__
PyPassInstrument.__doc__ = pi_cls.__doc__
PyPassInstrument.__module__ = pi_cls.__module__
return PyPassInstrument
def pass_instrument(pi_cls=None):
"""Decorate a pass instrument.
Parameters
----------
pi_class : class
Instrument class. See example below.
Examples
--------
.. code-block:: python
@tvm.instrument.pass_instrument
class SkipPass:
def __init__(self, skip_pass_name):
self.skip_pass_name = skip_pass_name
# Uncomment to customize
# def enter_pass_ctx(self):
# pass
# Uncomment to customize
# def exit_pass_ctx(self):
# pass
# If the pass name contains the keyword, skip it by returning False
# (returning True means the pass is not skipped).
def should_run(self, mod, pass_info):
if self.skip_pass_name in pass_info.name:
return False
return True
# Uncomment to customize
# def run_before_pass(self, mod, pass_info):
# pass
# Uncomment to customize
# def run_after_pass(self, mod, pass_info):
# pass
skip_annotate = SkipPass("AnnotateSpans")
with tvm.transform.PassContext(instruments=[skip_annotate]):
tvm.relay.build(mod, "llvm")
"""
def create_pass_instrument(pi_cls):
if not inspect.isclass(pi_cls):
raise TypeError("pi_cls must be a class")
return _wrap_class_pass_instrument(pi_cls)
if pi_cls:
return create_pass_instrument(pi_cls)
return create_pass_instrument
@tvm._ffi.register_object("instrument.PassInstrument")
class PassTimingInstrument(tvm.runtime.Object):
"""A wrapper to create a passes time instrument that implemented in C++"""
def __init__(self):
self.__init_handle_by_constructor__(_ffi_instrument_api.MakePassTimingInstrument)
@staticmethod
def render():
"""Retrieve rendered time profile result
Returns
-------
string : string
The rendered string result of time profiles
Examples
--------
.. code-block:: python
timing_inst = PassTimingInstrument()
with tvm.transform.PassContext(instruments=[timing_inst]):
relay_mod = relay.transform.InferType()(relay_mod)
relay_mod = relay.transform.FoldScaleAxis()(relay_mod)
# before exiting the context, get profile results.
profiles = timing_inst.render()
"""
return _ffi_instrument_api.RenderTimePassProfiles()
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/json_compact.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tool to upgrade json from historical versions."""
import json
import tvm.ir
import tvm.runtime
def create_updater(node_map, from_ver, to_ver):
"""Create an updater to update json loaded data.
Parameters
----------
node_map : Map[str, Function]
Map from type_key to updating function
from_ver : str
Prefix of the version that we can accept.
to_ver : str
The target version.
Returns
-------
fupdater : function
The updater function
"""
def _updater(data):
assert data["attrs"]["tvm_version"].startswith(from_ver)
nodes = data["nodes"]
for idx, item in enumerate(nodes):
f = node_map.get(item["type_key"], None)
if isinstance(f, list):
for fpass in f:
item = fpass(item, nodes)
elif f:
item = f(item, nodes)
nodes[idx] = item
data["attrs"]["tvm_version"] = to_ver
return data
return _updater
def create_updater_08_to_09():
"""
Create an update to upgrade json from v0.8 to v0.9
Returns
-------
fupdater : function
The updater function
"""
def _initialize_virtual_device(item, _):
if "virtual_device_" not in item["attrs"]:
item["attrs"]["virtual_device_"] = "0"
return item
node_map = {
# Base IR
"GlobalVar": _initialize_virtual_device,
"relay.Var": _initialize_virtual_device,
"relay.Function": _initialize_virtual_device,
"relay.Tuple": _initialize_virtual_device,
"relay.Call": _initialize_virtual_device,
"relay.Let": _initialize_virtual_device,
"relay.If": _initialize_virtual_device,
"relay.TupleGetItem": _initialize_virtual_device,
"relay.RefCreate": _initialize_virtual_device,
"relay.RefRead": _initialize_virtual_device,
"relay.RefWrite": _initialize_virtual_device,
"relay.Match": _initialize_virtual_device,
"relay.Constant": _initialize_virtual_device,
}
return create_updater(node_map, "0.8", "0.9")
def create_updater_07_to_08():
"""Create an update to upgrade json from v0.7 to v0.8"""
def _initialize_module_attributes(item, _):
assert item["type_key"] == "IRModule", "Only initialize the attributes for IRModules"
if "attrs" not in item["attrs"]:
item["attrs"]["attrs"] = "0"
return item
node_map = {"IRModule": _initialize_module_attributes}
return create_updater(node_map, "0.7", "0.8")
def create_updater_06_to_07():
"""Create an update to upgrade json from v0.6 to v0.7
Returns
-------
fupdater : function
The updater function
"""
def _ftype_var(item, nodes):
vindex = int(item["attrs"]["var"])
item["attrs"]["name_hint"] = nodes[vindex]["attrs"]["name"]
# set vindex to null
nodes[vindex]["type_key"] = ""
del item["attrs"]["var"]
assert item["type_key"].startswith("relay.")
item["type_key"] = item["type_key"][len("relay.") :]
return item
def _rename(new_name):
def _convert(item, _):
item["type_key"] = new_name
return item
return _convert
def _update_tir_var(new_name):
def _convert(item, _):
item["type_key"] = new_name
item["attrs"]["type_annotation"] = "0"
return item
return _convert
def _update_global_key(item, _):
if "global_key" in item:
item["repr_str"] = item["global_key"]
del item["global_key"]
return item
def _update_from_std_str(key):
def _convert(item, nodes):
str_val = item["attrs"][key]
jdata = json.loads(tvm.ir.save_json(tvm.runtime.String(str_val)))
root_idx = jdata["root"]
val = jdata["nodes"][root_idx]
sidx = len(nodes)
nodes.append(val)
item["attrs"][key] = "%d" % sidx
return item
return _convert
node_map = {
# Base IR
"SourceName": _update_global_key,
"EnvFunc": _update_global_key,
"relay.Op": [_update_global_key, _rename("Op")],
"relay.TypeVar": [_ftype_var, _update_from_std_str("name_hint")],
"TypeVar": _update_from_std_str("name_hint"),
"relay.Id": [_update_from_std_str("name_hint")],
"relay.GlobalTypeVar": [_ftype_var, _update_from_std_str("name_hint")],
"GlobalTypeVar": _update_from_std_str("name_hint"),
"relay.Type": _rename("Type"),
"relay.TupleType": _rename("TupleType"),
"relay.TypeConstraint": _rename("TypeConstraint"),
"relay.FuncType": _rename("FuncType"),
"relay.IncompleteType": _rename("IncompleteType"),
"relay.TypeRelation": _rename("TypeRelation"),
"relay.TypeCall": _rename("TypeCall"),
"relay.Constructor": _update_from_std_str("name_hint"),
"relay.Module": _rename("IRModule"),
"relay.SourceName": _rename("SourceName"),
"relay.Span": _rename("Span"),
"relay.GlobalVar": [_rename("GlobalVar"), _update_from_std_str("name_hint")],
"GlobalVar": _update_from_std_str("name_hint"),
"relay.Pass": _rename("transform.Pass"),
"relay.PassInfo": _rename("transform.PassInfo"),
"relay.PassContext": _rename("transform.PassContext"),
"relay.ModulePass": _rename("transform.ModulePass"),
"relay.Sequential": _rename("transform.Sequential"),
"StrMap": _rename("Map"),
# TIR
"Variable": [_update_tir_var("tir.Var"), _update_from_std_str("name")],
"SizeVar": [_update_tir_var("tir.SizeVar"), _update_from_std_str("name")],
"StringImm": [_rename("tir.StringImm"), _update_from_std_str("value")],
"Cast": _rename("tir.Cast"),
"Add": _rename("tir.Add"),
"Sub": _rename("tir.Sub"),
"Mul": _rename("tir.Mul"),
"Div": _rename("tir.Div"),
"Mod": _rename("tir.Mod"),
"FloorDiv": _rename("tir.FloorDiv"),
"FloorMod": _rename("tir.FloorMod"),
"Min": _rename("tir.Min"),
"Max": _rename("tir.Max"),
"EQ": _rename("tir.EQ"),
"NE": _rename("tir.NE"),
"LT": _rename("tir.LT"),
"LE": _rename("tir.LE"),
"GT": _rename("tir.GT"),
"GE": _rename("tir.GE"),
"And": _rename("tir.And"),
"Or": _rename("tir.Or"),
"Not": _rename("tir.Not"),
"Select": _rename("tir.Select"),
"Load": _rename("tir.Load"),
"BufferLoad": _rename("tir.BufferLoad"),
"Ramp": _rename("tir.Ramp"),
"Broadcast": _rename("tir.Broadcast"),
"Shuffle": _rename("tir.Shuffle"),
"Call": [_rename("tir.Call"), _update_from_std_str("name")],
"Let": _rename("tir.Let"),
"Any": _rename("tir.Any"),
"LetStmt": _rename("tir.LetStmt"),
"AssertStmt": _rename("tir.AssertStmt"),
"Store": _rename("tir.Store"),
"BufferStore": _rename("tir.BufferStore"),
"BufferRealize": _rename("tir.BufferRealize"),
"Allocate": _rename("tir.Allocate"),
"IfThenElse": _rename("tir.IfThenElse"),
"Evaluate": _rename("tir.Evaluate"),
"Prefetch": _rename("tir.Prefetch"),
"AttrStmt": [_rename("tir.AttrStmt"), _update_from_std_str("attr_key")],
"Layout": [_rename("tir.Layout"), _update_from_std_str("name")],
"Buffer": [
_rename("tir.Buffer"),
_update_from_std_str("name"),
_update_from_std_str("scope"),
],
}
return create_updater(node_map, "0.6", "0.7")
def upgrade_json(json_str):
"""Update json from a historical version.
Parameters
----------
json_str : str
A historical json file.
Returns
-------
updated_json : str
The updated version.
"""
data = json.loads(json_str)
from_version = data["attrs"]["tvm_version"]
if from_version.startswith("0.6"):
data = create_updater_08_to_09()(create_updater_07_to_08()(create_updater_06_to_07()(data)))
elif from_version.startswith("0.7"):
data = create_updater_08_to_09()(create_updater_07_to_08()(data))
elif from_version.startswith("0.8"):
data = create_updater_08_to_09()(data)
else:
raise ValueError("Cannot update from version %s" % from_version)
return json.dumps(data, indent=2)
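# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): upgrade_json only accepts
# historical versions (0.6-0.8) and raises ValueError otherwise, so a caller
# can treat the ValueError as "already current".
def _demo_upgrade(json_str):
    try:
        return upgrade_json(json_str)
    except ValueError:
        return json_str  # not a historical version; return unchanged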
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/memory_pools.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects for Memory Pools to be used within the compilation"""
from typing import Optional, List
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.runtime import NDArray
from . import _ffi_api
@register_object("ir.PoolInfo")
class PoolInfo(Object):
"""PoolInfo object holds information related to memory pools
where the statically sized allocate nodes will be pooled into.
This is a base class for WorkspacePoolInfo and ConstantPoolInfo.
"""
def __init__(self):
pass
@register_object("ir.PoolInfoProperties")
class PoolInfoProperties(Object):
"""PoolInfo object holds information related to memory pools
where the statically sized allocate nodes will pooled into.
Parameters
----------
size_hint_bytes : Optional[int]
The expected size hint to be used by the allocator.
The default value is -1, which means the pool
is not size restricted.
clock_frequency_hz : Optional[int]
The clock frequency that the memory pool runs at in Hz.
If not specified/known, this will default to -1 indicating
it hasn't been defined.
read_bandwidth_bytes_per_cycle : Optional[int]
The read bandwidth of the memory pool in bytes/cycle.
If not specified/known, this will default to -1 indicating
it hasn't been defined.
write_bandwidth_bytes_per_cycle : Optional[int]
The write bandwidth of the memory pool in bytes/cycle.
If not specified/known, this will default to -1 indicating
it hasn't been defined.
read_latency_cycles : Optional[int]
The read latency of the memory pool in cycles.
If not specified/known, this will default to 0.
write_latency_cycles : Optional[int]
The write latency of the memory pool in cycles.
If not specified/known, this will default to 0.
target_burst_bytes : Optional[Union[Dict[Target, int], None]]
The burst length of the memory pool in bytes per target.
If not specified/known for a given target, a burst length
of 1 byte will be assumed.
"""
def __init__(
self,
size_hint_bytes: Optional[int] = -1,
clock_frequency_hz: Optional[int] = -1,
read_bandwidth_bytes_per_cycle: Optional[int] = -1,
write_bandwidth_bytes_per_cycle: Optional[int] = -1,
read_latency_cycles: Optional[int] = 0,
write_latency_cycles: Optional[int] = 0,
target_burst_bytes=None,
):
if not target_burst_bytes:
target_burst_bytes = dict()
self.__init_handle_by_constructor__(
_ffi_api.PoolInfoProperties, # type: ignore # pylint: disable=no-member
size_hint_bytes,
clock_frequency_hz,
read_bandwidth_bytes_per_cycle,
write_bandwidth_bytes_per_cycle,
read_latency_cycles,
write_latency_cycles,
target_burst_bytes,
)
@register_object("ir.ConstantInfo")
class ConstantInfo(Object):
"""ConstantInfo object hold information on a constant pool.
Parameters
----------
name_hint : str
Name of the constant.
byte_offset : int
The byte_offset of the constant.
data : NDArray
The data of the constant.
"""
def __init__(
self,
name_hint: str,
byte_offset: int,
data: NDArray,
):
self.__init_handle_by_constructor__(
_ffi_api.ConstantInfo, # type: ignore # pylint: disable=no-member
name_hint,
byte_offset,
data,
)
@register_object("ir.WorkspacePoolInfo")
class WorkspacePoolInfo(PoolInfo):
"""WorkspacePoolInfo object holds information related to RW memory pools
where the statically sized allocate nodes will be pooled into.
Parameters
----------
pool_name : str
The name of the memory pool
targets : list[Target]
A list of targets which could access the pool
pool_info_properties : PoolInfoProperties
The properties of the pool.
"""
def __init__(
self,
pool_name: str,
targets,
pool_info_properties=None,
):
super().__init__()
if pool_info_properties is None:
pool_info_properties = PoolInfoProperties()
self.__init_handle_by_constructor__(
_ffi_api.WorkspacePoolInfo, # type: ignore # pylint: disable=no-member
pool_name,
targets,
pool_info_properties,
)
@register_object("ir.ConstantPoolInfo")
class ConstantPoolInfo(PoolInfo):
"""ConstantPoolInfo object holds information related to RO memory pools
where the statically sized allocate nodes are pooled into.
Parameters
----------
pool_name : str
The name of the memory pool
targets : list[Target]
describes which targets could access the pool
pool_info_properties : PoolInfoProperties
The properties of the pool.
"""
def __init__(
self,
pool_name: str,
targets, # list[Target]
constant_info_arr=None, # list[ConstantInfo]
pool_info_properties=None,
):
super().__init__()
if constant_info_arr is None:
constant_info_arr = []
if pool_info_properties is None:
pool_info_properties = PoolInfoProperties()
self.__init_handle_by_constructor__(
_ffi_api.ConstantPoolInfo, # type: ignore # pylint: disable=no-member
pool_name,
targets,
constant_info_arr,
pool_info_properties,
)
@register_object("ir.WorkspaceMemoryPools")
class WorkspaceMemoryPools(Object):
"""This object contains a list of WorkspacePoolInfo objects to be used as
workspace memory in the compilation
Parameters
----------
pools : List[WorkspacePoolInfo]
The list of WorkspacePoolInfo objects to be used with the compilation
"""
def __init__(
self,
pools: List[WorkspacePoolInfo],
):
self.__init_handle_by_constructor__(
_ffi_api.WorkspaceMemoryPools, pools # type: ignore # pylint: disable=no-member
)
@register_object("ir.ConstantMemoryPools")
class ConstantMemoryPools(Object):
"""This object contains a list of ConstantPoolInfo objects to be used as
read-only memory in the compilation
Parameters
----------
pools : List[ConstantPoolInfo]
The list of ConstantPoolInfo objects to be used with the compilation
"""
def __init__(
self,
pools: List[ConstantPoolInfo],
):
self.__init_handle_by_constructor__(
_ffi_api.ConstantMemoryPools, pools # type: ignore # pylint: disable=no-member
)
@register_object("ir.ConstantMemoryPools")
class AllocatedPoolInfo(Object):
"""Allocate memory in a given pool.
Parameters
----------
pool : PoolInfo
The pool in which to allocate memory.
allocated_size : int
The size of memory to allocate.
pool_var_idx : int
The index of the pool variable, defaulting to 0.
"""
def __init__(
self,
pool: PoolInfo,
allocated_size: int,
pool_var_idx: int = 0,
):
self.__init_handle_by_constructor__(
_ffi_api.AllocatedPoolInfo, pool, allocated_size, pool_var_idx # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""IRModule that holds the functions and type definitions."""
from typing import Optional
from tvm._ffi.base import string_types
import tvm._ffi
from .base import Node
from . import expr as _expr
from . import type as _ty
from . import _ffi_api
@tvm._ffi.register_object("IRModule")
class IRModule(Node):
"""IRModule that holds functions and type definitions.
IRModule is the basic unit for all IR transformations across the stack.
Parameters
----------
functions: Optional[dict]
Map of global var to BaseFunc
type_definitions: Optional[dict]
Map of global type var to type definitions
"""
def __init__(self, functions=None, type_definitions=None):
if functions is None:
functions = {}
elif isinstance(functions, dict):
mapped_funcs = {}
for k, v in functions.items():
if isinstance(k, string_types):
k = _expr.GlobalVar(k)
if not isinstance(k, _expr.GlobalVar):
raise TypeError("Expect functions to be Dict[GlobalVar, Function]")
mapped_funcs[k] = v
functions = mapped_funcs
if type_definitions is None:
type_definitions = {}
elif isinstance(type_definitions, dict):
mapped_type_defs = {}
for k, v in type_definitions.items():
if isinstance(k, string_types):
k = _ty.GlobalTypeVar(k)
if not isinstance(k, _ty.GlobalTypeVar):
raise TypeError("Expect type_definitions to be Dict[GlobalTypeVar, Type]")
mapped_type_defs[k] = v
type_definitions = mapped_type_defs
self.__init_handle_by_constructor__(_ffi_api.IRModule, functions, type_definitions)
def __setitem__(self, var, val):
"""Add a mapping to the module.
Parameters
----------
var: GlobalVar
The global variable.
val: Union[Function, Type]
The value.
"""
return self._add(var, val, True)
def _add(self, var, val, update=True):
if isinstance(val, _expr.RelayExpr):
if isinstance(var, string_types):
if _ffi_api.Module_ContainGlobalVar(self, var):
var = _ffi_api.Module_GetGlobalVar(self, var)
else:
var = _expr.GlobalVar(var)
_ffi_api.Module_Add(self, var, val, update)
else:
assert isinstance(val, _ty.Type)
if isinstance(var, string_types):
var = _ty.GlobalTypeVar(var)
_ffi_api.Module_AddDef(self, var, val, update)
def __getitem__(self, var):
"""Lookup a global definition by name or by variable.
Parameters
----------
var: Union[String, GlobalVar, GlobalTypeVar]
The name or global variable.
Returns
-------
val: Union[Function, Type]
The definition referenced by :code:`var` (either a function or type).
"""
if isinstance(var, string_types):
return _ffi_api.Module_Lookup_str(self, var)
if isinstance(var, _expr.GlobalVar):
return _ffi_api.Module_Lookup(self, var)
return _ffi_api.Module_LookupDef(self, var)
def update(self, other):
"""Insert functions in another Module to current one.
Parameters
----------
other: IRModule
The module to merge into the current Module.
"""
if isinstance(other, dict):
other = IRModule(other)
return _ffi_api.Module_Update(self, other)
def update_func(self, var, func):
"""Update the function corresponding to a global variable in the
module.
Parameters
----------
var: GlobalVar
The global variable.
func: tvm.relay.Function
The function to be inserted.
"""
return _ffi_api.Module_UpdateFunction(self, var, func)
def get_global_var(self, name):
"""Get a global variable in the function by name.
Parameters
----------
name: str
The name of the global variable.
Returns
-------
global_var: GlobalVar
The global variable mapped to :code:`name`.
Raises
------
tvm.error.TVMError if we cannot find corresponding global var.
"""
return _ffi_api.Module_GetGlobalVar(self, name)
def get_global_vars(self):
"""Collect all global vars defined in this module.
Returns
-------
global_vars: Array[GlobalVar]
An array of global vars.
"""
return _ffi_api.Module_GetGlobalVars(self)
def get_global_type_vars(self):
"""Collect all global type vars defined in this module.
Returns
-------
global_type_vars: Array[GlobalTypeVar]
An array of global type vars.
"""
return _ffi_api.Module_GetGlobalTypeVars(self)
def get_global_type_var(self, name):
"""Get a global type variable in the function by name.
Parameters
----------
name: str
The name of the global type variable.
Returns
-------
global_type_var: GlobalTypeVar
The global variable mapped to :code:`name`.
Raises
------
tvm.error.TVMError if we cannot find corresponding global type var.
"""
return _ffi_api.Module_GetGlobalTypeVar(self, name)
def get_constructor(self, tag):
"""Look up an ADT constructor by tag.
Parameters
----------
tag: int
The tag for a constructor.
Returns
-------
constructor: Constructor
The constructor associated with the given tag.
Raises
------
tvm.error.TVMError if the corresponding constructor cannot be found.
"""
return _ffi_api.Module_LookupTag(self, tag)
def get_type(self, name):
ty_var = self.get_global_type_var(name)
ty_data = self.type_definitions[ty_var]
return tuple([ty_var] + list(ty_data.constructors))
@staticmethod
def from_expr(expr, functions=None, type_defs=None):
"""Construct a module from a standalone expression.
Parameters
----------
expr: RelayExpr
The starting expression
functions: Optional[dict]
Map of global vars to function definitions
type_defs: Optional[dict]
Map of global type vars to type definitions
Returns
-------
mod: Module
A module containing the passed definitions,
where expr is set as the entry point
(wrapped in a function if necessary)
"""
funcs = functions if functions is not None else {}
defs = type_defs if type_defs is not None else {}
return _ffi_api.Module_FromExpr(expr, funcs, defs)
def _import(self, file_to_import):
return _ffi_api.Module_Import(self, file_to_import)
def import_from_std(self, file_to_import):
# TODO(@jroesch): clean up prelude
_ffi_api.Module_ImportFromStd(self, file_to_import)
return tvm.relay.transform.InferType()(self)
def __str__(self):
return _ffi_api.PrettyPrint(self)
def __repr__(self):
return self.astext()
def script(self, tir_prefix: str = "T", show_meta: bool = False) -> str:
"""Print IRModule into TVMScript
Parameters
----------
tir_prefix : str
The tir namespace prefix
show_meta : bool
Whether to show meta information
Returns
-------
script : str
The TVM Script of the IRModule
"""
return tvm._ffi.get_global_func("script.AsTVMScript")(
self, tir_prefix, show_meta
) # type: ignore
def show(self, style: Optional[str] = None) -> None:
"""
A sugar for printing highlighted TVM script.
Parameters
----------
style : str, optional
Pygments styles extended by "light" (default) and "dark", by default "light"
"""
from tvm.script.highlight import cprint # pylint: disable=import-outside-toplevel
# Use deferred import to avoid circular import while keeping cprint under tvm/script
cprint(self, style=style)
def get_attr(self, attr_key):
"""Get the IRModule attribute.
Parameters
----------
attr_key : str
The attribute key.
Returns
-------
attr_value : Any
Attribute value
"""
return _ffi_api.Module_GetAttr(self, attr_key)
def with_attr(self, attr_key, attr_value):
"""Copy the IRModule and add an attribute to it.
Parameters
----------
attr_key : str
The attribute key.
attr_value : Object
The new attribute value.
Returns
-------
mod : IRModule
A new copy of the IRModule with the attribute
"""
return _ffi_api.Module_WithAttr(self, attr_key, attr_value)
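# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): construct an IRModule from
# a dict keyed by name, then look the function up again by GlobalVar. Assumes
# relay is importable.
def _demo_irmodule():
    from tvm import relay

    x = relay.var("x", shape=(3,))
    mod = IRModule({"main": relay.Function([x], relay.abs(x))})
    gvar = mod.get_global_var("main")  # str keys become GlobalVars
    return mod[gvar]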
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Primitive operators in the TVM IR."""
import tvm._ffi
from .expr import RelayExpr
from . import _ffi_api
@tvm._ffi.register_object("Op")
class Op(RelayExpr):
"""Primitive operator in the IR."""
def __init__(self):
raise RuntimeError("Cannot create op, use get instead")
@staticmethod
def get(op_name):
"""Get the Op for a given name
Parameters
----------
op_name : str
The operator name
Returns
-------
op : Op
The op of the corresponding name
"""
return _ffi_api.GetOp(op_name)
def get_attr(self, attr_name):
"""Get additional attribute about the operator.
Parameters
----------
attr_name : str
The attribute name.
Returns
-------
value : object
The attribute value
"""
return _ffi_api.OpGetAttr(self, attr_name)
def has_attr(self, attr_name):
"""Check whether the operator has additional attribute.
Parameters
----------
attr_name : str
The attribute name.
Returns
-------
value : bool
Whether the operator has additional attribute
"""
return _ffi_api.OpHasAttr(self, attr_name)
def set_attr(self, attr_name, value, plevel=10):
"""Set attribute about the operator.
Parameters
----------
attr_name : str
The attribute name
value : object
The attribute value
plevel : int
The priority level
"""
_ffi_api.OpSetAttr(self, attr_name, value, plevel)
def reset_attr(self, attr_name):
"""Reset attribute about the operator.
Parameters
----------
attr_name : str
The attribute name
"""
_ffi_api.OpResetAttr(self, attr_name)
def add_type_rel(self, rel_name, type_rel_func=None):
"""Attach the type function corresponding to the return type.
Parameters
----------
rel_name : str
The type relation name to register.
type_rel_func : Optional[function (args: List[Type], attrs: Attrs) -> Type]
The backing relation function which can solve an arbitrary relation on variables.
Differences with type_rel_func in C++:
1) When type_rel_func is not None
a) OpAddTypeRel on C++ side will adjust type_rel_func with TypeReporter to
calling convention of relay type system.
b) type_rel_func returns output argument's type, return None means can't
infer output's type.
c) only support single output operators for now, the last argument is output tensor.
2) when type_rel_func is None, will call predefined type_rel_funcs in relay
according to ``tvm.relay.type_relation.`` + rel_name.
"""
_ffi_api.OpAddTypeRel(self, rel_name, type_rel_func)
def add_argument(self, name, type, description): # pylint: disable=redefined-builtin
"""Add arguments information to the function.
Parameters
----------
name : str
The argument name.
type : str
The argument type.
description : str
The argument description.
"""
_ffi_api.OpAddArgument(self, name, type, description)
def set_support_level(self, level):
"""Set the support level of op.
Parameters
----------
level : int
The support level.
"""
_ffi_api.OpSetSupportLevel(self, level)
def set_num_inputs(self, n):
"""Set the support level of op.
Parameters
----------
n : int
The input number.
"""
_ffi_api.OpSetNumInputs(self, n)
def set_attrs_type_key(self, key):
"""Set the attribute type key of op.
Parameters
----------
key : str
The type key.
"""
_ffi_api.OpSetAttrsTypeKey(self, key)
@staticmethod
def list_op_names():
"""List all the op names in the op registry.
Returns
-------
value : List[str]
The registered op names
"""
return _ffi_api.ListOpNames()
def register_op_attr(op_name, attr_key, value=None, level=10):
"""Register an operator property of an operator by name.
Parameters
----------
op_name : str
The name of operator
attr_key : str
The attribute name.
value : object, optional
The value to set
level : int, optional
The priority level
Returns
-------
fregister : function
Register function if value is not specified.
"""
def _register(v):
"""internal register function"""
_ffi_api.RegisterOpAttr(op_name, attr_key, v, level)
return v
return _register(value) if value is not None else _register
def register_intrin_lowering(
op_name,
target,
*,
f=None,
level=10,
):
"""Register Op lowering function
Parameters
----------
op_name : str
The op name
target : str
The target string for given intrinsic lowering function
f : function, optional
The function to be registered.
level : int
The priority level
Returns
-------
fregister : function
Register op lowering function if f is not specified.
"""
def _register(f):
"""internal register function"""
_ffi_api.RegisterOpLowerIntrinsic(op_name, f, target, level)
return f
return _register(f) if f is not None else _register
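# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): fetch a registered op and
# attach a custom attribute via register_op_attr in decorator form. The key
# "custom.flag" is hypothetical; note that registration mutates global op state.
def _demo_op_attr():
    op = Op.get("add")  # "add" is registered by the C++ side at library load

    @register_op_attr("add", "custom.flag")
    def _flag(*args):
        return True

    return op.get_attr("custom.flag")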
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/supply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Suppliers that are used to guarantee uniqueness of names and GlobalVars."""
import tvm
from tvm import Object, IRModule
from . import _ffi_api
@tvm._ffi.register_object("NameSupply")
class NameSupply(Object):
"""NameSupply that can be used to generate unique names.
Parameters
----------
prefix: str
The prefix to be added to the generated names.
"""
def __init__(self, prefix=""):
self.__init_handle_by_constructor__(_ffi_api.NameSupply, prefix)
def fresh_name(self, name, add_prefix=True):
"""Generates a unique name from this NameSupply.
Parameters
----------
name: String
The name from which the generated name is derived.
add_prefix: bool
If set to true, then the prefix of this NameSupply will be prepended to the name.
"""
return _ffi_api.NameSupply_FreshName(self, name, add_prefix)
def reserve_name(self, name, add_prefix=True):
"""Reserves an existing name with this NameSupply.
Parameters
----------
name: String
The name to be reserved.
add_prefix: bool
If set to true, then the prefix of this NameSupply will be prepended to the name
before reserving it.
"""
return _ffi_api.NameSupply_ReserveName(self, name, add_prefix)
def contains_name(self, name, add_prefix=True):
"""Checks if this NameSupply already generated a name.
Parameters
----------
name: String
The name to check.
add_prefix: bool
If set to true, then the prefix of this NameSupply will be prepended to the name
before checking for it.
"""
return _ffi_api.NameSupply_ContainsName(self, name, add_prefix)
@tvm._ffi.register_object("GlobalVarSupply")
class GlobalVarSupply(Object):
"""GlobalVarSupply that holds a mapping between names and GlobalVars.
GlobalVarSupply can be used to generate new GlobalVars with a unique name.
It also can be used to retrieve previously generated GlobalVars based on a name.
Parameters
----------
value: Union[List[IRModule], IRModule, NameSupply]
The IRModules used to build this GlobalVarSupply or a NameSupply.
"""
def __init__(self, value=None):
if value is None:
name_supply = NameSupply("")
self.__init_handle_by_constructor__(_ffi_api.GlobalVarSupply_NameSupply, name_supply)
elif isinstance(value, NameSupply):
self.__init_handle_by_constructor__(_ffi_api.GlobalVarSupply_NameSupply, value)
elif isinstance(value, (list, tvm.ir.container.Array)):
self.__init_handle_by_constructor__(_ffi_api.GlobalVarSupply_IRModules, value)
elif isinstance(value, IRModule):
self.__init_handle_by_constructor__(_ffi_api.GlobalVarSupply_IRModule, value)
def fresh_global(self, name, add_prefix=True):
"""Generates a unique GlobalVar from this supply.
Parameters
----------
name: String
The name from which the name of the GlobalVar is derived.
add_prefix: bool
If set to true, then the prefix of the contained NameSupply will be prepended
to the name.
"""
return _ffi_api.GlobalVarSupply_FreshGlobal(self, name, add_prefix)
def unique_global_for(self, name, add_prefix=True):
"""Looks up for a GlobalVar with the given name in this supply. If no entry is found
, creates one, places it in the cache and returns it.
Parameters
----------
name: String
The name of the GlobalVar to search for.
add_prefix: bool
If set to true, the prefix of the contained NameSupply will be prepended to the
name before performing the search.
"""
return _ffi_api.GlobalVarSupply_UniqueGlobalFor(self, name, add_prefix)
def reserve_global(self, global_var, allow_conflict=False):
"""Reserves an existing GlobalVar with this supply.
Parameters
----------
global_var: GlobalVar
The GlobalVar to be registered.
allow_conflict: bool
Allow conflict with other GlobalVars that have the same name
"""
return _ffi_api.GlobalVarSupply_ReserveGlobalVar(self, global_var, allow_conflict)
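# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): fresh_name deduplicates,
# so asking twice for "main" yields two distinct names.
def _demo_name_supply():
    supply = NameSupply(prefix="mod")
    first = supply.fresh_name("main")
    second = supply.fresh_name("main")
    assert first != second
    return first, second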
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/tensor_type.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Type relation and function for type checking."""
import tvm._ffi
from .type import Type
from . import _ffi_api
@tvm._ffi.register_object("relay.TensorType")
class TensorType(Type):
"""A concrete TensorType in Relay.
This is the type assigned to tensors with a known dtype and shape.
For example, a tensor of `float32` and `(5, 5)`.
Parameters
----------
shape : List[tvm.ir.PrimExpr]
The shape of the Tensor
dtype : Optional[str]
The content data type.
"""
def __init__(self, shape, dtype="float32"):
self.__init_handle_by_constructor__(_ffi_api.TensorType, shape, dtype)
@property
def concrete_shape(self):
"""Get shape of the type as concrete tuple of int.
Returns
-------
shape : List[int]
The concrete shape of the Type.
Raises
------
TypeError : If the shape is symbolic
"""
return tuple(int(x) for x in self.shape)
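# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a statically shaped
# TensorType exposes its dimensions as a concrete python tuple.
def _demo_tensor_type():
    ty = TensorType((5, 5), "float32")
    assert ty.concrete_shape == (5, 5)
    return ty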
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Common pass infrastructure across IR variants."""
import types
import inspect
import functools
import tvm._ffi
import tvm.runtime
from . import _ffi_transform_api
@tvm._ffi.register_object("transform.PassInfo")
class PassInfo(tvm.runtime.Object):
"""The class contains the meta data required by a pass. It is the
container of information needed by running an optimization or analysis.
This class can be extended by adding new members when more meta data is
needed.
Parameters
----------
opt_level : int
The optimization level of this pass.
name : str
The pass name.
required : List[str]
The list of passes that are required by a certain pass.
"""
def __init__(self, opt_level, name, required=None):
self.__init_handle_by_constructor__(_ffi_transform_api.PassInfo, opt_level, name, required)
@tvm._ffi.register_object("transform.PassContext")
class PassContext(tvm.runtime.Object):
"""The basis where a Relay optimization/analysis runs on.
Each pass context contains a number of auxiliary information that is used
to help an optimization pass. Such information includes the error reporter
to record the errors of during the optimization, etc.
opt_level : Optional[int]
The optimization level of this pass.
required_pass : Optional[Union[List[str], Set[str], Tuple[str]]]
The list of passes that are required by a certain pass.
disabled_pass : Optional[Union[List[str], Set[str], Tuple[str]]]
The list of passes that are disabled.
instruments : Optional[Sequence[PassInstrument]]
The list of pass instrument implementations.
config : Optional[Dict[str, Object]]
Additional configurations for specific passes.
"""
def __init__(
self,
opt_level=2,
required_pass=None,
disabled_pass=None,
instruments=None,
config=None,
):
required = list(required_pass) if required_pass else []
if not isinstance(required, (list, tuple)):
raise TypeError("required_pass is expected to be the type of " + "list/tuple/set.")
disabled = list(disabled_pass) if disabled_pass else []
if not isinstance(disabled, (list, tuple)):
raise TypeError("disabled_pass is expected to be the type of " + "list/tuple/set.")
instruments = list(instruments) if instruments else []
if not isinstance(instruments, (list, tuple)):
raise TypeError("instruments is expected to be the type of " + "list/tuple/set.")
config = config if config else None
self.__init_handle_by_constructor__(
_ffi_transform_api.PassContext, opt_level, required, disabled, instruments, config
)
def __enter__(self):
_ffi_transform_api.EnterPassContext(self)
return self
def __exit__(self, ptype, value, trace):
_ffi_transform_api.ExitPassContext(self)
def override_instruments(self, instruments):
"""Override instruments within this PassContext.
If there are existing instruments, their ``exit_pass_ctx`` callbacks are called.
Then the new instruments are swapped in and their ``enter_pass_ctx`` callbacks are called.
Parameters
----------
instruments : Sequence[PassInstrument]
The list of pass instrument implementations.
"""
_ffi_transform_api.OverrideInstruments(self, instruments)
@staticmethod
def current():
"""Return the current pass context."""
return _ffi_transform_api.GetCurrentPassContext()
@staticmethod
def list_configs():
"""List all registered `PassContext` configuration names and metadata.
Returns
-------
configs : Dict[str, Dict[str, str]]
"""
return _ffi_transform_api.ListConfigs()
@tvm._ffi.register_object("transform.Pass")
class Pass(tvm.runtime.Object):
"""The base class of all passes. All methods here are just simple wrappers
that are implemented in the backend. They are defined for users to
conveniently interact with the base class.
"""
@property
def info(self):
"""Get the pass meta."""
return _ffi_transform_api.Info(self)
def __call__(self, mod):
"""Execute the pass. Note that for sequential pass, the dependency among
different passes will be resolved in the backend.
Parameters
----------
mod : tvm.IRModule
The module that a certain optimization is performed on.
Returns
-------
mod : tvm.IRModule
The updated module after applying this pass.
"""
return _ffi_transform_api.RunPass(self, mod)
@tvm._ffi.register_object("transform.ModulePass")
class ModulePass(Pass):
"""A pass that works on tvm.IRModule. Users don't need to interact with
this class directly. Instead, a module pass should be created through
`module_pass`, because the design of the `module_pass` API is flexible
enough to handle the creation of a module pass in different manners. In
addition, all members of a module pass can be accessed from the base class.
The same rule applies to FunctionPass as well.
"""
@tvm._ffi.register_object("transform.Sequential")
class Sequential(Pass):
"""A pass that works on a sequence of pass objects. Multiple passes can be
executed sequentially using this class.
Note that users can also provide a series of passes that they don't want to
apply when running a sequential pass. Pass dependency will be resolved in
the backend as well.
Parameters
----------
passes : Optional[List[Pass]]
A sequence of passes candidate for optimization.
opt_level : Optional[int]
The optimization level of this sequential pass.
The opt_level of a default sequential pass is set to 0.
Note that some of the passes within the Sequential may still not be executed
if their opt_level is higher than the provided opt_level.
name : Optional[str]
The name of the sequential pass.
required : Optional[List[str]]
The list of passes that the sequential pass is dependent on.
"""
def __init__(self, passes=None, opt_level=0, name="sequential", required=None):
passes = passes if passes else []
if not isinstance(passes, (list, tuple)):
raise TypeError("passes must be a list of Pass objects.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of list/tuple.")
self.__init_handle_by_constructor__(
_ffi_transform_api.Sequential, passes, opt_level, name, required
)
def _wrap_class_module_pass(pass_cls, pass_info):
"""Wrap a python class as function pass"""
class PyModulePass(ModulePass):
"""Internal wrapper class to create a class instance."""
def __init__(self, *args, **kwargs):
# initialize handle in case pass_cls creation failed.
self.handle = None
inst = pass_cls(*args, **kwargs)
# it is important not to capture self to
# avoid a cyclic dependency
def _pass_func(mod, ctx):
return inst.transform_module(mod, ctx)
self.__init_handle_by_constructor__(
_ffi_transform_api.MakeModulePass, _pass_func, pass_info
)
self._inst = inst
def __getattr__(self, name):
# fall back to instance attribute if there is not any
return self._inst.__getattribute__(name)
functools.update_wrapper(PyModulePass.__init__, pass_cls.__init__)
PyModulePass.__name__ = pass_cls.__name__
PyModulePass.__doc__ = pass_cls.__doc__
PyModulePass.__module__ = pass_cls.__module__
return PyModulePass
def module_pass(pass_func=None, opt_level=None, name=None, required=None):
"""Decorate a module pass.
This function returns a callback when pass_func is provided.
Otherwise, it serves as a decorator.
pass_func can also be a class type with a method transform_module.
This function will create a decorated ModulePass using transform_module
as the pass function.
Parameters
----------
pass_func : Optional[Callable[(Module, PassContext) -> Module]]
The transformation function or class.
opt_level : int
The optimization level of this module pass.
name : Optional[str]
The name of the module pass. The name could be empty. In this case, the
name of the optimization function will be used as the pass name.
required : Optional[List[str]]
The list of passes that the module pass is dependent on.
Returns
-------
create_module_pass : Union[Callable, ModulePass]
A decorator will be returned if pass_func is not provided,
otherwise the decorated result is returned.
The returned decorator has two behaviors depending on the input:
A new ModulePass will be returned when we decorate a pass function.
A new ModulePass class will be returned when we decorate a class type.
Examples
--------
The following code block decorates a module pass class.
.. code-block:: python
@relay.transform.module_pass
class CustomPipeline:
def __init__(self, enable_fold):
self.enable_fold = enable_fold
self.cse = relay.transform.EliminateCommonSubexpr()
self.const_fold = relay.transform.FoldConstant()
def transform_module(self, mod, ctx):
mod = self.cse(mod, ctx)
if self.enable_fold:
mod = self.const_fold(mod, ctx)
return mod
# create an instance of customized pipeline
pipeline = CustomPipeline(enable_fold=False)
assert isinstance(pipeline, transform.ModulePass)
# run the pipeline.
output_module = pipeline(input_module)
The following code creates a module pass by decorating
a user defined transform function.
.. code-block:: python
@relay.transform.module_pass(opt_level=2)
def transform(mod, ctx):
tp = relay.TensorType((10,), "float32")
x = relay.var("x", tp)
gv = relay.GlobalVar("var")
func = relay.Function([x], relay.abs(x))
new_mod = tvm.IRModule({gv: func})
new_mod.update(mod)
return new_mod
module_pass = transform
assert isinstance(module_pass, transform.ModulePass)
assert module_pass.info.opt_level == 2
# Given a module m, the optimization could be invoked as the following:
updated_mod = module_pass(m)
# Now a function abs should be added to the module m.
"""
if opt_level is None:
raise ValueError("Please provide opt_level for the module pass.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of " + "list/tuple.")
def create_module_pass(pass_arg):
"""Internal function that creates a module pass"""
fname = name if name else pass_arg.__name__
info = PassInfo(opt_level, fname, required)
if inspect.isclass(pass_arg):
return _wrap_class_module_pass(pass_arg, info)
if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
raise TypeError("pass_func must be a callable for Module pass")
return _ffi_transform_api.MakeModulePass(pass_arg, info)
if pass_func:
return create_module_pass(pass_func)
return create_module_pass
def PrintIR(header="", show_meta_data=False):
"""A special trace pass that prints the header and IR.
Parameters
----------
header : str
The header to be displayed along with the dump.
show_meta_data : bool
A boolean flag to indicate if meta data should be printed.
Returns
-------
The pass
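Examples
--------
A short sketch that interleaves PrintIR into a pipeline for debugging,
assuming `mod` is an existing tvm.IRModule:
.. code-block:: python
    import tvm
    seq = tvm.transform.Sequential(
        [
            tvm.transform.PrintIR(header="before optimization"),
            # ... the passes under inspection ...
        ]
    )
    mod = seq(mod)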
"""
return _ffi_transform_api.PrintIR(header, show_meta_data)
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/type.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unified type system in the project."""
from enum import IntEnum
import tvm
import tvm._ffi
from .base import Node
from . import _ffi_api
class Type(Node):
"""The base class of all types."""
def __eq__(self, other):
"""Compare two types for structural equivalence."""
return bool(tvm.ir.structural_equal(self, other))
def __ne__(self, other):
return not self.__eq__(other)
def same_as(self, other):
"""Compares two Relay types by referential equality."""
return super().__eq__(other)
class TypeKind(IntEnum):
"""Possible kinds of TypeVars."""
Type = 0
ShapeVar = 1
BaseType = 2
Constraint = 4
AdtHandle = 5
TypeData = 6
@tvm._ffi.register_object("PrimType")
class PrimType(Type):
"""Primitive data type in the low level IR
Parameters
----------
dtype : str
The runtime data type that corresponds to the primitive type.
"""
def __init__(self, dtype):
self.__init_handle_by_constructor__(_ffi_api.PrimType, dtype)
@tvm._ffi.register_object("PointerType")
class PointerType(Type):
"""PointerType used in the low-level TIR.
Parameters
----------
element_type : tvm.ir.Type
The type of pointer's element.
storage_scope : str
The storage scope that the pointer points into.
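Examples
--------
A minimal sketch of a pointer to 32-bit floats in the "global" scope:
.. code-block:: python
    import tvm
    ptr_ty = tvm.ir.PointerType(tvm.ir.PrimType("float32"), "global")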
"""
def __init__(self, element_type, storage_scope=""):
self.__init_handle_by_constructor__(_ffi_api.PointerType, element_type, storage_scope)
@tvm._ffi.register_object("TypeVar")
class TypeVar(Type):
"""Type parameter in functions.
A type variable represents a type placeholder which will
be filled in later on. This allows the user to write
functions which are generic over types.
Parameters
----------
name_hint: str
The name of the type variable. This name only acts as a hint, and
is not used for equality.
kind : Optional[TypeKind]
The kind of the type parameter.
"""
def __init__(self, name_hint, kind=TypeKind.Type):
self.__init_handle_by_constructor__(_ffi_api.TypeVar, name_hint, kind)
def __call__(self, *args):
"""Create a type call from this type.
Parameters
----------
args: List[Type]
The arguments to the type call.
Returns
-------
call: Type
The result type call.
"""
# pylint: disable=import-outside-toplevel
from .type_relation import TypeCall
return TypeCall(self, args)
@tvm._ffi.register_object("GlobalTypeVar")
class GlobalTypeVar(Type):
"""A global type variable that is used for defining new types or type aliases.
Parameters
----------
name_hint: str
The name of the type variable. This name only acts as a hint, and
is not used for equality.
kind : Optional[TypeKind]
The kind of the type parameter.
"""
def __init__(self, name_hint, kind=TypeKind.AdtHandle):
self.__init_handle_by_constructor__(_ffi_api.GlobalTypeVar, name_hint, kind)
def __call__(self, *args):
"""Create a type call from this type.
Parameters
----------
args: List[Type]
The arguments to the type call.
Returns
-------
call: Type
The result type call.
"""
# pylint: disable=import-outside-toplevel
from .type_relation import TypeCall
return TypeCall(self, args)
@tvm._ffi.register_object("TupleType")
class TupleType(Type):
"""The type of tuple values.
Parameters
----------
fields : List[Type]
The fields in the tuple
"""
def __init__(self, fields):
self.__init_handle_by_constructor__(_ffi_api.TupleType, fields)
@tvm._ffi.register_object("TypeConstraint")
class TypeConstraint(Type):
"""Abstract class representing a type constraint."""
@tvm._ffi.register_object("FuncType")
class FuncType(Type):
"""Function type.
A function type consists of a list of type parameters to enable
the definition of generic functions,
a set of type constraints which we omit for the time being,
a sequence of argument types, and a return type.
We can informally write them as:
`forall (type_params), (arg_types) -> ret_type where type_constraints`
Parameters
----------
arg_types : List[tvm.relay.Type]
The argument types
ret_type : tvm.relay.Type
The return type.
type_params : Optional[List[tvm.relay.TypeVar]]
The type parameters
type_constraints : Optional[List[tvm.relay.TypeConstraint]]
The type constraints.
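Examples
--------
A sketch of a generic identity function type, `forall A. (A) -> A`:
.. code-block:: python
    import tvm
    a = tvm.ir.TypeVar("A")
    identity_ty = tvm.ir.FuncType([a], a, type_params=[a])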
"""
def __init__(self, arg_types, ret_type, type_params=None, type_constraints=None):
if type_params is None:
type_params = []
if type_constraints is None:
type_constraints = []
self.__init_handle_by_constructor__(
_ffi_api.FuncType, arg_types, ret_type, type_params, type_constraints
)
@tvm._ffi.register_object("IncompleteType")
class IncompleteType(Type):
"""Incomplete type during type inference.
Parameters
----------
kind : Optional[TypeKind]
The kind of the incomplete type.
"""
def __init__(self, kind=TypeKind.Type):
self.__init_handle_by_constructor__(_ffi_api.IncompleteType, kind)
@tvm._ffi.register_object("relay.RefType")
class RelayRefType(Type):
"""Reference Type in relay.
Parameters
----------
value: Type
The value type.
"""
def __init__(self, value):
self.__init_handle_by_constructor__(_ffi_api.RelayRefType, value)
| https://github.com/zk-ml/tachikoma |
python/tvm/ir/type_relation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Type relation and function for type checking."""
import tvm._ffi
from .type import Type, TypeConstraint
from . import _ffi_api
@tvm._ffi.register_object("TypeCall")
class TypeCall(Type):
"""Type function application.
Parameters
----------
func: tvm.ir.Type
The function.
args: List[tvm.ir.Type]
The arguments.
Returns
-------
type_call: TypeCall
The type function application.
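Examples
--------
A sketch applying a user-defined type constructor to an argument type;
calling the type variable is shorthand for constructing the TypeCall:
.. code-block:: python
    import tvm
    list_var = tvm.ir.GlobalTypeVar("List")
    elem_ty = tvm.ir.PrimType("float32")
    ty_call = list_var(elem_ty)  # same as TypeCall(list_var, [elem_ty])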
"""
def __init__(self, func, args):
self.__init_handle_by_constructor__(_ffi_api.TypeCall, func, args)
@tvm._ffi.register_object("TypeRelation")
class TypeRelation(TypeConstraint):
"""User defined type relation, it is an input-output relation on types.
TypeRelation is more generalized than TypeCall as it allows inference
of both inputs and outputs.
Parameters
----------
func : EnvFunc
User defined relation function.
args : [tvm.ir.Type]
List of types to the func.
num_inputs : int
Number of input arguments in args,
this acts as a hint for type inference.
attrs : Attrs
The attributes attached to the relation information.
Returns
-------
type_relation : tvm.ir.TypeRelation
The type relation.
"""
def __init__(self, func, args, num_inputs, attrs):
self.__init_handle_by_constructor__(_ffi_api.TypeRelation, func, args, num_inputs, attrs)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Package `tvm.meta_schedule`. The meta schedule infrastructure."""
from . import (
arg_info,
builder,
cost_model,
database,
feature_extractor,
measure_callback,
mutator,
postproc,
relay_integration,
runner,
schedule,
schedule_rule,
search_strategy,
space_generator,
tir_integration,
trace_apply,
)
from .builder import Builder
from .cost_model import CostModel
from .database import Database
from .extracted_task import ExtractedTask
from .feature_extractor import FeatureExtractor
from .measure_callback import MeasureCallback
from .mutator import Mutator
from .postproc import Postproc
from .profiler import Profiler
from .relay_integration import is_meta_schedule_enabled
from .runner import Runner
from .schedule_rule import ScheduleRule
from .search_strategy import MeasureCandidate, SearchStrategy
from .space_generator import SpaceGenerator
from .task_scheduler import TaskScheduler
from .tir_integration import tune_tir
from .tune import tune_tasks
from .tune_context import TuneContext
from .utils import derived_object
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.meta_schedule"""
from .._ffi import _init_api
_init_api("meta_schedule", __name__) # pylint: disable=protected-access
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/arg_info.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The argument information"""
from typing import Any, List, Union
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import DataType, Object, ShapeTuple
from tvm.tir import PrimFunc
from . import _ffi_api
from .utils import _json_de_tvm
@register_object("meta_schedule.ArgInfo")
class ArgInfo(Object):
"""Argument information"""
def as_json(self) -> Any:
"""Converts the ArgInfo to its corresponding JSON representation."""
return _json_de_tvm(_ffi_api.ArgInfoAsJSON(self)) # type: ignore # pylint: disable=no-member
@staticmethod
def from_json(json_obj: Any) -> "ArgInfo":
"""Parse the argument information from a JSON object.
Parameters
----------
json_obj : Any
The json object to parse.
Returns
-------
parsed : ArgInfo
The argument information parsed.
"""
return _ffi_api.ArgInfoFromJSON(json_obj) # type: ignore # pylint: disable=no-member
@staticmethod
def from_prim_func(func: PrimFunc) -> List["ArgInfo"]:
"""Extract a list of the argument information from PrimFunc.
Parameters
----------
func : PrimFunc
The PrimFunc to get argument information from.
Returns
-------
extracted : List[ArgInfo]
An array of the argument information derived.
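Examples
--------
A minimal sketch using a PrimFunc created from a TE workload:
.. code-block:: python
    from tvm import te
    A = te.placeholder((128,), dtype="float32", name="A")
    B = te.compute((128,), lambda i: A[i] + 1.0, name="B")
    func = te.create_prim_func([A, B])
    for info in ArgInfo.from_prim_func(func):
        print(info)  # one TensorInfo per argument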
"""
return _ffi_api.ArgInfoFromPrimFunc(func) # type: ignore # pylint: disable=no-member
@staticmethod
def from_entry_func(mod: IRModule, remove_preproc: bool = True) -> List["ArgInfo"]:
"""Extract a list of the argument information from the entry func of an IRModule.
Parameters
----------
mod : IRModule
The IRModule to get argument information from.
remove_preproc : bool
Whether to remove the preprocessing blocks.
Returns
-------
extracted : List[ArgInfo]
An array of the argument information derived.
"""
return _ffi_api.ArgInfoFromEntryFunc(mod, remove_preproc) # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.TensorInfo")
class TensorInfo(ArgInfo):
"""Tensor argument information
Parameters
----------
dtype : DataType
The data type of the tensor.
shape : ShapeTuple
The shape of the tensor.
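Examples
--------
A minimal sketch of constructing and round-tripping a TensorInfo:
.. code-block:: python
    info = TensorInfo("float32", [1, 3, 224, 224])
    json_obj = info.as_json()               # a JSON-serializable object
    restored = ArgInfo.from_json(json_obj)  # an equivalent TensorInfo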
"""
dtype: DataType
shape: ShapeTuple
def __init__(
self,
dtype: DataType,
shape: Union[ShapeTuple, List[int]],
) -> None:
"""Constructor
Parameters
----------
dtype : DataType
The data type of the tensor.
shape : ShapeTuple
The shape of the tensor.
"""
if isinstance(shape, ShapeTuple):
shape_tuple = shape
else:
shape_tuple = ShapeTuple(shape)
self.__init_handle_by_constructor__(
_ffi_api.TensorInfo, # type: ignore # pylint: disable=no-member
dtype,
shape_tuple,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/builder/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.builder package.
Meta Schedule builders that translate IRModule to runtime.Module,
and then export the built artifacts.
"""
from .builder import Builder, BuilderInput, BuilderResult, PyBuilder, create
from .local_builder import LocalBuilder
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/builder/builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule builders that translate IRModule to runtime.Module, and then export"""
from typing import Callable, Dict, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import NDArray, Object
from tvm.target import Target
from .. import _ffi_api
@register_object("meta_schedule.BuilderInput")
class BuilderInput(Object):
"""The builder's input.
Parameters
----------
mod : IRModule
The IRModule to be built.
target : Target
The target to be built for.
params: Optional[Dict[str, NDArray]]
The parameters for Relay build module
"""
mod: IRModule
target: Target
params: Optional[Dict[str, NDArray]]
def __init__(
self,
mod: IRModule,
target: Target,
params: Optional[Dict[str, NDArray]] = None,
) -> None:
"""Constructor.
Parameters
----------
mod : IRModule
The IRModule to be built.
target : Target
The target to be built for.
params: Optional[Dict[str, NDArray]]
The parameters for Relay build module
"""
self.__init_handle_by_constructor__(
_ffi_api.BuilderInput, # type: ignore # pylint: disable=no-member
mod,
target,
params,
)
@register_object("meta_schedule.BuilderResult")
class BuilderResult(Object):
"""The builder's result.
Parameters
----------
artifact_path : Optional[str]
The path to the artifact.
error_msg : Optional[str]
The error message.
"""
artifact_path: Optional[str]
error_msg: Optional[str]
def __init__(
self,
artifact_path: Optional[str],
error_msg: Optional[str],
) -> None:
"""Constructor.
Parameters
----------
artifact_path : Optional[str]
The path to the artifact.
error_msg : Optional[str]
The error message.
"""
self.__init_handle_by_constructor__(
_ffi_api.BuilderResult, # type: ignore # pylint: disable=no-member
artifact_path,
error_msg,
)
@register_object("meta_schedule.Builder")
class Builder(Object):
"""The abstract builder interface."""
BuilderType = Union["Builder", Literal["local"]]
def build(self, build_inputs: List[BuilderInput]) -> List[BuilderResult]:
"""Build the given inputs.
Parameters
----------
build_inputs : List[BuilderInput]
The inputs to be built.
Returns
-------
build_results : List[BuilderResult]
The results of building the given inputs.
"""
return _ffi_api.BuilderBuild(self, build_inputs) # type: ignore # pylint: disable=no-member
@staticmethod
def create( # pylint: disable=keyword-arg-before-vararg
kind: Literal["local"] = "local",
*args,
**kwargs,
) -> "Builder":
"""Create a Builder.
Parameters
----------
kind : Literal["local"]
The kind of the builder. For now, only "local" is supported.
Returns
-------
builder : Builder
The builder created.
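Examples
--------
A minimal sketch; extra keyword arguments are forwarded to LocalBuilder:
.. code-block:: python
    builder = Builder.create("local", max_workers=4, timeout_sec=60.0)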
"""
from . import LocalBuilder # pylint: disable=import-outside-toplevel
if kind == "local":
return LocalBuilder(*args, **kwargs) # type: ignore
raise ValueError(f"Unknown Builder: {kind}")
create = Builder.create # pylint: disable=invalid-name
@register_object("meta_schedule.PyBuilder")
class _PyBuilder(Builder):
"""
A TVM object builder to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PyBuilder
"""
def __init__(self, f_build: Callable = None):
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.BuilderPyBuilder, # type: ignore # pylint: disable=no-member
f_build,
)
class PyBuilder:
"""
An abstract builder with customized build method on the python-side.
This is the user facing class for function overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
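A minimal sketch of a custom builder; the class name and artifact path are
illustrative only:
.. code-block:: python
    from tvm.meta_schedule.utils import derived_object
    @derived_object
    class EchoBuilder(PyBuilder):
        def build(self, build_inputs):
            # pretend every input was built to the same artifact path
            return [BuilderResult("/tmp/fake.tar", None) for _ in build_inputs]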
"""
_tvm_metadata = {"cls": _PyBuilder, "methods": ["build"]}
def build(self, build_inputs: List[BuilderInput]) -> List[BuilderResult]:
"""Build the given inputs.
Parameters
----------
build_inputs : List[BuilderInput]
The inputs to be built.
Returns
-------
build_results : List[BuilderResult]
The results of building the given inputs.
"""
raise NotImplementedError
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/builder/local_builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Local builder that compile on the local host"""
import os
import tempfile
from typing import Callable, Dict, List, Optional, Union
from tvm._ffi import register_func
from tvm.ir import IRModule
from tvm.runtime import Module, NDArray, load_param_dict, save_param_dict
from tvm.target import Target
from ...contrib.popen_pool import MapResult, PopenPoolExecutor, StatusKind
from ..logging import get_logger
from ..utils import cpu_count, derived_object, get_global_func_with_default_on_worker
from .builder import BuilderInput, BuilderResult, PyBuilder
logger = get_logger(__name__) # pylint: disable=invalid-name
T_BUILD = Callable[ # pylint: disable=invalid-name
[IRModule, Target, Optional[Dict[str, NDArray]]], Module
]
T_EXPORT = Callable[[Module], str] # pylint: disable=invalid-name
def _serialize_params(params: Optional[Dict[str, NDArray]]) -> Optional[bytearray]:
if params is None:
return None
return save_param_dict(params)
def _deserialize_params(params: Optional[bytearray]) -> Optional[Dict[str, NDArray]]:
if params is None:
return None
return load_param_dict(params)
@derived_object
class LocalBuilder(PyBuilder):
"""A builder that builds the given input on local host.
Parameters
----------
pool : PopenPoolExecutor
The process pool to run the build.
max_workers: int
The max number of Popen workers.
timeout_sec : float
The timeout in seconds for the build.
initializer: Optional[Callable[[], None]]
The initializer function for each popen worker.
f_build : Union[None, str, T_BUILD]
Name of the build function to be used.
Defaults to `meta_schedule.builder.default_build`.
f_export : Union[None, str, T_EXPORT]
Name of the export function to be used.
Defaults to `meta_schedule.builder.default_export`.
Attributes
----------
T_BUILD : typing._GenericAlias
The signature of the function `f_build`, which is
.. code-block:: python
def default_build(
mod: IRModule,
target: Target,
params: Optional[Dict[str, NDArray]]
) -> Module:
...
T_EXPORT : typing._GenericAlias
The signature of the function `f_export`, which is
.. code-block:: python
def default_export(mod: Module) -> str:
...
Note
----
The build function and export function should be registered in the worker process.
The worker process is only aware of functions registered in the TVM package;
if there are extra functions to be registered,
please send the registration logic via the initializer.
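Examples
--------
A minimal usage sketch, assuming `mod` is an existing IRModule:
.. code-block:: python
    from tvm.target import Target
    builder = LocalBuilder(max_workers=4, timeout_sec=60.0)
    (result,) = builder.build([BuilderInput(mod, Target("llvm"))])
    assert result.error_msg is None, result.error_msg
    print(result.artifact_path)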
"""
max_workers: int
timeout_sec: float
initializer: Optional[Callable[[], None]]
f_build: Union[None, str, T_BUILD]
f_export: Union[None, str, T_EXPORT]
def __init__(
self,
*,
max_workers: Optional[int] = None,
timeout_sec: float = 30.0,
f_build: Union[None, str, T_BUILD] = None,
f_export: Union[None, str, T_EXPORT] = None,
initializer: Optional[Callable[[], None]] = None,
) -> None:
"""Constructor.
Parameters
----------
max_workers : Optional[int]
The maximum number of worker processes to be used.
Defaults to number of CPUs.
timeout_sec : float
The timeout in seconds for the build.
f_build : T_BUILD
Name of the build function to be used.
Defaults to `meta_schedule.builder.default_build`.
f_export : T_EXPORT
Name of the export function to be used.
Defaults to `meta_schedule.builder.default_export`.
initializer : Optional[Callable[[], None]]
The initializer to be used for the worker processes.
"""
super().__init__()
if max_workers is None:
max_workers = cpu_count(logical=True)
logger.info("LocalBuilder: max_workers = %d", max_workers)
self.max_workers = max_workers
self.timeout_sec = timeout_sec
self.initializer = initializer
self.f_build = f_build
self.f_export = f_export
self._sanity_check()
def build(self, build_inputs: List[BuilderInput]) -> List[BuilderResult]:
results: List[BuilderResult] = []
map_result: MapResult
# Here we restart the PopenPool every time because of a known memory leak issue with the
# PopenPool workers after repeated use. We don't apply the same to runners to
# avoid potential problems caused by async behaviour.
pool = PopenPoolExecutor(
max_workers=self.max_workers,
timeout=self.timeout_sec,
initializer=self.initializer,
)
# Dispatch the build inputs to the worker processes.
for map_result in pool.map_with_error_catching(
lambda x: _worker_func(*x),
[
(
self.f_build,
self.f_export,
build_input.mod,
build_input.target,
_serialize_params(build_input.params),
)
for build_input in build_inputs
],
):
if map_result.status == StatusKind.COMPLETE:
results.append(BuilderResult(map_result.value, None))
elif map_result.status == StatusKind.TIMEOUT:
results.append(
BuilderResult(
None,
f"LocalBuilder: Timeout, killed after {self.timeout_sec} seconds",
)
)
elif map_result.status == StatusKind.EXCEPTION:
results.append(
BuilderResult(
None,
"LocalBuilder: An exception occurred\n" + str(map_result.value),
)
)
else:
raise ValueError("Unreachable: unexpected result: {map_result}")
del pool
return results
def _sanity_check(self) -> None:
def _check(f_build, f_export) -> None:
get_global_func_with_default_on_worker(name=f_build, default=None)
get_global_func_with_default_on_worker(name=f_export, default=None)
# Same reason for the single use PopenPool as mentioned above
pool = PopenPoolExecutor(
max_workers=self.max_workers,
timeout=self.timeout_sec,
initializer=self.initializer,
)
value = pool.submit(_check, self.f_build, self.f_export)
value.result()
del pool
def _worker_func(
_f_build: Union[None, str, T_BUILD],
_f_export: Union[None, str, T_EXPORT],
mod: IRModule,
target: Target,
params: Optional[bytearray],
) -> str:
# Step 0. Get the registered functions
f_build: T_BUILD = get_global_func_with_default_on_worker(
_f_build,
default_build,
)
f_export: T_EXPORT = get_global_func_with_default_on_worker(
_f_export,
default_export,
)
# Step 1. Build the IRModule
rt_mod: Module = f_build(mod, target, _deserialize_params(params))
# Step 2. Export the Module
artifact_path: str = f_export(rt_mod)
return artifact_path
@register_func("meta_schedule.builder.default_build")
def default_build(mod: IRModule, target: Target, _params: Optional[Dict[str, NDArray]]) -> Module:
"""Default build function.
Parameters
----------
mod : IRModule
The IRModule to be built.
target : Target
The target to be built.
_params : Optional[Dict[str, NDArray]]
The parameters to be used for the build. Must be None.
Returns
-------
rt_mod : Module
The built Module.
"""
# pylint: disable=import-outside-toplevel
from tvm.driver import build as tvm_build
from tvm.tir.transform import RemoveWeightLayoutRewriteBlock
# pylint: enable=import-outside-toplevel
mod = RemoveWeightLayoutRewriteBlock(skip_ndarray_rewrite=True)(mod)
return tvm_build(mod, target=target)
@register_func("meta_schedule.builder.default_export")
def default_export(mod: Module) -> str:
"""Default export function.
Parameters
----------
mod : Module
The Module to be exported.
Returns
-------
artifact_path : str
The path to the exported Module.
"""
from tvm.contrib.tar import tar # pylint: disable=import-outside-toplevel
artifact_path = os.path.join(tempfile.mkdtemp(), "tvm_tmp_mod." + tar.output_format)
mod.export_library(artifact_path, tar)
return artifact_path
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/cost_model/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.cost_model package.
"""
from .cost_model import CostModel, PyCostModel
from .random_model import RandomModel
from .xgb_model import XGBModel
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/cost_model/cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule CostModel."""
import ctypes
from typing import Callable, List, Union
# isort: off
from typing_extensions import Literal
# isort: on
import numpy as np # type: ignore
from tvm._ffi import register_object
from tvm.runtime import Object
from .. import _ffi_api
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import _get_default_str
@register_object("meta_schedule.CostModel")
class CostModel(Object):
"""Cost model."""
CostModelType = Union["CostModel", Literal["xgb", "mlp", "random"]]
def load(self, path: str) -> None:
"""Load the cost model from given file location.
Parameters
----------
path : str
The file path.
"""
_ffi_api.CostModelLoad(self, path) # type: ignore # pylint: disable=no-member
def save(self, path: str) -> None:
"""Save the cost model to given file location.
Parameters
----------
path : str
The file path.
"""
_ffi_api.CostModelSave(self, path) # type: ignore # pylint: disable=no-member
def update(
self,
context: TuneContext,
candidates: List[MeasureCandidate],
results: List[RunnerResult],
) -> None:
"""Update the cost model given running results.
Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
results : List[RunnerResult]
The running results of the measure candidates.
"""
_ffi_api.CostModelUpdate(self, context, candidates, results) # type: ignore # pylint: disable=no-member
def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
"""Predict normalized score with the cost model.
Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
Returns
-------
result : np.ndarray
The predicted normalized score.
"""
n = len(candidates)
results = np.zeros(shape=(n,), dtype="float64")
_ffi_api.CostModelPredict( # type: ignore # pylint: disable=no-member
self,
context,
candidates,
results.ctypes.data_as(ctypes.c_void_p),
)
return results
@staticmethod
def create(
kind: Literal["xgb", "mlp", "random", "none"],
*args,
**kwargs,
) -> "CostModel":
"""Create a CostModel.
Parameters
----------
kind : Literal["xgb", "mlp", "random", "none"]
The kind of the cost model. Can be "xgb", "mlp", "random" or "none".
Returns
-------
cost_model : CostModel
The created cost model.
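Examples
--------
A minimal sketch, assuming `context` and `candidates` already exist:
.. code-block:: python
    cost_model = CostModel.create("random")
    scores = cost_model.predict(context, candidates)  # np.ndarray, float64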
"""
from . import RandomModel, XGBModel # pylint: disable=import-outside-toplevel
if kind == "xgb":
return XGBModel(*args, **kwargs) # type: ignore
if kind == "random":
return RandomModel(*args, **kwargs) # type: ignore
if kind == "mlp":
from .mlp_model import ( # type: ignore # pylint: disable=import-outside-toplevel
MLPModel,
)
return MLPModel(*args, **kwargs) # type: ignore
if kind == "none":
return None # no cost model required
raise ValueError(f"Unknown CostModel: {kind}")
create = CostModel.create # pylint: disable=invalid-name
@register_object("meta_schedule.PyCostModel")
class _PyCostModel(CostModel):
"""
A TVM object cost model to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PyCostModel
"""
def __init__(
self,
f_load: Callable = None,
f_save: Callable = None,
f_update: Callable = None,
predict_func: Callable = None,
f_as_string: Callable = None,
):
"""Constructor."""
def f_predict(context: TuneContext, candidates: List[MeasureCandidate], return_ptr) -> None:
n = len(candidates)
return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_double))
array_wrapper = np.ctypeslib.as_array(return_ptr, shape=(n,))
res = predict_func(context, candidates)
array_wrapper[:] = res
assert (
array_wrapper.dtype == "float64"
), "ValueError: Invalid data type returned from CostModel Predict!"
self.__init_handle_by_constructor__(
_ffi_api.CostModelPyCostModel, # type: ignore # pylint: disable=no-member
f_load,
f_save,
f_update,
f_predict,
f_as_string,
)
class PyCostModel:
"""
An abstract cost model with customized methods on the python-side.
This is the user facing class for function overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
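A minimal sketch of a custom cost model that scores candidates randomly;
the class name is illustrative only:
.. code-block:: python
    import numpy as np
    from tvm.meta_schedule.utils import derived_object
    @derived_object
    class RandomScoreModel(PyCostModel):
        def load(self, path): ...
        def save(self, path): ...
        def update(self, context, candidates, results): ...
        def predict(self, context, candidates):
            return np.random.rand(len(candidates)).astype("float64")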
"""
_tvm_metadata = {
"cls": _PyCostModel,
"methods": ["load", "save", "update", "predict", "__str__"],
}
def load(self, path: str) -> None:
"""Load the cost model from given file location.
Parameters
----------
path : str
The file path.
"""
raise NotImplementedError
def save(self, path: str) -> None:
"""Save the cost model to given file location.
Parameters
----------
path : str
The file path.
"""
raise NotImplementedError
def update(
self,
context: TuneContext,
candidates: List[MeasureCandidate],
results: List[RunnerResult],
) -> None:
"""Update the cost model given running results.
Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
results : List[RunnerResult]
The running results of the measure candidates.
"""
raise NotImplementedError
def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
"""Predict given the measure candidates.
Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
Returns
-------
result : np.ndarray
The predicted normalized score.
"""
raise NotImplementedError
def __str__(self) -> str:
"""Get the cost model as string with name.
Returns
-------
result : str
Get the cost model as string with name.
"""
return _get_default_str(self)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/cost_model/metric.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Cost model metrics for meta schedule"""
import numpy as np # type: ignore
def max_curve(trial_scores: np.ndarray) -> np.ndarray:
"""f(n) = max([s[i] fo i < n])
Parameters
----------
trial_scores : List[float]
the score of i-th trial
Returns
-------
curve : np.ndarray
A vector, the max-curve function values
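Examples
--------
The curve is the running maximum of the scores
(equivalent to `np.maximum.accumulate`):
.. code-block:: python
    >>> max_curve(np.array([0.2, 0.1, 0.5, 0.3]))
    array([0.2, 0.2, 0.5, 0.5])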
"""
ret = np.empty(len(trial_scores))
keep = -1e9
for i, score in enumerate(trial_scores):
keep = max(keep, score)
ret[i] = keep
return ret
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/cost_model/mlp_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# type: ignore[import]
"""
Segment Sum MLP cost model
"""
import glob
import math
import os
import random
import tempfile
from collections import OrderedDict
from itertools import chain as itertools_chain
from typing import Dict, List, NamedTuple, Optional, Tuple
import numpy as np # type: ignore
import torch # type: ignore
import tvm
from ...contrib.tar import tar, untar
from ...runtime import NDArray
from ...target import Target
from ..cost_model import PyCostModel
from ..database import JSONDatabase
from ..feature_extractor import FeatureExtractor, PerStoreFeature
from ..logging import get_logger
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import derived_object, shash2hex
logger = get_logger("mlp_model") # pylint: disable=invalid-name
# pylint: disable=no-member,import-outside-toplevel
class SegmentSumMLPConfig(NamedTuple):
"""SegmentSum MLP model configuration
Parameters
----------
input_dim : int
The input dim for the model.
hidden_dim : int
The hidden dim for the model.
output_dim : int
The output dim for the model.
use_norm : bool
Whether to normalize the segment sum or not.
use_sigmoid : bool
Whether to use sigmoid on the final output or not.
"""
input_dim: int = 172
hidden_dim: int = 256
output_dim: int = 1
use_norm: bool = False
use_sigmoid: bool = False
def to_dict(self): # pylint: disable=missing-function-docstring
return {
"input_dim": self.input_dim,
"hidden_dim": self.hidden_dim,
"output_dim": self.output_dim,
"use_norm": self.use_norm,
"use_sigmoid": self.use_sigmoid,
}
class TrainerConfig(NamedTuple):
"""Trainer configuration
Parameters
----------
batch_size : int
The batch size.
learning_rate : float
The learning rate.
weight_decay : float
The weight decay.
num_epoch_full : int
The number of epochs used in full training.
num_epoch_incremental : int
The number of epochs used in incremental training.
grad_clip_norm: float
The norm of gradient clipping.
train_verbose: int
The verbose frequency for training in batches.
test_interval: int
The testing interval in epochs.
test_split: float
The fraction of data for testing.
frozen: bool
Whether to freeze the model, i.e., skip re-training.
"""
batch_size: int = 128
learning_rate: float = 7e-4
weight_decay: float = 1e-6
num_epoch_full: int = 50
num_epoch_incremental: int = 5
grad_clip_norm: float = 0.5
train_verbose: int = 1000
test_interval: int = 1
test_split: float = 0.2
frozen: bool = False
def to_dict(self): # pylint: disable=missing-function-docstring
return {
"batch_size": self.batch_size,
"learning_rate": self.learning_rate,
"weight_decay": self.weight_decay,
"num_epoch_full": self.num_epoch_full,
"num_epoch_incremental": self.num_epoch_incremental,
"grad_clip_norm": self.grad_clip_norm,
"train_verbose": self.train_verbose,
"test_interval": self.test_interval,
"test_split": self.test_split,
"frozen": self.frozen,
}
# pylint: disable=too-few-public-methods
class FeatureGroup:
"""Feature group
Parameters
----------
group_hash : str
The hash of the group
features : List[np.ndarray]
The features
costs : List[float]
The costs
min_cost : float
The minimum cost
"""
group_hash: str
features: List[np.ndarray]
costs: np.ndarray
min_cost: float
def __init__(
self,
group_hash: str,
features: List[np.ndarray],
costs: np.ndarray,
) -> None:
self.group_hash = group_hash
self.features = features
self.costs = costs
self.min_cost = np.min(costs)
def append( # pylint: disable=missing-function-docstring
self,
features: List[np.ndarray],
costs: np.ndarray,
) -> None:
self.features.extend(features)
self.costs = np.append(self.costs, costs)
self.min_cost = np.min(self.costs)
# pylint: disable=too-many-instance-attributes
class SegmentDataLoader:
"""Dataloader for Segment Sum MLP model.
Parameters
----------
features : List[np.ndarray]
The features
results : np.ndarray
The measured results, can be None.
batch_size : int
The batch size
shuffle : bool
Whether to shuffle the dataset or not
"""
def __init__(
self,
features,
results=None,
batch_size=128,
shuffle=True,
):
self.batch_size = batch_size
self.shuffle = shuffle
self.data_size = len(features)
# flatten features and store the starting indices
self.segment_sizes = torch.tensor([len(feature) for feature in features], dtype=torch.int32)
self.feature_offsets = (
torch.cumsum(self.segment_sizes, 0, dtype=torch.int32) - self.segment_sizes
)
features = torch.cat([torch.tensor(feature) for feature in features])
norm, _ = features.max(dim=0)
norm[norm == 0] = 1
self.features = features / norm
self.results = torch.tensor(results) if results is not None else None
self.iter_order = self.pointer = None
def __len__(self):
return self.data_size
def __iter__(self):
if self.shuffle:
self.iter_order = torch.randperm(self.data_size)
else:
self.iter_order = torch.arange(self.data_size)
self.pointer = 0
return self
def __next__(self):
if self.pointer >= self.data_size:
raise StopIteration
batch_indices = self.iter_order[self.pointer : self.pointer + self.batch_size]
self.pointer += self.batch_size
return self._fetch_indices(batch_indices)
def _fetch_indices(self, indices):
segment_sizes, feature_offsets = self.segment_sizes[indices], self.feature_offsets[indices]
feature_indices = torch.empty(segment_sizes.sum(), dtype=torch.int32)
idx = 0
for offset, seg_size in zip(feature_offsets, segment_sizes):
feature_indices[idx : idx + seg_size] = torch.arange(offset, offset + seg_size)
idx += seg_size
features = self.features[feature_indices.long()]
results = None
if self.results is not None:
results = self.results[indices.long()]
return segment_sizes, features, results
def lambda_rank_loss( # pylint: disable=too-many-locals
preds: "torch.Tensor",
labels: "torch.Tensor",
k: int = None,
eps: float = 1e-10,
sigma: float = 1.0,
) -> "torch.Tensor":
"""
LambdaLoss: Metric-Driven Loss for Learning-to-Rank
Parameters
----------
preds : Tensor
The predicted runtime for each candidate.
labels : Tensor
The measured runtime for each candidate.
k : int
Loss for top k.
Default is None, which means computing all scores.
eps : float
The minimum value to the denominator and argument of log if they reach 0.
sigma : float
The scaling factor to the input of the sigmoid function.
Returns
-------
loss : Tensor
The lambda rank loss.
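Examples
--------
A toy sketch on random scores:
.. code-block:: python
    preds = torch.randn(8, requires_grad=True)
    labels = torch.rand(8)
    loss = lambda_rank_loss(preds, labels)
    loss.backward()  # differentiable w.r.t. the predictions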
"""
device = preds.device
y_pred, y_true = preds[None, :], labels[None, :]
y_pred_sorted, indices_pred = y_pred.sort(descending=True, dim=-1)
y_true_sorted, _ = y_true.sort(descending=True, dim=-1)
true_sorted_by_preds = torch.gather(y_true, dim=1, index=indices_pred)
true_diffs = true_sorted_by_preds[:, :, None] - true_sorted_by_preds[:, None, :]
padded_pairs_mask = torch.isfinite(true_diffs) & (true_diffs > 0)
ndcg_at_k_mask = torch.zeros(
(y_pred.shape[1], y_pred.shape[1]), dtype=torch.bool, device=device
)
ndcg_at_k_mask[:k, :k] = 1
true_sorted_by_preds.clamp_(min=0.0)
y_true_sorted.clamp_(min=0.0)
pos_idxs = torch.arange(1, y_pred.shape[1] + 1).to(device)
D = torch.log2(1.0 + pos_idxs.float())[None, :] # pylint: disable=invalid-name
maxDCGs = torch.sum( # pylint: disable=invalid-name
((torch.pow(2, y_true_sorted) - 1) / D)[:, :k], dim=-1
).clamp(min=eps)
G = (torch.pow(2, true_sorted_by_preds) - 1) / maxDCGs[:, None] # pylint: disable=invalid-name
weights = torch.abs(
torch.pow(D[:, :, None], -1.0) - torch.pow(D[:, None, :], -1.0)
) * torch.abs(G[:, :, None] - G[:, None, :])
scores_diffs = (y_pred_sorted[:, :, None] - y_pred_sorted[:, None, :]).clamp(min=-1e8, max=1e8)
scores_diffs[torch.isnan(scores_diffs)] = 0.0
weighted_probs = (torch.sigmoid(sigma * scores_diffs).clamp(min=eps) ** weights).clamp(min=eps)
losses = torch.log2(weighted_probs)
masked_losses = losses[padded_pairs_mask & ndcg_at_k_mask]
loss = -torch.sum(masked_losses)
return loss
def topk_score(
pred_results: "torch.Tensor",
gt_results: "torch.Tensor",
k: int,
) -> float:
"""
Evaluate the top-k score
Parameters
----------
pred_results: Tensor
The raw prediction
gt_results: Tensor
The measured results
k : int
The k in top k score
Returns
-------
score : float
The top-k score
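Examples
--------
With k=1, the score is 1.0 exactly when the best-predicted candidate is
also the fastest measured one:
.. code-block:: python
    pred = torch.tensor([0.3, 0.1, 0.4])   # predicted, lower is better
    gt = torch.tensor([0.30, 0.12, 0.45])  # measured runtimes
    topk_score(pred, gt, k=1)  # picks index 1: 0.12 / 0.12 = 1.0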
"""
k = min(k, len(pred_results))
topk_indices = torch.topk(pred_results, k, largest=False).indices
score = gt_results.min() / gt_results[topk_indices].min()
return score.item()
class SegmentSumMLP(torch.nn.Module):
"""Segment Sum MLP model.
Parameters
----------
input_dim : int
The input dim for the model.
hidden_dim : int
The hidden dim for the model.
output_dim : int
The output dim for the model.
use_norm : bool
Whether to normalize the segment sum or not.
use_sigmoid : bool
Whether to use sigmoid on the final output or not.
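Examples
--------
A toy forward pass over two segments of sizes 3 and 2:
.. code-block:: python
    model = SegmentSumMLP(input_dim=172)
    segment_sizes = torch.tensor([3, 2])
    features = torch.randn(5, 172)
    scores = model(segment_sizes, features)  # shape: (2,)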
"""
input_dim: int
hidden_dim: int
output_dim: int
use_norm: bool
use_sigmoid: bool
def __init__( # pylint: disable=too-many-arguments
self,
input_dim: int = 172,
hidden_dim: int = 256,
output_dim: int = 1,
use_norm: bool = False,
use_sigmoid: bool = False,
):
from torch import nn # type: ignore
super().__init__()
self.encoder = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
)
self.norm = nn.BatchNorm1d(hidden_dim) if use_norm else nn.Identity()
self.layer0 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
)
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
)
self.decoder = nn.Linear(hidden_dim, output_dim)
self.sigmoid = nn.Sigmoid() if use_sigmoid else nn.Identity()
def forward( # pylint: disable=missing-function-docstring
self,
segment_sizes: "torch.Tensor",
features: "torch.Tensor",
) -> "torch.Tensor":
n_seg = len(segment_sizes)
encoded_features = self.encoder(features)
segment_indices = torch.repeat_interleave(
torch.arange(n_seg, device=features.device),
segment_sizes.long(),
)
n_dim = encoded_features.shape[1]
segment_sum = torch.scatter_add(
input=torch.zeros((n_seg, n_dim), dtype=encoded_features.dtype, device=features.device),
dim=0,
index=segment_indices.view(-1, 1).expand(-1, n_dim),
src=encoded_features,
)
out = self.norm(segment_sum)
out = self.layer0(out) + out
out = self.layer1(out) + out
out = self.decoder(out).squeeze()
out = self.sigmoid(out)
return out
def extract_features(
context: TuneContext,
candidates: List[MeasureCandidate],
results: Optional[List[RunnerResult]] = None,
extractor: Optional[FeatureExtractor] = None,
):
"""Extract feature vectors and compute mean costs.
Parameters
----------
context: TuneContext
The tuning context.
candidates: List[MeasureCandidate]
The measure candidates.
results: Optional[List[RunnerResult]]
The measured results, can be None if used in prediction.
extractor: Optional[FeatureExtractor]
The feature extractor.
Returns
-------
new_features: List[np.ndarray]
The extracted features.
new_mean_costs: np.ndarray
The per-candidate costs (median of run seconds; 1e10 when measurement failed).
"""
extractor = extractor or PerStoreFeature(extract_workload=True)
def _feature(feature: NDArray) -> np.ndarray:
return feature.numpy().astype("float32")
def _mean_cost(res: RunnerResult) -> float:
if not res.run_secs:
return 1e10
return float(np.median([float(s) for s in res.run_secs]))
new_features = [_feature(x) for x in extractor.extract_from(context, candidates)]
new_mean_costs = (
np.array([_mean_cost(x) for x in results]).astype("float32")
if results is not None
else None
)
return new_features, new_mean_costs
class State:
"""State of the trainer
Parameters
----------
model: SegmentSumMLP
The cost model.
data: Dict[str, FeatureGroup]
The data groups.
data_size: int
The size of all data.
untrained_size: int
The size of the untrained data.
"""
model: SegmentSumMLP
data: Dict[str, FeatureGroup]
data_size: int
untrained_size: int
def __init__(
self,
model_config: Optional[SegmentSumMLPConfig] = None,
extractor: Optional[FeatureExtractor] = None,
):
model_config = model_config or SegmentSumMLPConfig()
extractor = extractor or PerStoreFeature(extract_workload=True)
self.model = SegmentSumMLP(**model_config.to_dict())
self.data = OrderedDict()
self.data_size = 0
self.untrained_size = 0
self.extractor = extractor
def load( # pylint: disable=too-many-locals
self,
path: str,
target: str = "nvidia/nvidia-v100",
) -> None:
"""Load the cached model, cached features, or raw data.
Parameters
----------
path: str
The path to the tar file containing cached model, cached features,
or raw data.
target: str
The target for the tuning context.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model_path = os.path.join(tmp_dir, "model.pth")
cache_path = os.path.join(tmp_dir, "cached_data.npy")
raw_path = os.path.join(tmp_dir, "raw_data")
untar(path, tmp_dir)
if os.path.exists(model_path):
self.model.load_state_dict(torch.load(model_path))
if os.path.exists(cache_path):
for group_hash, features, costs in np.load(cache_path, allow_pickle=True):
self.data[group_hash] = FeatureGroup(
group_hash=group_hash,
features=list(features),
costs=costs,
)
self.data_size += len(costs)
self.untrained_size += len(costs)
elif os.path.exists(raw_path):
from tqdm import tqdm # type: ignore
model_dirs = glob.glob(os.path.join(raw_path, "*"))
workload_paths = []
for model_dir in model_dirs:
json_files = glob.glob(os.path.join(model_dir, "*.json"))
for json_file in json_files:
if json_file.endswith("_workload.json"):
workload_paths.append(json_file)
for workload_path in tqdm(workload_paths):
try:
database = JSONDatabase(
path_workload=workload_path,
path_tuning_record=workload_path.replace(
"_workload.json", "_candidates.json"
),
)
except tvm._ffi.base.TVMError: # pylint: disable=protected-access
continue
candidates, results = [], []
tuning_records = database.get_all_tuning_records()
if len(tuning_records) == 0:
continue
for record in tuning_records:
candidates.append(record.as_measure_candidate())
results.append(RunnerResult(run_secs=record.run_secs, error_msg=None))
assert len(candidates) == len(results)
context = TuneContext(mod=tuning_records[0].workload.mod, target=Target(target))
features, mean_costs = extract_features(
context, candidates, results, self.extractor
)
self.add_to_group(features, mean_costs, shash2hex(context.mod))
def save(self, path: str) -> None:
"""Cache the model and data.
Parameters
----------
path: str
The path to the cached tar file.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model_path = os.path.join(tmp_dir, "model.pth")
cache_path = os.path.join(tmp_dir, "cached_data.npy")
torch.save(self.model.state_dict(), model_path)
data = [
(
g.group_hash,
g.features,
g.costs,
)
for g in self.data.values()
]
np.save(
file=cache_path,
arr=np.array(data, dtype=object),
)
tar(path, [x for x in [model_path, cache_path] if x is not None])
logger.info("Saved MLPModel to %s", path)
def add_to_group(
self,
features: List[np.ndarray],
costs: np.ndarray,
group_hash: str,
):
"""Add features and costs to the data groups with key group_hash.
Parameters
----------
features: List[np.ndarray]
The feature vectors.
costs: np.ndarray
The measured results.
group_hash: str
The structural hash of the candidates.
"""
group = self.data.get(group_hash, None)
if group is None:
group = FeatureGroup(
group_hash=group_hash,
features=features,
costs=costs,
)
else:
group.append(features, costs)
self.data[group_hash] = group
self.data_size += len(features)
self.untrained_size += len(features)
class SegmentSumMLPTrainer:
"""The trainer for Segment Sum MLP model.
Parameters
----------
state: State
The state of the trainer.
batch_size : int
The batch size.
learning_rate : float
The learning rate.
weight_decay : float
The weight decay.
num_epoch_full : int
The number of epochs used in full training.
num_epoch_incremental : int
The number of epochs used in incremental training.
grad_clip_norm: float
The norm of gradient clipping.
train_verbose: int
The verbose frequency for training in batches.
test_interval: int
The testing interval in epochs.
test_split: float
The fraction of data for testing.
frozen: bool
Whether to freeze the model, i.e., skip re-training.
optimizer: "torch.optim.adam.Adam"
The optimizer.
scheduler: "torch.optim.lr_scheduler.StepLR"
The scheduler.
"""
state: State
batch_size: int = 128
learning_rate: float = 7e-4
weight_decay: float = 1e-6
num_epoch_full: int = 50
num_epoch_incremental: int = 5
grad_clip_norm: float = 0.5
train_verbose: int = 1000
test_interval: int = 1
test_split: float = 0.2
frozen: bool = False
optimizer: "torch.optim.adam.Adam" # type: ignore
scheduler: "torch.optim.lr_scheduler.StepLR" # type: ignore

    def __init__(
self,
train_config: Optional[TrainerConfig] = None,
state: Optional[State] = None,
):
train_config = train_config or TrainerConfig()
state = state or State()
config = train_config.to_dict()
for attr in config:
setattr(self, attr, config[attr])
self.state = state
self.device = "cuda" if torch.cuda.device_count() else "cpu"
self.optimizer, self.scheduler = None, None

    def train_step(
        self,
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"],
        batch: int = 0,
        train_loss: Optional[float] = None,
    ) -> float:
        """Helper function for training on a single batch.

Parameters
----------
data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]
A batch of data, should be a tuple of (segment_sizes, features, gt_results).
batch: int = 0
The current batch number.
train_loss: Optional[float] = None
The previous averaged training loss, None if it is the first batch.

        Returns
-------
train_loss: float
The averaged training loss after the current batch.
"""
segment_sizes, features, gt_results = (
data[0].to(self.device),
data[1].to(self.device),
data[2].to(self.device),
)
self.optimizer.zero_grad()
pred_results = self.state.model(segment_sizes, features)
loss = lambda_rank_loss(pred_results, gt_results)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.state.model.parameters(), self.grad_clip_norm)
self.optimizer.step()
loss = loss.detach().cpu()
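        # exponential moving average of the per-batch loss (decay 0.95), used for logging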
train_loss = (
train_loss * 0.95 + loss.item() * 0.05 if train_loss is not None else loss.item()
)
segment_sizes, features, gt_results, pred_results = (
segment_sizes.detach().cpu(),
features.detach().cpu(),
gt_results.detach().cpu(),
pred_results.detach().cpu(),
)
if batch % self.train_verbose == 0:
logger.info("Batch: %d, train loss: %6f", batch, train_loss)
return train_loss

    def predict_step(
        self,
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"],
    ):
        """Helper function for predicting (validating) on a single batch.

Parameters
----------
data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]
A batch of data, should be a tuple of (segment_sizes, features, gt_results).
gt_results can be None if it is used for predicting.

        Returns
-------
pred_results: np.ndarray
The predicted results for the current batch.
test_loss_batch: float
If used for validation, return the test loss for the current batch.
test_scores_batch: List[float]
If used for validation, return the topk scores for the current batch.
"""
test_loss_batch, test_scores_batch = None, []
segment_sizes, features = (
data[0].to(self.device),
data[1].to(self.device),
)
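        # gt_results may be None when the loader was built without measured results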
gt_results = data[2]
pred_results = self.state.model(segment_sizes, features)
segment_sizes, features, pred_results = (
segment_sizes.detach().cpu(),
features.detach().cpu(),
pred_results.detach().cpu(),
)
if gt_results is not None:
test_loss_batch = lambda_rank_loss(pred_results, gt_results).item()
for k in [1, 5, 10]:
test_scores_batch.append(topk_score(pred_results, gt_results, k))
return pred_results.numpy(), test_loss_batch, test_scores_batch

    def train_full(self):  # pylint: disable=too-many-locals
        """Training on the full dataset."""
        # split into training and testing sets by workload group, so candidates
        # from the same workload never end up in both sets
keys = list(self.state.data.keys())
test_keys = random.sample(keys, k=math.floor(len(keys) * self.test_split))
train_data = OrderedDict()
test_data = OrderedDict()
for key in keys:
if key in test_keys:
test_data[key] = self.state.data[key]
else:
train_data[key] = self.state.data[key]
train_features = list(
itertools_chain.from_iterable([g.features for g in train_data.values()])
)
test_features = list(
itertools_chain.from_iterable([g.features for g in test_data.values()])
)
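        # normalize per group: score = min_cost / cost, so the best candidate scores 1.0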
train_results = np.concatenate([g.min_cost / g.costs for g in train_data.values()])
test_results = np.concatenate([g.min_cost / g.costs for g in test_data.values()])
train_loader = SegmentDataLoader(
train_features, train_results, batch_size=self.batch_size, shuffle=True
)
test_loader = SegmentDataLoader(
test_features, test_results, batch_size=self.batch_size, shuffle=False
)
self.optimizer = torch.optim.Adam(
self.state.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay
)
self.scheduler = torch.optim.lr_scheduler.StepLR(
self.optimizer,
step_size=self.num_epoch_full // 10,
gamma=0.8,
verbose=True,
)
self.state.model = self.state.model.to(self.device)
min_test_loss = 1e10
logger.info("Training size: %d; Testing size: %d", len(train_loader), len(test_loader))
model_cache_path = tempfile.NamedTemporaryFile().name # pylint: disable=consider-using-with
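        # the checkpoint with the lowest test loss is kept here and restored at the end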
for epoch in range(self.num_epoch_full):
logger.info("Epoch: %d", epoch)
# training
self.state.model.train()
train_loss = None
for batch, data in enumerate(train_loader):
train_loss = self.train_step(data, batch, train_loss)
self.scheduler.step()
# testing
if epoch % self.test_interval == 0:
self.state.model.eval()
test_losses, test_scores = [], []
for data in test_loader:
_, test_loss_batch, test_scores_batch = self.predict_step(data)
test_losses.append(test_loss_batch)
test_scores.append(test_scores_batch)
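                # average the loss over full batches only; the last batch may be smaller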
test_loss = (
np.array(test_losses[:-1]).mean() if len(test_losses) > 1 else test_losses[0]
)
logger.info(
"Average test loss: %6f, top1 score: %5f, top5 score: %5f, top10 score: %5f",
test_loss,
np.array(test_scores)[:, 0].mean(),
np.array(test_scores)[:, 1].mean(),
np.array(test_scores)[:, 2].mean(),
)
if test_loss < min_test_loss:
min_test_loss = test_loss
torch.save(self.state.model.state_dict(), model_cache_path)
self.state.model.to("cpu").load_state_dict(torch.load(model_cache_path))
self.state.untrained_size = 0

    def train_incremental(
        self,
        features: List[np.ndarray],
        results: np.ndarray,
    ):
        """Training on incremental data.

Parameters
----------
features: List[np.ndarray]
The extracted features.
results: np.ndarray
The measured results.
"""
results = np.min(results) / results
loader = SegmentDataLoader(features, results, batch_size=self.batch_size, shuffle=True)
self.optimizer = torch.optim.Adam(
self.state.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay
)
self.state.model = self.state.model.to(self.device)
logger.info("Incremental training size: %d", len(loader))
for epoch in range(self.num_epoch_incremental):
logger.info("Epoch: %d", epoch)
self.state.model.train()
loss = None
for batch, data in enumerate(loader):
loss = self.train_step(data, batch, loss)
self.state.model.to("cpu")
self.state.untrained_size = max(0, self.state.untrained_size - len(loader))

    def predict_incremental(
        self,
        features: List[np.ndarray],
        results: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """Predicting (validating) on incremental data.

Parameters
----------
features: List[np.ndarray]
The extracted features.
results: Optional[np.ndarray]
The measured results, can be None if used for predicting.

        Returns
-------
pred_results: np.ndarray
The predicted results.
"""
if results is not None:
results = np.min(results) / results
loader = SegmentDataLoader(features, results, batch_size=self.batch_size, shuffle=False)
self.state.model = self.state.model.to(self.device).eval()
logger.info("Incremental testing size: %d", len(loader))
pred_results, losses, scores = [], [], []
for data in loader:
pred_results_batch, losses_batch, scores_batch = self.predict_step(data)
pred_results.append(pred_results_batch)
losses.append(losses_batch)
scores.append(scores_batch)
pred_results = np.concatenate(pred_results)
if results is not None:
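            # as in train_full, the possibly smaller last batch is excluded from the mean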
losses = np.array(losses[:-1]).mean() if len(losses) > 1 else losses[0]
logger.info(
"Average test loss: %6f, top1 score: %5f, top5 score: %5f, top10 score: %5f",
losses,
np.array(scores)[:, 0].mean(),
np.array(scores)[:, 1].mean(),
np.array(scores)[:, 2].mean(),
)
return pred_results

    def update(
        self,
        features: List[np.ndarray],
        costs: np.ndarray,
        group_hash: str,
    ):
        """Update the dataset and re-train the model if not frozen.

Parameters
----------
features: List[np.ndarray]
The extracted features.
costs: np.ndarray
The measured results.
group_hash: str
The hash of the group.
"""
self.state.add_to_group(features, costs, group_hash)
if not self.frozen:
self.predict_incremental(features, costs)
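            # retrain from scratch once untrained samples exceed 20% of the dataset;
            # otherwise fine-tune incrementally on just the new batch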
if self.state.untrained_size / self.state.data_size > 0.2:
self.train_full()
else:
self.train_incremental(features, costs)


@derived_object
class MLPModel(PyCostModel):
    """Segment Sum MLP Model

Parameters
----------
trainer: SegmentSumMLPTrainer
The trainer for the model, handling the training interface.
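
    Examples
    --------
    A minimal usage sketch (assumes ``context``, ``candidates`` and
    ``results`` are provided by the surrounding tuning pipeline):

    .. code-block:: python

        model = MLPModel()
        model.update(context, candidates, results)  # add data, maybe re-train
        scores = model.predict(context, candidates)  # higher score is better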
"""

    trainer: SegmentSumMLPTrainer

    def __init__(
self,
*,
trainer: Optional[SegmentSumMLPTrainer] = None,
):
super().__init__()
self.trainer = trainer or SegmentSumMLPTrainer()

    def load(self, path: str) -> None:
        """Load the cost model, cached data or raw data from given file location.

Parameters
----------
path : str
The file path.
"""
self.trainer.state.load(path)

    def save(self, path: str) -> None:
        """Save the cost model and data to given file location.

Parameters
----------
path : str
The file path.
"""
self.trainer.state.save(path)

    def update(
        self,
        context: TuneContext,
        candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Update the dataset, re-train the cost model if not frozen.

Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
results : List[RunnerResult]
The running results of the measure candidates.
"""
features, mean_costs = extract_features(
context, candidates, results, self.trainer.state.extractor
)
self.trainer.update(features, mean_costs, shash2hex(context.mod))

    def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
        """Predict given the measure candidates.

Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.

        Returns
        -------
result : np.ndarray
The predicted normalized score.
"""
features, _ = extract_features(context, candidates, None, self.trainer.state.extractor)
pred_results = self.trainer.predict_incremental(features)
return pred_results
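

# A minimal offline-training sketch (illustrative only: the tar paths are
# placeholders, and `State.load` is assumed to use its default target and the
# data layout produced by `State.save` or raw JSONDatabase dumps in the tar):
if __name__ == "__main__":
    _trainer = SegmentSumMLPTrainer()
    _trainer.state.load("/path/to/dataset.tar")  # raw dumps or a cached tar
    _trainer.train_full()  # one full training pass over everything loaded
    _trainer.state.save("/path/to/mlp_model.tar")  # model.pth + cached_data.npy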