python/tvm/meta_schedule/cost_model/random_model.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Random cost model
"""
from typing import List, Optional, Tuple, Union
from ..cost_model import PyCostModel
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import derived_object # type: ignore
@derived_object
class RandomModel(PyCostModel):
"""Random cost model
Parameters
----------
random_state : Union[Tuple[str, np.ndarray, int, int, float], dict]
The random state of the random number generator.
path : Optional[str]
The path of the random cost model.
max_range : Optional[int]
The maximum range of random results, [0, max_range].
Reference
---------
https://numpy.org/doc/stable/reference/random/generated/numpy.random.get_state.html
"""
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
random_state: Union[Tuple[str, np.ndarray, int, int, float], dict]
    path: Optional[str]
    max_range: Optional[int]
def __init__(
self,
*,
seed: Optional[int] = None,
path: Optional[str] = None,
max_range: Optional[int] = 100,
):
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
super().__init__()
if path is not None:
self.load(path)
else:
np.random.seed(seed)
self.random_state = np.random.get_state()
self.max_range = max_range
def load(self, path: str) -> None:
"""Load the cost model from given file location.
Parameters
----------
path : str
The file path.
"""
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
self.random_state = tuple(np.load(path, allow_pickle=True)) # type: ignore
def save(self, path: str) -> None:
"""Save the cost model to given file location.
Parameters
----------
path : str
The file path.
"""
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
np.save(path, np.array(self.random_state, dtype=object), allow_pickle=True)
def update(
self,
context: TuneContext,
candidates: List[MeasureCandidate],
results: List[RunnerResult],
) -> None:
"""Update the cost model given running results.
Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
results : List[RunnerResult]
The running results of the measure candidates.
"""
def predict(
self, context: TuneContext, candidates: List[MeasureCandidate]
) -> np.ndarray: # type: ignore # pylint: disable=used-before-assignment
"""Update the cost model given running results.
Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
        Returns
        -------
result : np.ndarray
The predicted running results.
"""
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
np.random.set_state(self.random_state)
# TODO(@zxybazh): Use numpy's RandState object:
# https://numpy.org/doc/1.16/reference/generated/numpy.random.RandomState.html#numpy.random.RandomState
result = np.random.rand(len(candidates)) * self.max_range # type: ignore
self.random_state = np.random.get_state()
return result
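
# A minimal illustrative sketch (added for exposition; not part of the original
# file). It demonstrates the numpy legacy-RNG mechanism that RandomModel relies
# on above: capturing the global state with `np.random.get_state` and restoring
# it with `np.random.set_state` replays the same draws.
def _demo_random_state_roundtrip() -> None:
    import numpy as np  # pylint: disable=import-outside-toplevel
    np.random.seed(42)
    state = np.random.get_state()  # capture the global RNG state
    first = np.random.rand(4)
    np.random.set_state(state)  # restore it ...
    second = np.random.rand(4)
    assert (first == second).all()  # ... and the same draws are replayed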

python/tvm/meta_schedule/cost_model/xgb_model.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""XGBoost-based cost model"""
import os
import tempfile
from collections import OrderedDict
from itertools import chain as itertools_chain
from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Tuple
import numpy as np # type: ignore
from ...contrib.tar import tar, untar
from ...runtime import NDArray
from ..cost_model import PyCostModel
from ..feature_extractor import FeatureExtractor
from ..logging import get_logger
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..utils import cpu_count, derived_object, shash2hex
from .metric import max_curve
if TYPE_CHECKING:
import xgboost as xgb # type: ignore
from xgboost.callback import TrainingCallback # type: ignore
from ..tune_context import TuneContext
logger = get_logger(__name__) # pylint: disable=invalid-name
def make_metric_sorter(focused_metric):
"""Make sure the focused metric is the first one."""
def metric_name_for_sort(name):
if focused_metric == name:
return "!" + name
return name
def sort_key(key):
key, _ = key
return metric_name_for_sort(key)
return sort_key
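# For example (illustrative, added for exposition):
#   sorted([("tr-a-peak@32", 0.9), ("tr-p-rmse", 0.1)],
#          key=make_metric_sorter("tr-p-rmse"))
# yields [("tr-p-rmse", 0.1), ("tr-a-peak@32", 0.9)], because the "!" prefix
# sorts before any alphanumeric character.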
class PackSum:
"""The pack-sum format
Parameters
----------
dmatrix : xgb.DMatrix
A float64 array of shape [n, m],
where `n` is the packed number of blocks,
        and `m` is the length of the feature vector of each block
    ids : np.ndarray
        An int64 array of shape [n] containing nonnegative integers,
        indicating the index of the sample that each block belongs to
"""
dmatrix: "xgb.DMatrix" # type: ignore # pylint: disable=invalid-name
ids: np.ndarray
def __init__(
self,
xs: List[np.ndarray], # pylint: disable=invalid-name
ys: Optional[np.ndarray], # pylint: disable=invalid-name
):
"""Create PackSum format given a batch of samples
Parameters
----------
xs : List[np.ndarray]
A batch of input samples
        ys : Optional[np.ndarray]
A batch of labels. None means no labels available.
"""
import xgboost as xgb # type: ignore # pylint: disable=import-outside-toplevel
repeats = [x.shape[0] for x in xs]
xs = np.concatenate(xs, axis=0)
self.ids = np.concatenate([[i] * repeat for i, repeat in enumerate(repeats)], axis=0)
if ys is None:
self.dmatrix = xgb.DMatrix(data=xs, label=None)
else:
ys = np.concatenate([[y] * repeat for y, repeat in zip(ys, repeats)], axis=0)
self.dmatrix = xgb.DMatrix(data=xs, label=ys)
self.dmatrix.set_weight(ys)
def predict_with_score(self, pred: np.ndarray) -> np.ndarray:
"""Predict the labels given the block level prediction scores.
Parameters
----------
pred : np.ndarray
The block level predictions
Returns
-------
result : np.ndarray
The predictions for each candidate.
"""
return np.bincount(self.ids, weights=pred)
def obj_square_error(self, ys_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Implement square error loss on pack-sum format as
a custom objective function for xgboost.
Parameters
----------
ys_pred: np.ndarray
The predictions
Returns
-------
gradient: np.ndarray
The gradient according to the xgboost format
hessian: np.ndarray
The hessian according to the xgboost format
"""
# Making prediction
ys_pred = self.predict_with_score(ys_pred)
# Propagate prediction to each block
ys_pred = ys_pred[self.ids] # pylint: disable=invalid-sequence-index
# The gradient and hessian
ys = self.dmatrix.get_label() # type: ignore # pylint: disable=invalid-name
gradient = ys_pred - ys
hessian = np.ones_like(gradient)
return gradient * ys, hessian * ys
def rmse(self, ys_pred: np.ndarray) -> Tuple[str, float]:
"""Evaluate RMSE (rooted mean square error) in the pack-sum format
Parameters
----------
ys_pred: np.ndarray
The raw predictions
Returns
-------
name: str
The name of the metric
score: float
The score of the metric
"""
# Making prediction
ys_pred = self.predict_with_score(ys_pred)
# Propagate prediction to each block
ys_pred = ys_pred[self.ids] # pylint: disable=invalid-sequence-index
# The RMSE
ys = self.dmatrix.get_label() # type: ignore # pylint: disable=invalid-name
square_error = np.square(ys_pred - ys)
rmse = np.sqrt(square_error.mean())
return "p-rmse", rmse
def average_peak_score(
self,
ys_pred: np.ndarray,
n: int,
) -> Tuple[str, float]:
"""Evaluate average-peak-score@N in the pack-sum format
Parameters
----------
ys_pred: np.ndarray
The raw prediction
n : int
The N in average-peak-score@N
Returns
-------
name: str
The name of the metric
score: float
The score of the metric
"""
ys = self.dmatrix.get_label() # type: ignore # pylint: disable=invalid-name
ys = self.predict_with_score(ys) # type: ignore # pylint: disable=invalid-name
ys = ys / np.unique(self.ids, return_counts=True)[1] # type: ignore # pylint: disable=invalid-name
ys_pred = self.predict_with_score(ys_pred)
trials = np.argsort(ys_pred)[::-1][:n]
trial_scores = ys[trials]
curve = max_curve(trial_scores) / np.max(ys)
score = np.mean(curve)
return f"a-peak@{n}", score
class XGBConfig(NamedTuple):
"""XGBoost model configuration
Parameters
----------
max_depth : int
The maximum depth.
gamma : float
The gamma.
min_child_weight : float
The minimum child weight.
eta : float
The eta, learning rate.
seed : int
The random seed.
    nthread : Optional[int]
        The number of threads to use.
        Default is None, which means using the number of physical cores.
"""
max_depth: int = 10
gamma: float = 0.001
min_child_weight: float = 0
eta: float = 0.2
seed: int = 43
nthread: Optional[int] = None
def to_dict(self):
return {
"max_depth": self.max_depth,
"gamma": self.gamma,
"min_child_weight": self.min_child_weight,
"eta": self.eta,
"seed": self.seed,
"nthread": self.nthread,
}
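# For example (illustrative, added for exposition), since XGBConfig is a
# NamedTuple, a single hyperparameter can be overridden with `_replace`:
#   XGBConfig()._replace(nthread=8).to_dict()
# returns {"max_depth": 10, "gamma": 0.001, "min_child_weight": 0,
#          "eta": 0.2, "seed": 43, "nthread": 8}.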
class FeatureGroup:
"""Feature group
Parameters
----------
group_hash : str
The hash of the group
features : List[np.ndarray]
The features
    costs : np.ndarray
The costs
min_cost : float
The minimum cost
"""
group_hash: str
features: List[np.ndarray]
costs: np.ndarray
min_cost: float
def __init__(
self,
group_hash: str,
features: List[np.ndarray],
costs: np.ndarray,
) -> None:
self.group_hash = group_hash
self.features = features
self.costs = costs
self.min_cost = np.min(costs)
def append(
self,
features: List[np.ndarray],
costs: np.ndarray,
) -> None:
self.features.extend(features)
self.costs = np.append(self.costs, costs)
self.min_cost = np.min(self.costs)
@derived_object
class XGBModel(PyCostModel):
"""XGBoost model
Parameters
----------
extractor : FeatureExtractor
The feature extractor for the model.
config : XGBConfig
The XGBoost model config.
num_warmup_samples : int
The number of samples that are used for warmup, i.e., the first few samples are predicted
with random results.
early_stopping_rounds : int
The number of rounds for early stopping.
verbose_eval : int
The verbose level when doing evaluation.
average_peak_n : int
The number to calculate average peak score.
adaptive_training : bool
        Whether to use adaptive training to reduce tuning time.
"""
# feature extractor
extractor: FeatureExtractor
# xgboost model config
config: XGBConfig
# behavior of randomness
num_warmup_samples: int
# evaluation
early_stopping_rounds: int
verbose_eval: int
average_peak_n: int
# states
data: Dict[str, FeatureGroup]
data_size: int
booster: Optional["xgb.Booster"]
# adaptive training
adaptive_training: bool
last_train_size: int
def __init__(
self,
*,
# feature extractor
extractor: FeatureExtractor.FeatureExtractorType = "per-store-feature",
# xgboost model config
config: XGBConfig = XGBConfig(),
# random result before enough samples
num_warmup_samples: int = 100,
# evaluation
early_stopping_rounds: int = 50,
verbose_eval: int = 25,
average_peak_n: int = 32,
adaptive_training: bool = True,
):
super().__init__()
if not isinstance(extractor, FeatureExtractor):
extractor = FeatureExtractor.create(extractor)
# feature extractor
self.extractor = extractor
# model-related
if config.nthread is None:
# use physical core number
config = config._replace(nthread=cpu_count(logical=False))
self.config = config
# behavior of randomness
self.num_warmup_samples = num_warmup_samples
# evaluation
self.early_stopping_rounds = early_stopping_rounds
self.verbose_eval = verbose_eval
self.average_peak_n = average_peak_n
# states
self.data = OrderedDict()
self.data_size = 0
self.booster = None
# adaptive training
self.adaptive_training = adaptive_training
self.last_train_size = 0
def load(self, path: str) -> None:
"""Load the cost model from given file location.
Parameters
----------
path : str
The file path.
Note
----
        Since the XGBoost model trains from scratch each time, this method loads the model
        together with the previously cached feature vectors and results, so that subsequent
        training can use all the existing data stored on disk.
"""
import xgboost as xgb # pylint: disable=import-outside-toplevel
with tempfile.TemporaryDirectory() as tmp_dir:
model_path = os.path.join(tmp_dir, "model.bin")
data_path = os.path.join(tmp_dir, "data.npy")
# Step 1. Untar
untar(path, tmp_dir)
# Step 2. Load data
data = OrderedDict()
data_size = 0
for group_hash, features, costs in np.load(data_path, allow_pickle=True):
data[group_hash] = FeatureGroup(
group_hash=group_hash,
features=list(features),
costs=costs,
)
data_size += len(costs)
# Step 3. Load the model
if os.path.exists(model_path):
booster = xgb.Booster()
booster.load_model(model_path)
            else:
                booster = None  # the archive contains no model; keep untrained
self.data = data
self.data_size = data_size
self.booster = booster
def save(self, path: str) -> None:
"""Save the cost model to given file location.
Parameters
----------
path : str
The file path.
Note
----
        Since the XGBoost model trains from scratch each time, this method saves the model
        together with the previously cached feature vectors and results, so that subsequent
        training can use all the existing data stored on disk.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model_path = os.path.join(tmp_dir, "model.bin")
data_path = os.path.join(tmp_dir, "data.npy")
# Step 1. Save the model
booster = self.booster
if booster is not None:
booster.save_model(model_path)
else:
model_path = None
# Step 2. Save data
data = [
(
g.group_hash,
g.features,
g.costs,
)
for g in self.data.values()
]
np.save(
file=data_path,
arr=np.array(data, dtype=object),
)
# Step 3. Tar it
tar(path, [x for x in [model_path, data_path] if x is not None])
logger.info("Saved XGBModel to %s", path)
def update(
self,
context: "TuneContext",
candidates: List[MeasureCandidate],
results: List[RunnerResult],
) -> None:
"""Update the cost model given running results.
Parameters
----------
context : TuneContext
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
results : List[RunnerResult]
The running results of the measure candidates.
"""
assert len(candidates) == len(results)
if len(candidates) == 0:
return
# Step 1. Get the feature group
new_group_hash = shash2hex(context.mod)
group = self.data.get(new_group_hash, None)
# Step 2. Extract features
def _feature(x: NDArray) -> np.ndarray:
return x.numpy().astype("float32")
def _mean_cost(x: RunnerResult) -> float:
if not x.run_secs:
return 1e10
return float(np.median([float(s) for s in x.run_secs]))
new_features = [_feature(x) for x in self.extractor.extract_from(context, candidates)]
new_mean_costs = np.array([_mean_cost(x) for x in results]).astype("float32")
        # Step 3. Run validation
if group is not None and self.booster is not None:
logger.debug(
"XGB validation: %s",
"\t".join(
f"{key}: {score:.6f}"
for key, score in self._validate(
xs=new_features,
ys=group.min_cost / new_mean_costs,
)
),
)
# Step 4. Add the features into the data points
if group is None:
group = FeatureGroup(
group_hash=new_group_hash,
features=new_features,
costs=new_mean_costs,
)
else:
group.append(new_features, new_mean_costs)
self.data[new_group_hash] = group
self.data_size += len(new_features)
if (
self.adaptive_training
and self.data_size - self.last_train_size < self.last_train_size / 5
):
            # Set a training threshold related to `last_train_size` to reduce the training
            # overhead when there are too many results
return
self.last_train_size = self.data_size
# Step 5. Re-train the model
self._train(
xs=list(itertools_chain.from_iterable([g.features for g in self.data.values()])),
ys=np.concatenate(
[g.min_cost / g.costs for g in self.data.values()],
axis=0,
),
)
def predict(
self,
context: "TuneContext",
candidates: List[MeasureCandidate],
) -> np.ndarray:
"""Predict the normalized score using the cost model.
Parameters
----------
context : TuneContext
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
        Returns
        -------
result : np.ndarray
The predicted normalized score.
"""
if self.data_size >= self.num_warmup_samples and self.booster is not None:
ret = self._predict(
xs=[
x.numpy().astype("float32")
for x in self.extractor.extract_from(
context,
candidates,
)
]
)
else:
ret = np.random.uniform(
low=0,
high=1,
size=(len(candidates),),
)
return ret.astype("float64")
def _train( # type: ignore # pylint: disable=invalid-name
self,
xs: List[np.ndarray],
ys: np.ndarray,
) -> None:
import xgboost as xgb # type: ignore # pylint: disable=import-outside-toplevel
self.d_train = PackSum(xs=xs, ys=ys)
def obj(ys_pred: np.ndarray, d_train: "xgb.DMatrix"): # type: ignore # pylint: disable = unused-argument
return self.d_train.obj_square_error(ys_pred)
def rmse(ys_pred: np.ndarray, d_train: "xgb.DMatrix"): # type: ignore # pylint: disable = unused-argument
return self.d_train.rmse(ys_pred)
def avg_peak_score(ys_pred: np.ndarray, d_train: "xgb.DMatrix"): # type: ignore # pylint: disable = unused-argument
return self.d_train.average_peak_score(ys_pred, self.average_peak_n)
self.booster = xgb.train(
self.config.to_dict(),
self.d_train.dmatrix,
num_boost_round=10000,
obj=obj,
callbacks=[
_get_custom_call_back(
early_stopping_rounds=self.early_stopping_rounds,
verbose_eval=self.verbose_eval,
fevals=[rmse, avg_peak_score],
evals=[(self.d_train.dmatrix, "tr")],
cvfolds=None,
)
],
)
del self.d_train
def _predict( # type: ignore # pylint: disable=invalid-name
self,
xs: List[np.ndarray],
) -> np.ndarray:
d_test = PackSum(xs=xs, ys=None)
pred = self.booster.predict(d_test.dmatrix)
ret = d_test.predict_with_score(pred)
return ret
def _validate( # type: ignore # pylint: disable=invalid-name
self,
xs: List[np.ndarray],
ys: np.ndarray,
) -> List[Tuple[str, float]]:
"""Evaluate the score of inputs.
Parameters
----------
xs : List[np.ndarray]
A batch of input samples
ys : List[float]
A batch of labels
Returns
-------
scores: np.ndarray
The predicted result for all inputs.
"""
assert self.booster is not None
d_valid = PackSum(xs=xs, ys=ys)
def average_peak_score(ys_pred: np.ndarray):
return d_valid.average_peak_score(ys_pred, n=self.average_peak_n)
ys_pred = self.booster.predict(d_valid.dmatrix)
eval_result: List[Tuple[str, float]] = [
feval(ys_pred)
for feval in (
average_peak_score,
d_valid.rmse,
)
]
eval_result.sort(key=make_metric_sorter("p-rmse"))
return eval_result
def _get_custom_call_back(
early_stopping_rounds: int,
verbose_eval: int,
fevals: List[Callable],
evals: List[Tuple["xgb.DMatrix", str]],
focused_metric: str = "tr-p-rmse",
    cvfolds: Optional[List["xgb.training.CVPack"]] = None,
) -> "TrainingCallback":
"""Get a customized callback function for XGBoost. Work around xgboost import."""
def optional_xgboost_callback(cls):
"""Decorator for importing TrainingCallback from xgboost"""
# pylint:disable = import-outside-toplevel
try:
from xgboost.callback import TrainingCallback # type: ignore
# pylint:enable = import-outside-toplevel
except ImportError:
class TrainingCallback: # type: ignore
pass
class OptXGBoostCustomCallback(cls, TrainingCallback): # type: ignore
pass
return OptXGBoostCustomCallback
@optional_xgboost_callback
class XGBoostCustomCallback:
"""Custom callback class for xgboost to support multiple custom evaluation functions"""
def __init__(
self,
early_stopping_rounds: int,
verbose_eval: int,
fevals: List[Callable],
evals: List[Tuple["xgb.DMatrix", str]],
focused_metric: str = "tr-p-rmse",
            cvfolds: Optional[List["xgb.training.CVPack"]] = None,
):
self.early_stopping_rounds = early_stopping_rounds
self.verbose_eval = verbose_eval
self.fevals = fevals
self.evals = evals
self.state: Dict[str, Any] = {}
self.focused_metric = focused_metric
self.sort_key = make_metric_sorter(focused_metric=focused_metric)
self.cvfolds = cvfolds
if cvfolds is not None:
self.aggregated_cv = None
def __call__(self, env: "xgb.core.CallbackEnv"):
# Compatibility with xgboost < 1.3
return self.after_iteration(env.model, env.iteration, env.evaluation_result_list)
def init(self, model: "xgb.Booster"):
"""Internal function for initialization"""
booster: "xgb.Booster" = model
self.state["best_iteration"] = 0
self.state["best_score"] = float("inf")
if booster is None:
assert self.cvfolds is not None
return
if booster.attr("best_score") is not None:
self.state["best_score"] = float(booster.attr("best_score"))
self.state["best_iteration"] = int(booster.attr("best_iteration"))
self.state["best_msg"] = booster.attr("best_msg")
else:
booster.set_attr(best_iteration=str(self.state["best_iteration"]))
booster.set_attr(best_score=str(self.state["best_score"]))
def after_iteration(
self, model: "xgb.Booster", epoch: int, evals_log: Dict
): # pylint: disable = unused-argument
"""Internal function for after_iteration"""
# pylint:disable = import-outside-toplevel
try:
from xgboost.callback import _fmt_metric # type: ignore
except ImportError:
# Compatibility with xgboost >= 1.6
def _fmt_metric(value, show_stdv=True):
if len(value) == 2:
return f"{value[0]}:{value[1]:.5f}"
if len(value) == 3:
if show_stdv:
return f"{value[0]}:{value[1]:.5f}+{value[2]:.5f}"
return f"{value[0]}:{value[1]:.5f}"
raise ValueError("wrong metric value", value)
import xgboost as xgb
from xgboost import rabit # type: ignore
try:
from xgboost.training import aggcv # type: ignore
except ImportError:
from xgboost.callback import _aggcv as aggcv # type: ignore
# pylint:enable = import-outside-toplevel
if not self.state:
self.init(model)
booster: xgb.Booster = model
iteration: int = epoch
cvfolds: List[xgb.training.CVPack] = self.cvfolds
##### Evaluation #####
# `eval_result` is a list of (key, score)
eval_result: List[Tuple[str, float]] = []
if cvfolds is None:
eval_result = list(
itertools_chain.from_iterable(
[
(key, float(value))
for key, value in map(
lambda x: x.split(":"),
booster.eval_set(
evals=self.evals,
iteration=iteration,
feval=feval,
).split()[1:],
)
]
for feval in self.fevals
)
)
else:
eval_result = list(
itertools_chain.from_iterable(
[
(key, score)
for key, score, _std in aggcv(
fold.eval(
iteration=iteration,
feval=feval,
)
for fold in cvfolds
)
]
for feval in self.fevals
)
)
eval_result = list(eval_result)
eval_result.sort(key=self.sort_key)
##### Print eval result #####
if self.verbose_eval and iteration % self.verbose_eval == 0:
info = []
for key, score in eval_result:
if "null" not in key:
info.append(f"{key}: {score:.6f}")
logger.debug("XGB iter %3d: %s", iteration, "\t".join(info))
##### Choose score and do early stopping #####
score = None
for key, _score in eval_result:
if key == self.focused_metric:
score = _score
break
assert score is not None
best_score = self.state["best_score"]
best_iteration = self.state["best_iteration"]
if score < best_score:
tab = "\t" # to work with f-string
msg = f"[{epoch}] {tab.join([_fmt_metric(x) for x in eval_result])}"
self.state["best_msg"] = msg
self.state["best_score"] = score
self.state["best_iteration"] = epoch
# save the property to attributes, so they will occur in checkpoint.
if model is not None:
model.set_attr(
best_score=str(self.state["best_score"]),
best_iteration=str(self.state["best_iteration"]),
best_msg=self.state["best_msg"],
)
elif epoch - best_iteration >= self.early_stopping_rounds:
best_msg = self.state["best_msg"]
if self.verbose_eval and rabit.get_rank() == 0:
logger.debug("XGB stopped. Best iteration: %s ", best_msg)
# instead of raising EarlyStopException, returning True to end the training
return True
# False to indicate training should not stop.
return False
return XGBoostCustomCallback(
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
fevals=fevals,
evals=evals,
focused_metric=focused_metric,
cvfolds=cvfolds,
)
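
# A minimal illustrative sketch (added for exposition; not part of the original
# file). It constructs an XGBModel with a shallower tree and adaptive training
# disabled; all keyword names mirror the constructor above.
def _demo_xgb_model() -> "XGBModel":
    return XGBModel(
        extractor="per-store-feature",  # resolved via FeatureExtractor.create
        config=XGBConfig(max_depth=6),  # override a single hyperparameter
        num_warmup_samples=50,  # random predictions until 50 samples are seen
        adaptive_training=False,
    )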

python/tvm/meta_schedule/database/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.database package.
The database that stores serialized tuning records and workloads
"""
from .database import Database, PyDatabase, TuningRecord, Workload, create
from .json_database import JSONDatabase
from .memory_database import MemoryDatabase
from .ordered_union_database import OrderedUnionDatabase
from .schedule_fn_database import ScheduleFnDatabase
from .union_database import UnionDatabase

python/tvm/meta_schedule/database/database.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TuningRecord database"""
from typing import Any, Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.ir.module import IRModule
from tvm.runtime import Object
from tvm.target import Target
from tvm.tir.schedule import Schedule, Trace
from .. import _ffi_api
from ..arg_info import ArgInfo
from ..utils import _json_de_tvm
@register_object("meta_schedule.Workload")
class Workload(Object):
"""A workload, i.e. an IRModule and its structural hash.
Parameters
----------
mod : IRModule
The workload's IRModule
"""
mod: IRModule
def __init__(self, mod: IRModule) -> None:
self.__init_handle_by_constructor__(
_ffi_api.Workload, # type: ignore # pylint: disable=no-member
mod,
)
def as_json(self) -> Any:
"""Export the workload to JSON as a python object.
Returns
-------
json : Any
The JSON serialized as a python object (e.g. a Dict or List).
Use json.dumps() to get the associated json string.
"""
return _json_de_tvm(_ffi_api.WorkloadAsJSON(self)) # type: ignore # pylint: disable=no-member
@staticmethod
def from_json(json_obj: Any) -> "Workload":
"""Create a workload from a json object.
Parameters
----------
json_obj : Any
The json object to parse.
Returns
-------
        workload : Workload
            The parsed workload.
"""
return _ffi_api.WorkloadFromJSON(json_obj) # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.TuningRecord")
class TuningRecord(Object):
"""The class of tuning records.
Parameters
----------
    trace : tvm.tir.schedule.Trace
The trace of the tuning record.
workload : Workload
The workload of the tuning record.
run_secs : Optional[List[float]]
The run time of the tuning record.
target : Optional[Target]
The target of the tuning record.
args_info : Optional[List[ArgInfo]]
The argument information of the tuning record.
"""
trace: Trace
workload: Workload
run_secs: Optional[List[float]]
target: Optional[Target]
args_info: Optional[List[ArgInfo]]
def __init__( # type: ignore # pylint: disable=too-many-arguments
self,
trace: Trace,
workload: Workload,
run_secs: Optional[List[float]] = None,
target: Optional[Target] = None,
args_info: Optional[List[ArgInfo]] = None,
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.TuningRecord, # type: ignore # pylint: disable=no-member
trace,
workload,
run_secs,
target,
args_info,
)
def as_measure_candidate(self) -> Any:
"""Generate a measure candidate given an initial IR module and a trace
stored in the tuning record.
Returns
-------
candidate : MeasureCandidate
A generated candidate.
"""
return _ffi_api.TuningRecordAsMeasureCandidate(self) # type: ignore # pylint: disable=no-member
def as_json(self) -> Any:
"""Export the tuning record to a JSON string.
Returns
-------
json_str : str
The JSON string exported.
"""
return _json_de_tvm(_ffi_api.TuningRecordAsJSON(self)) # type: ignore # pylint: disable=no-member
@staticmethod
def from_json(json_obj: Any, workload: Workload) -> "TuningRecord":
"""Create a tuning record from a json object.
Parameters
----------
json_obj : Any
The json object to parse.
workload : Workload
The workload.
Returns
-------
tuning_record : TuningRecord
The parsed tuning record.
"""
return _ffi_api.TuningRecordFromJSON(json_obj, workload) # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.Database")
class Database(Object):
"""The abstract database interface."""
DatabaseType = Union["Database", Literal["json", "memory"]]
def has_workload(self, mod: IRModule) -> bool:
"""Check if the database has the given workload.
Parameters
----------
mod : IRModule
The IRModule to be searched for.
Returns
-------
result : bool
Whether the database has the given workload.
"""
return _ffi_api.DatabaseHasWorkload(self, mod) # type: ignore # pylint: disable=no-member
def commit_workload(self, mod: IRModule) -> Workload:
"""Commit a workload to the database if missing.
Parameters
----------
mod : IRModule
The IRModule to be searched for or added.
Returns
-------
workload : Workload
The workload corresponding to the given IRModule.
"""
return _ffi_api.DatabaseCommitWorkload(self, mod) # type: ignore # pylint: disable=no-member
def commit_tuning_record(self, record: TuningRecord) -> None:
"""Commit a tuning record to the database.
Parameters
----------
record : TuningRecord
The tuning record to add.
"""
_ffi_api.DatabaseCommitTuningRecord(self, record) # type: ignore # pylint: disable=no-member
def get_top_k(self, workload: Workload, top_k: int) -> List[TuningRecord]:
"""Get the top K tuning records of given workload from the database.
Parameters
----------
workload : Workload
The workload to be searched for.
top_k : int
The number of top records to get.
Returns
-------
top_k_records : List[TuningRecord]
The top K records.
"""
return _ffi_api.DatabaseGetTopK(self, workload, top_k) # type: ignore # pylint: disable=no-member
def get_all_tuning_records(self) -> List[TuningRecord]:
"""Get all the tuning records from the database.
Returns
-------
tuning_records : List[TuningRecord]
All tuning records from the database.
"""
return _ffi_api.DatabaseGetAllTuningRecords(self) # type: ignore # pylint: disable=no-member
def __len__(self) -> int:
"""Get the number of records in the database.
Returns
-------
num_records : int
The number of records in the database
"""
return _ffi_api.DatabaseSize(self) # type: ignore # pylint: disable=no-member
def query_tuning_record(
self,
mod: IRModule,
target: Target,
workload_name: str,
) -> Optional[TuningRecord]:
"""Query the best record of the given workload from the database.
Parameters
----------
mod : IRModule
The IRModule to be searched for.
target : Target
The target to be searched for.
workload_name : str
The name of the workload to be searched for.
Returns
-------
tuning_record : Optional[TuningRecord]
The best record of the given workload; None if not found.
"""
return _ffi_api.DatabaseQueryTuningRecord(self, mod, target, workload_name) # type: ignore # pylint: disable=no-member
def query_schedule(
self,
mod: IRModule,
target: Target,
workload_name: str,
) -> Optional[Schedule]:
"""Query the best schedule of the given workload from the database.
Parameters
----------
mod : IRModule
The IRModule to be searched for.
target : Target
The target to be searched for.
workload_name : str
The name of the workload to be searched for.
Returns
-------
schedule : Optional[Schedule]
The best schedule of the given workload; None if not found.
"""
return _ffi_api.DatabaseQuerySchedule(self, mod, target, workload_name) # type: ignore # pylint: disable=no-member
def query_ir_module(
self,
mod: IRModule,
target: Target,
workload_name: str,
) -> Optional[IRModule]:
"""Query the best IRModule of the given workload from the database.
Parameters
----------
mod : IRModule
The IRModule to be searched for.
target : Target
The target to be searched for.
workload_name : str
The name of the workload to be searched for.
Returns
-------
ir_module : Optional[IRModule]
The best IRModule of the given workload; None if not found.
"""
return _ffi_api.DatabaseQueryIRModule(self, mod, target, workload_name) # type: ignore # pylint: disable=no-member
def query(
self,
mod: IRModule,
target: Target,
*,
workload_name: str = "main",
kind: Union[
Literal["schedule"],
Literal["record"],
Literal["ir_module"],
] = "schedule",
) -> Union[Schedule, IRModule, TuningRecord]:
"""Query the database to retrieve the best optimization outcome of the given workload.
Parameters
----------
mod : IRModule
The IRModule to be searched for.
target : Target
The target to be searched for.
        workload_name : str = "main"
            The name of the workload to be searched for.
        kind : str = "schedule" | "record" | "ir_module"
            The kind of the optimization outcome to be returned.
Returns
-------
result : Union[Schedule, IRModule, TuningRecord]
The best optimization outcome of the given workload.
"""
if kind == "schedule":
return self.query_schedule(mod, target, workload_name)
if kind == "record":
return self.query_tuning_record(mod, target, workload_name)
if kind == "ir_module":
return self.query_ir_module(mod, target, workload_name)
raise ValueError(f'Unknown kind: {kind}. Candidates are: "schedule", "record", "ir_module"')
def __enter__(self) -> "Database":
"""Entering the scope of the context manager"""
_ffi_api.DatabaseEnterWithScope(self) # type: ignore # pylint: disable=no-member
return self
def __exit__(self, ptype, value, trace) -> None:
"""Exiting the scope of the context manager"""
_ffi_api.DatabaseExitWithScope(self) # type: ignore # pylint: disable=no-member
@staticmethod
def current() -> Optional["Database"]:
"""Get the current database under scope."""
return _ffi_api.DatabaseCurrent() # type: ignore # pylint: disable=no-member
@staticmethod
def create( # pylint: disable=keyword-arg-before-vararg
kind: Union[
Literal[
"json",
"memory",
"union",
"ordered_union",
],
Callable[[Schedule], bool],
] = "json",
*args,
**kwargs,
) -> "Database":
"""Create a Database.
Parameters
----------
kind : str = "json" | "memory" | "union" | "ordered_union" | Callable[[Schedule], bool]
The kind of the database to be created. The following kinds are supported:
"json", "memory", "union", "ordered_union", and a custom schedule function.
Returns
-------
database : Database
The created database.
"""
from . import ( # pylint: disable=import-outside-toplevel
JSONDatabase,
MemoryDatabase,
OrderedUnionDatabase,
ScheduleFnDatabase,
UnionDatabase,
)
if callable(kind):
return ScheduleFnDatabase(kind, *args, **kwargs) # type: ignore
if kind == "json":
return JSONDatabase(*args, **kwargs)
if kind == "memory":
return MemoryDatabase(*args, **kwargs) # type: ignore
if kind == "union":
return UnionDatabase(*args, **kwargs) # type: ignore
if kind == "ordered_union":
return OrderedUnionDatabase(*args, **kwargs) # type: ignore
raise ValueError(f"Unknown Database: {kind}")
create = Database.create # pylint: disable=invalid-name
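
# A minimal illustrative sketch (added for exposition; not part of the original
# file). It wires `Database.create` to the query API defined above; `mod`,
# `target` and `work_dir` are caller-supplied placeholders.
def _demo_query_best_schedule(
    mod: IRModule, target: Target, work_dir: str
) -> Optional[Schedule]:
    db = Database.create("json", work_dir=work_dir)
    if not db.has_workload(mod):
        return None  # nothing has been tuned for this workload yet
    return db.query(mod, target, workload_name="main", kind="schedule")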
@register_object("meta_schedule.PyDatabase")
class _PyDatabase(Database):
"""
A TVM object database to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PyDatabase
"""
def __init__(
self,
        f_has_workload: Optional[Callable] = None,
        f_commit_workload: Optional[Callable] = None,
        f_commit_tuning_record: Optional[Callable] = None,
        f_get_top_k: Optional[Callable] = None,
        f_get_all_tuning_records: Optional[Callable] = None,
        f_query_tuning_record: Optional[Callable] = None,
        f_query_schedule: Optional[Callable] = None,
        f_query_ir_module: Optional[Callable] = None,
        f_size: Optional[Callable] = None,
module_equality: str = "structural",
):
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.DatabasePyDatabase, # type: ignore # pylint: disable=no-member
f_has_workload,
f_commit_workload,
f_commit_tuning_record,
f_get_top_k,
f_get_all_tuning_records,
f_query_tuning_record,
f_query_schedule,
f_query_ir_module,
f_size,
module_equality,
)
class PyDatabase:
"""
An abstract database with customized methods on the python-side.
This is the user facing class for function overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PyDatabase,
"methods": [
"has_workload",
"commit_workload",
"commit_tuning_record",
"get_top_k",
"get_all_tuning_records",
"query_tuning_record",
"query_schedule",
"query_ir_module",
"__len__",
],
}
def has_workload(self, mod: IRModule) -> bool:
"""Check if the database has the given workload.
Parameters
----------
mod : IRModule
The IRModule to be searched for.
Returns
-------
result : bool
Whether the database has the given workload.
"""
raise NotImplementedError
def commit_workload(self, mod: IRModule) -> Workload:
"""Commit a workload to the database if missing.
Parameters
----------
mod : IRModule
The IRModule to be searched for or added.
Returns
-------
workload : Workload
The workload corresponding to the given IRModule.
"""
raise NotImplementedError
def commit_tuning_record(self, record: TuningRecord) -> None:
"""Commit a tuning record to the database.
Parameters
----------
record : TuningRecord
The tuning record to add.
"""
raise NotImplementedError
def get_top_k(self, workload: Workload, top_k: int) -> List[TuningRecord]:
"""Get the top K tuning records of given workload from the database.
Parameters
----------
workload : Workload
The workload to be searched for.
top_k : int
The number of top records to get.
Returns
-------
top_k_records : List[TuningRecord]
The top K records.
"""
raise NotImplementedError
def get_all_tuning_records(self) -> List[TuningRecord]:
"""Get all the tuning records from the database.
Returns
-------
tuning_records : List[TuningRecord]
All tuning records from the database.
"""
raise NotImplementedError
def query_tuning_record(
self, mod: IRModule, target: Target, workload_name: Optional[str] = None
) -> Optional[TuningRecord]:
"""Query a tuning record from the database.
Parameters
----------
mod : IRModule
The IRModule to be searched for.
target : Target
The target to be searched for.
workload_name : Optional[str]
The workload name to be searched for.
Returns
-------
record : Optional[TuningRecord]
The tuning record corresponding to the given workload.
"""
# Using self._outer to replace the self pointer
return _ffi_api.DatabaseQueryTuningRecord( # type: ignore # pylint: disable=no-member
self._outer(), mod, target, workload_name # type: ignore # pylint: disable=no-member
)
def query_schedule(
self, mod: IRModule, target: Target, workload_name: Optional[str] = None
) -> Optional[Schedule]:
"""Query a schedule from the database.
Parameters
----------
mod : IRModule
The IRModule to be searched for.
target : Target
The target to be searched for.
workload_name : Optional[str]
The workload name to be searched for.
Returns
-------
schedule : Optional[Schedule]
The schedule corresponding to the given workload.
"""
# Using self._outer to replace the self pointer
return _ffi_api.DatabaseQuerySchedule( # type: ignore # pylint: disable=no-member
self._outer(), mod, target, workload_name # type: ignore # pylint: disable=no-member
)
def query_ir_module(
self, mod: IRModule, target: Target, workload_name: Optional[str] = None
) -> Optional[IRModule]:
"""Query an IRModule from the database.
Parameters
----------
mod : IRModule
The IRModule to be searched for.
target : Target
The target to be searched for.
workload_name : Optional[str]
The workload name to be searched for.
Returns
-------
mod : Optional[IRModule]
The IRModule corresponding to the given workload.
"""
# Using self._outer to replace the self pointer
return _ffi_api.DatabaseQueryIRModule( # type: ignore # pylint: disable=no-member
self._outer(), mod, target, workload_name # type: ignore # pylint: disable=no-member
)
def __len__(self) -> int:
"""Get the number of records in the database.
Returns
-------
num_records : int
The number of records in the database
"""
raise NotImplementedError

python/tvm/meta_schedule/database/json_database.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The default database that uses a JSON File to store tuning records"""
import os.path as osp
from typing import Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.JSONDatabase")
class JSONDatabase(Database):
"""Database class backed by JSON.
Parameters
----------
path_workload : str
The path to the workload table.
path_tuning_record : str
The path to the tuning record table.
module_equality : Optional[str]
A string to specify the module equality testing and hashing method.
        It must be one of the following:
- "structural": Use StructuralEqual/Hash
- "ignore-ndarray": Same as "structural", but ignore ndarray raw data during
equality testing and hashing.
- "anchor-block": Apply equality testing and hashing on the anchor block extracted from a
                          given module. The "ignore-ndarray" variant is used for the extracted
blocks or in case no anchor block is found.
For the definition of the anchor block, see tir/analysis/analysis.py.
"""
path_workload: str
path_tuning_record: str
def __init__(
self,
path_workload: Optional[str] = None,
path_tuning_record: Optional[str] = None,
*,
work_dir: Optional[str] = None,
allow_missing: bool = True,
module_equality: str = "structural",
) -> None:
"""Constructor.
Parameters
----------
path_workload : Optional[str] = None
The path to the workload table. If not specified,
will be generated from `work_dir` as `$work_dir/database_workload.json`.
path_tuning_record : Optional[str] = None
The path to the tuning record table. If not specified,
will be generated from `work_dir` as `$work_dir/database_tuning_record.json`.
work_dir : Optional[str] = None
The work directory, if specified, will be used to generate `path_tuning_record`
and `path_workload`.
allow_missing : bool
Whether to create new file when the given path is not found.
"""
if work_dir is not None:
if path_workload is None:
path_workload = osp.join(work_dir, "database_workload.json")
if path_tuning_record is None:
path_tuning_record = osp.join(work_dir, "database_tuning_record.json")
if path_workload is None:
raise ValueError("`path_workload` is not specified.")
if path_tuning_record is None:
raise ValueError("`path_tuning_record` is not specified.")
self.__init_handle_by_constructor__(
_ffi_api.DatabaseJSONDatabase, # type: ignore # pylint: disable=no-member
path_workload,
path_tuning_record,
allow_missing,
module_equality,
)
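
# A minimal illustrative sketch (added for exposition; not part of the original
# file). Passing `work_dir` alone derives the two table paths as
# `$work_dir/database_workload.json` and `$work_dir/database_tuning_record.json`.
def _demo_json_database(work_dir: str) -> JSONDatabase:
    return JSONDatabase(work_dir=work_dir)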

python/tvm/meta_schedule/database/memory_database.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A database that stores TuningRecords in memory"""
from tvm._ffi import register_object
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.MemoryDatabase")
class MemoryDatabase(Database):
"""An in-memory database
Parameters
----------
module_equality : Optional[str]
A string to specify the module equality testing and hashing method.
        It must be one of the following:
- "structural": Use StructuralEqual/Hash
- "ignore-ndarray": Same as "structural", but ignore ndarray raw data during
equality testing and hashing.
- "anchor-block": Apply equality testing and hashing on the anchor block extracted from a
                          given module. The "ignore-ndarray" variant is used for the extracted
blocks or in case no anchor block is found.
For the definition of the anchor block, see tir/analysis/analysis.py.
"""
def __init__(
self,
module_equality: str = "structural",
) -> None:
self.__init_handle_by_constructor__(
            _ffi_api.DatabaseMemoryDatabase,  # type: ignore # pylint: disable=no-member
module_equality,
)

python/tvm/meta_schedule/database/ordered_union_database.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A database consists of multiple databases."""
from tvm._ffi import register_object
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.OrderedUnionDatabase")
class OrderedUnionDatabase(Database):
"""A database composed of multiple databases, allowing users to guide IR rewriting using
combined knowledge of those databases. To each query, it returns the record from the first
database that responds to the query.
Examples
--------
    Examples below demonstrate the use cases of, and the difference between,
    UnionDatabase and OrderedUnionDatabase.
Assumption:
* db1, db2 do not have tuning records for the target workload.
* Each of db3, db4, db5 has tuning records r3, r4, r5 for target workload respectively.
.. code-block:: python
#### Case 1. `UnionDatabase`:
merged_db = ms.database.UnionDatabase(
db1, # no record
db2, # no record
db3, # has r3
db4 # has r4
)
# returns the better one between r3 and r4
merged_db.query_tuning_record(..., target_workload)
### Case 2. `OrderedUnionDatabase`
merged_db = ms.database.OrderedUnionDatabase(
db1, # no record
db2, # no record
db3, # has r3
db4 # has r4
)
# returns r3
merged_db.query_tuning_record(..., target_workload)
### Case 3. Mix-use scenario
merged_db = ms.database.UnionDatabase(
db1, # no record
db2, # no record
db3, # has r3
ms.database.OrderedUnionDatabase( # returns r4
db4, # has r4
db5, # has r5
)
)
# returns the better one between r3 and r4
merged_db.query_tuning_record(..., target_workload)
### Case 4. Another mix-use scenario
merged_db = ms.database.UnionDatabase(
db1, # no record
db2, # no record
db3, # has r3
ms.database.UnionDatabase( # returns best one between r4 and r5
db4, # has r4
db5, # has r5
)
)
# returns the best one among r3, r4 and r5
merged_db.query_tuning_record(..., target_workload)
### Case 5. Yet another mix-use scenario
merged_db = ms.database.OrderedUnionDatabase(
db1, # no record
db2, # no record
ms.database.UnionDatabase( # returns best one between r3 and r4
db3, # has r3
db4, # has r4
            ),
            db5,  # has r5
)
# returns the better one between r3 and r4
merged_db.query_tuning_record(..., target_workload)
"""
def __init__(self, *databases: Database) -> None:
"""Construct a merged database from multiple databases.
Parameters
----------
*databases : Database
The list of databases to combine.
"""
self.__init_handle_by_constructor__(
_ffi_api.DatabaseOrderedUnionDatabase, # type: ignore # pylint: disable=no-member
databases,
)

python/tvm/meta_schedule/database/schedule_fn_database.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A database for injecting handcrafted schedule functions."""
from typing import Callable
from tvm._ffi import register_object
from tvm.tir import Schedule
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.ScheduleFnDatabase")
class ScheduleFnDatabase(Database):
"""A database for injecting handcrafted schedule functions.
Parameters
----------
schedule_fn : Callable[[Schedule], bool],
The function to do scheduling, which takes a TIR schedule, and returns
a boolean indicating if the schedule is committed to the database.
module_equality : Optional[str]
A string to specify the module equality testing and hashing method.
        It must be one of the following:
- "structural": Use StructuralEqual/Hash
- "ignore-ndarray": Same as "structural", but ignore ndarray raw data during
equality testing and hashing.
- "anchor-block": Apply equality testing and hashing on the anchor block extracted from a
                          given module. The "ignore-ndarray" variant is used for the extracted
blocks or in case no anchor block is found.
For the definition of the anchor block, see tir/analysis/analysis.py.
"""
def __init__(
self,
schedule_fn: Callable[[Schedule], bool],
module_equality: str = "structural",
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.DatabaseScheduleFnDatabase, # type: ignore # pylint: disable=no-member
schedule_fn,
module_equality,
)
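
# A minimal illustrative sketch (added for exposition; not part of the original
# file). A real `schedule_fn` would apply TIR scheduling primitives to `sch`;
# this placeholder commits every module unchanged.
def _demo_schedule_fn_database() -> ScheduleFnDatabase:
    def _schedule_fn(sch: Schedule) -> bool:  # pylint: disable=unused-argument
        return True  # returning True commits the schedule to the database
    return ScheduleFnDatabase(_schedule_fn)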

python/tvm/meta_schedule/database/union_database.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A database consists of multiple databases."""
from tvm._ffi import register_object
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.UnionDatabase")
class UnionDatabase(Database):
"""A database composed of multiple databases, allowing users to guide IR rewriting using
combined knowledge of those databases. To each query, it returns the best record among all the
databases given.
Examples
--------
    Examples below demonstrate the use cases of, and the difference between,
    UnionDatabase and OrderedUnionDatabase.
Assumption:
* db1, db2 do not have tuning records for the target workload.
* Each of db3, db4, db5 has tuning records r3, r4, r5 for target workload respectively.
.. code-block:: python
#### Case 1. `UnionDatabase`:
merged_db = ms.database.UnionDatabase(
db1, # no record
db2, # no record
db3, # has r3
db4 # has r4
)
# returns the better one between r3 and r4
merged_db.query_tuning_record(..., target_workload)
### Case 2. `OrderedUnionDatabase`
merged_db = ms.database.OrderedUnionDatabase(
db1, # no record
db2, # no record
db3, # has r3
db4 # has r4
)
# returns r3
merged_db.query_tuning_record(..., target_workload)
### Case 3. Mix-use scenario
merged_db = ms.database.UnionDatabase(
db1, # no record
db2, # no record
db3, # has r3
ms.database.OrderedUnionDatabase( # returns r4
db4, # has r4
db5, # has r5
)
)
# returns the better one between r3 and r4
merged_db.query_tuning_record(..., target_workload)
### Case 4. Another mix-use scenario
merged_db = ms.database.UnionDatabase(
db1, # no record
db2, # no record
db3, # has r3
ms.database.UnionDatabase( # returns best one between r4 and r5
db4, # has r4
db5, # has r5
)
)
# returns the best one among r3, r4 and r5
merged_db.query_tuning_record(..., target_workload)
### Case 5. Yet another mix-use scenario
merged_db = ms.database.OrderedUnionDatabase(
db1, # no record
db2, # no record
ms.database.UnionDatabase( # returns best one between r3 and r4
db3, # has r3
db4, # has r4
            ),
            db5,  # has r5
)
# returns the better one between r3 and r4
merged_db.query_tuning_record(..., target_workload)
"""
def __init__(self, *databases: Database) -> None:
"""Construct a merged database from multiple databases.
Parameters
----------
*databases : Database
The list of databases to combine.
"""
self.__init_handle_by_constructor__(
_ffi_api.DatabaseUnionDatabase, # type: ignore # pylint: disable=no-member
databases,
)

python/tvm/meta_schedule/extracted_task.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Extracted tasks from high-level IR."""
from typing import List
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import Object
from tvm.target import Target
from . import _ffi_api
@register_object("meta_schedule.ExtractedTask")
class ExtractedTask(Object):
"""A tuning task extracted from the high-level IR
Parameters
----------
task_name : str
The name of the task extracted
mod : IRModule
The high-level IR
    target : Target
Target information
dispatched : List[IRModule]
A list of low-level IRs that the high-level IR could potentially dispatch to
weight : int
The weight of the task
"""
    task_name: str
    mod: IRModule
    target: Target
    dispatched: List[IRModule]
    weight: int
def __init__(
self,
task_name: str,
mod: IRModule,
target: Target,
dispatched: List[IRModule],
weight: int,
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ExtractedTask, # type: ignore # pylint: disable=no-member
task_name,
mod,
target,
dispatched,
weight,
)
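# A minimal construction sketch (hypothetical, not part of the upstream API):
# task extraction normally produces these objects, but one can also be built
# by hand from any IRModule, e.g. one created from a TE compute definition.
def _example_extracted_task() -> ExtractedTask:
    import tvm  # pylint: disable=import-outside-toplevel
    from tvm import te  # pylint: disable=import-outside-toplevel
    a = te.placeholder((16,), name="A")
    b = te.compute((16,), lambda i: a[i] + 1.0, name="B")
    mod = tvm.IRModule({"main": te.create_prim_func([a, b])})
    return ExtractedTask(
        task_name="example_add_one",  # hypothetical task name
        mod=mod,  # the high-level IR
        target=Target("llvm"),
        dispatched=[mod],  # low-level candidates; here just the same module
        weight=1,
    )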
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/feature_extractor/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.feature_extractor package.
Meta Schedule feature extractors that extract features from
measure candidates for use in the cost model.
"""
from .feature_extractor import FeatureExtractor, PyFeatureExtractor
from .per_store_feature import PerStoreFeature
from .random_feature_extractor import RandomFeatureExtractor
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/feature_extractor/feature_extractor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule FeatureExtractor."""
from typing import Callable, List, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.runtime.ndarray import NDArray
from .. import _ffi_api
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import _get_default_str
@register_object("meta_schedule.FeatureExtractor")
class FeatureExtractor(Object):
"""Extractor for features from measure candidates for use in cost model."""
FeatureExtractorType = Union[Literal["per-store-feature"], "FeatureExtractor"]
def extract_from(
self, context: TuneContext, candidates: List[MeasureCandidate]
) -> List[NDArray]:
"""Extract features from the given measure candidate.
Parameters
----------
context : TuneContext
The tuning context for feature extraction.
candidates : List[MeasureCandidate]
The measure candidates to extract features from.
Returns
-------
features : List[NDArray]
            The extracted feature NDArrays.
"""
result = _ffi_api.FeatureExtractorExtractFrom( # type: ignore # pylint: disable=no-member
self, context, candidates
)
return result
@staticmethod
def create(
kind: Literal["per-store-feature"],
*args,
**kwargs,
) -> "FeatureExtractor":
"""Create a CostModel."""
from . import PerStoreFeature # pylint: disable=import-outside-toplevel
if kind == "per-store-feature":
return PerStoreFeature(*args, **kwargs) # type: ignore
raise ValueError(f"Unknown CostModel: {kind}")
@register_object("meta_schedule.PyFeatureExtractor")
class _PyFeatureExtractor(FeatureExtractor):
"""
A TVM object feature extractor to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PyFeatureExtractor
"""
def __init__(self, f_extract_from: Callable, f_as_string: Callable = None):
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.FeatureExtractorPyFeatureExtractor, # type: ignore # pylint: disable=no-member
f_extract_from,
f_as_string,
)
class PyFeatureExtractor:
"""
An abstract feature extractor with customized methods on the python-side.
This is the user facing class for function overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PyFeatureExtractor,
"methods": ["extract_from", "__str__"],
}
def extract_from(
self, context: TuneContext, candidates: List[MeasureCandidate]
) -> List[NDArray]:
"""Extract features from the given measure candidate.
Parameters
----------
context : TuneContext
The tuning context for feature extraction.
candidates : List[MeasureCandidate]
The measure candidates to extract features from.
Returns
-------
features : List[NDArray]
            The extracted feature NDArrays.
"""
raise NotImplementedError
def __str__(self) -> str:
return _get_default_str(self)
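# A short usage sketch (assuming `context` and `candidates` come from a search
# strategy during tuning; they are not constructed here):
def _example_extract_features(
    context: TuneContext, candidates: List[MeasureCandidate]
) -> List[NDArray]:
    extractor = FeatureExtractor.create("per-store-feature")
    return extractor.extract_from(context, candidates)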
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/feature_extractor/per_store_feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""We extract one feature vector per BufferStoreNode statement in a TIR Stmt,
so we call this the "per-store" feature.
"""
from tvm._ffi import register_object
from .. import _ffi_api
from .feature_extractor import FeatureExtractor
@register_object("meta_schedule.PerStoreFeature")
class PerStoreFeature(FeatureExtractor):
"""PerStoreFeature extracts one feature vector per BufferStoreNode
Parameters
----------
buffers_per_store : int
        The number of buffers in each BufferStore; pad or truncate if necessary.
arith_intensity_curve_num_samples : int
The number of samples used in the arithmetic intensity curve.
cache_line_bytes : int
The number of bytes in a cache line.
extract_workload : bool
        Whether to extract features from the workload in the tuning context.
"""
buffers_per_store: int
"""The number of buffers in each BufferStore; Pad or truncate if necessary."""
arith_intensity_curve_num_samples: int # pylint: disable=invalid-name
"""The number of samples used in the arithmetic intensity curve."""
cache_line_bytes: int
"""The number of bytes in a cache line."""
extract_workload: bool
"""Whether to extract features in the workload in tuning context or not."""
feature_vector_length: int
"""Length of the feature vector."""
def __init__(
self,
buffers_per_store: int = 5,
arith_intensity_curve_num_samples: int = 10,
cache_line_bytes: int = 64,
extract_workload: bool = False,
):
self.__init_handle_by_constructor__(
_ffi_api.FeatureExtractorPerStoreFeature, # type: ignore # pylint: disable=no-member
buffers_per_store,
arith_intensity_curve_num_samples,
cache_line_bytes,
extract_workload,
)
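# Construction sketch: the defaults above reflect common CPU settings; a user
# might, for instance, also enable workload feature extraction:
def _example_per_store_feature() -> PerStoreFeature:
    return PerStoreFeature(
        buffers_per_store=5,
        arith_intensity_curve_num_samples=10,
        cache_line_bytes=64,
        extract_workload=True,  # also extract features from the workload
    )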
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/feature_extractor/random_feature_extractor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Random Feature Extractor."""
from typing import List, Tuple, Union
import numpy as np # type: ignore
from tvm.runtime.ndarray import NDArray, array
from ..feature_extractor import PyFeatureExtractor
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import derived_object
@derived_object
class RandomFeatureExtractor(PyFeatureExtractor):
"""Random Feature Extractor
Parameters
----------
feature_size : int
The size of each block's feature vector.
max_block_num : int
The maximum number of blocks in each schedule.
random_state : Union[Tuple[str, np.ndarray, int, int, float], dict]
        The current random state of the feature extractor.
"""
feature_size: int
max_block_num: int
random_state: Union[Tuple[str, np.ndarray, int, int, float], dict]
def __init__(self, *, feature_size: int = 30, max_block_num: int = 5, seed=0):
super().__init__()
        assert max_block_num >= 1, "Max block number must be greater than or equal to one!"
self.max_block_num = max_block_num
self.feature_size = feature_size
np.random.seed(seed)
self.random_state = np.random.get_state()
def extract_from(
self, context: TuneContext, candidates: List[MeasureCandidate]
) -> List[NDArray]:
np.random.set_state(self.random_state)
result = [
np.random.rand(np.random.randint(1, self.max_block_num + 1), self.feature_size)
for candidate in candidates
]
self.random_state = np.random.get_state()
return [array(x) for x in result]
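# Usage sketch: because the random state is saved and restored around each
# call, two extractors created with the same seed yield identical features.
def _example_random_features(
    context: TuneContext, candidates: List[MeasureCandidate]
) -> List[NDArray]:
    extractor = RandomFeatureExtractor(feature_size=8, max_block_num=3, seed=42)
    return extractor.extract_from(context, candidates)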
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/logging.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Logging interface in MetaSchedule"""
import logging
import logging.config
import os
import os.path as osp
from logging import Logger
from typing import Any, Callable, Dict, List, Optional
def get_logger(name: str) -> Logger:
"""Create or get a logger by its name. This is essentially a wrapper of python's native logger.
Parameters
----------
name : str
The name of the logger.
Returns
-------
logger : Logger
The logger instance.
"""
return logging.getLogger(name)
def get_logging_func(logger: Logger) -> Optional[Callable[[int, str, int, str], None]]:
"""Get the logging function.
Parameters
----------
logger : Logger
The logger instance.
Returns
-------
result : Optional[Callable]
The function to do the specified level of logging.
"""
if logger is None:
return None
level2log = {
logging.DEBUG: logger.debug,
logging.INFO: logger.info,
logging.WARNING: logger.warning,
logging.ERROR: logger.error,
# logging.FATAL not included
}
    def logging_func(level: int, filename: str, lineno: int, msg: str):
if level < 0: # clear the output in notebook / console
from IPython.display import ( # type: ignore # pylint: disable=import-outside-toplevel
clear_output,
)
clear_output(wait=True)
else:
level2log[level](f"[{os.path.basename(filename)}:{lineo}] " + msg)
return logging_func
def create_loggers(
log_dir: str,
params: List[Dict[str, Any]],
logger_config: Optional[Dict[str, Any]] = None,
disable_existing_loggers: bool = False,
):
"""Create loggers from configuration"""
if logger_config is None:
config = {}
else:
config = logger_config
config.setdefault("loggers", {})
config.setdefault("handlers", {})
config.setdefault("formatters", {})
global_logger_name = "tvm.meta_schedule"
global_logger = logging.getLogger(global_logger_name)
if global_logger.level is logging.NOTSET:
global_logger.setLevel(logging.DEBUG)
console_logging_level = logging._levelToName[ # pylint: disable=protected-access
global_logger.level
]
config["loggers"].setdefault(
global_logger_name,
{
"level": logging.DEBUG,
"handlers": [handler.get_name() for handler in global_logger.handlers]
+ [global_logger_name + ".console", global_logger_name + ".file"],
"propagate": False,
},
)
config["loggers"].setdefault(
"{logger_name}",
{
"level": "DEBUG",
"handlers": [
"{logger_name}.file",
],
"propagate": False,
},
)
config["handlers"].setdefault(
global_logger_name + ".console",
{
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
"formatter": "tvm.meta_schedule.standard_formatter",
"level": console_logging_level,
},
)
config["handlers"].setdefault(
global_logger_name + ".file",
{
"class": "logging.FileHandler",
"filename": "{log_dir}/" + __name__ + ".task_scheduler.log",
"mode": "a",
"level": "DEBUG",
"formatter": "tvm.meta_schedule.standard_formatter",
},
)
config["handlers"].setdefault(
"{logger_name}.file",
{
"class": "logging.FileHandler",
"filename": "{log_dir}/{logger_name}.log",
"mode": "a",
"level": "DEBUG",
"formatter": "tvm.meta_schedule.standard_formatter",
},
)
config["formatters"].setdefault(
"tvm.meta_schedule.standard_formatter",
{
"format": "%(asctime)s [%(levelname)s] %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
},
)
# set up dictConfig loggers
p_config = {"version": 1, "disable_existing_loggers": disable_existing_loggers}
for k, v in config.items():
if k in ["formatters", "handlers", "loggers"]:
p_config[k] = _batch_parameterize_config(v, params) # type: ignore
else:
p_config[k] = v
logging.config.dictConfig(p_config)
# check global logger
if global_logger.level not in [logging.DEBUG, logging.INFO]:
global_logger.warning(
"Logging level set to %s, please set to logging.INFO"
" or logging.DEBUG to view full log.",
logging._levelToName[global_logger.level], # pylint: disable=protected-access
)
global_logger.info("Logging directory: %s", log_dir)
def _batch_parameterize_config(
config: Dict[str, Any],
params: List[Dict[str, str]],
) -> Dict[str, Any]:
"""Parameterize the given configuration with multiple parameters sets.
Parameters
----------
config : Dict[str, Any]
The given config dict.
    params : List[Dict[str, str]]
List of the given multiple parameters sets.
Returns
-------
result : Dict[str, Any]
The parameterized configuration.
"""
results = {}
for name, cfg in config.items():
for p in params:
p_name = name.format(**p)
if p_name not in results:
p_cfg = _parameterize_config(cfg, p)
results[p_name] = p_cfg
return results
def _parameterize_config(
config: Dict[str, Any],
params: Dict[str, str],
) -> Dict[str, Any]:
"""Parameterize the given configuration.
Parameters
----------
config : Dict[str, Any]
The given config dict.
    params : Dict[str, str]
The given parameters.
Returns
-------
result : Dict[str, Any]
The parameterized configuration.
"""
result = {}
for k, v in config.items():
if isinstance(k, str):
k = k.format(**params)
if isinstance(v, str):
v = v.format(**params)
elif isinstance(v, dict):
v = _parameterize_config(v, params)
elif isinstance(v, list):
v = [t.format(**params) for t in v]
result[k] = v
return result
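# For example (hypothetical values), the templated handler entry
#   {"{logger_name}.file": {"filename": "{log_dir}/{logger_name}.log"}}
# parameterized with
#   {"logger_name": "tvm.meta_schedule.task_0", "log_dir": "/tmp/logs"}
# becomes
#   {"tvm.meta_schedule.task_0.file":
#    {"filename": "/tmp/logs/tvm.meta_schedule.task_0.log"}}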
def get_loggers_from_work_dir(
work_dir: str,
task_names: List[str],
) -> List[Logger]:
"""Create loggers from work directory
Parameters
----------
work_dir : str
The work directory.
task_names : List[str]
The list of task names.
Returns
-------
loggers : List[Logger]
The list of loggers.
"""
log_dir = osp.join(work_dir, "logs")
os.makedirs(log_dir, exist_ok=True)
pattern = __name__ + ".task_{i:0" + f"{len(str(len(task_names) - 1))}" + "d}_{name}"
loggers = [pattern.format(i=i, name=name) for i, name in enumerate(task_names)]
create_loggers(
log_dir=log_dir,
params=[{"log_dir": log_dir, "logger_name": logger} for logger in loggers],
)
return [get_logger(logger) for logger in loggers]
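# Usage sketch (hypothetical directory and task names): each task gets its own
# log file under <work_dir>/logs, alongside the global console/file handlers.
def _example_loggers() -> List[Logger]:
    return get_loggers_from_work_dir("/tmp/tune_workdir", ["matmul", "conv2d"])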
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/measure_callback/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The tvm.meta_schedule.measure_callback package."""
from .add_to_database import AddToDatabase
from .measure_callback import MeasureCallback, PyMeasureCallback
from .remove_build_artifact import RemoveBuildArtifact
from .update_cost_model import UpdateCostModel
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/measure_callback/add_to_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A callback that adds the measurement results into the database"""
from tvm._ffi import register_object
from .. import _ffi_api
from .measure_callback import MeasureCallback
@register_object("meta_schedule.AddToDatabase")
class AddToDatabase(MeasureCallback):
def __init__(self) -> None:
"""A callback that adds the measurement results into the database"""
self.__init_handle_by_constructor__(
_ffi_api.MeasureCallbackAddToDatabase, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/measure_callback/measure_callback.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule MeasureCallback."""
from typing import TYPE_CHECKING, Callable, List, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from .. import _ffi_api
from ..builder import BuilderResult
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..utils import _get_default_str
if TYPE_CHECKING:
from ..task_scheduler import TaskScheduler
@register_object("meta_schedule.MeasureCallback")
class MeasureCallback(Object):
"""Rules to apply after measure results is available."""
CallbackListType = Union[List["MeasureCallback"], "MeasureCallback", Literal["default"]]
def apply(
self,
task_scheduler: "TaskScheduler",
task_id: int,
measure_candidates: List[MeasureCandidate],
builder_results: List[BuilderResult],
runner_results: List[RunnerResult],
) -> None:
"""Apply a measure callback to the given schedule.
Parameters
----------
task_scheduler: TaskScheduler
The task scheduler.
task_id: int
The task id.
measure_candidates: List[MeasureCandidate]
The measure candidates.
builder_results: List[BuilderResult]
The builder results by building the measure candidates.
runner_results: List[RunnerResult]
The runner results by running the built measure candidates.
"""
return _ffi_api.MeasureCallbackApply( # type: ignore # pylint: disable=no-member
self,
task_scheduler,
task_id,
measure_candidates,
builder_results,
runner_results,
)
@staticmethod
def create(kind: Literal["default"]) -> List["MeasureCallback"]:
"""Create a list of measure callbacks."""
if kind == "default":
return _ffi_api.MeasureCallbackDefault() # type: ignore # pylint: disable=no-member
raise ValueError(f"Unknown kind of MeasureCallback list: {kind}")
@register_object("meta_schedule.PyMeasureCallback")
class _PyMeasureCallback(MeasureCallback):
"""
A TVM object measure callback to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PyMeasureCallback
"""
def __init__(self, f_apply: Callable, f_as_string: Callable = None):
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.MeasureCallbackPyMeasureCallback, # type: ignore # pylint: disable=no-member
f_apply,
f_as_string,
)
class PyMeasureCallback:
"""
An abstract measure callback with customized methods on the python-side.
This is the user facing class for function overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PyMeasureCallback,
"methods": ["apply", "__str__"],
}
def apply(
self,
task_scheduler: "TaskScheduler",
task_id: int,
measure_candidates: List[MeasureCandidate],
builder_results: List[BuilderResult],
runner_results: List[RunnerResult],
) -> None:
"""Apply a measure callback to the given schedule.
Parameters
----------
task_scheduler: TaskScheduler
The task scheduler.
task_id: int
The task id.
measure_candidates: List[MeasureCandidate]
The measure candidates.
builder_results: List[BuilderResult]
The builder results by building the measure candidates.
runner_results: List[RunnerResult]
The runner results by running the built measure candidates.
"""
raise NotImplementedError
def __str__(self) -> str:
return _get_default_str(self)
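# A minimal sketch of a user-defined callback (hypothetical, not part of the
# upstream package); as noted above, @derived_object is required:
def _make_example_callback() -> MeasureCallback:
    from ..utils import derived_object  # pylint: disable=import-outside-toplevel
    @derived_object
    class _CountMeasuredCallback(PyMeasureCallback):
        """Count how many candidates have been measured so far."""
        def __init__(self) -> None:
            super().__init__()
            self.count = 0
        def apply(
            self,
            task_scheduler: "TaskScheduler",
            task_id: int,
            measure_candidates: List[MeasureCandidate],
            builder_results: List[BuilderResult],
            runner_results: List[RunnerResult],
        ) -> None:
            self.count += len(runner_results)
    return _CountMeasuredCallback()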
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/measure_callback/remove_build_artifact.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A callback that removes the build artifacts from the disk"""
from tvm._ffi import register_object
from .. import _ffi_api
from .measure_callback import MeasureCallback
@register_object("meta_schedule.RemoveBuildArtifact")
class RemoveBuildArtifact(MeasureCallback):
def __init__(self) -> None:
"""A callback that removes the build artifacts from the disk"""
self.__init_handle_by_constructor__(
_ffi_api.MeasureCallbackRemoveBuildArtifact, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/measure_callback/update_cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A measure callback that updates the cost model"""
from tvm._ffi import register_object
from .. import _ffi_api
from .measure_callback import MeasureCallback
@register_object("meta_schedule.UpdateCostModel")
class UpdateCostModel(MeasureCallback):
def __init__(self) -> None:
"""A measure callback that updates the cost model"""
self.__init_handle_by_constructor__(
_ffi_api.MeasureCallbackUpdateCostModel, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/mutator/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.mutator package.
Meta Schedule mutator that mutates the trace to explore the
design space.
"""
from .mutator import Mutator, PyMutator
from .mutate_compute_location import MutateComputeLocation
from .mutate_tile_size import MutateTileSize
from .mutate_thread_binding import MutateThreadBinding
from .mutate_parallel import MutateParallel
from .mutate_unroll import MutateUnroll
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/mutator/mutate_compute_location.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A mutator that mutates the compute-at location decision of SampleComputeLocation"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateComputeLocation")
class MutateComputeLocation(Mutator):
"""A mutator that mutates the compute-at location decision of SampleComputeLocation"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.MutatorMutateComputeLocation, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/mutator/mutate_parallel.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mutator that mutates the parallel extent"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateParallel")
class MutateParallel(Mutator):
"""Mutator that mutates the parallel extent"""
def __init__(self, max_jobs_per_core: int) -> None:
"""Mutator that mutates the parallel extent"""
self.__init_handle_by_constructor__(
_ffi_api.MutatorMutateParallel, # type: ignore # pylint: disable=no-member
max_jobs_per_core,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/mutator/mutate_thread_binding.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mutator that mutates the thread binding extent"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateThreadBinding")
class MutateThreadBinding(Mutator):
"""Mutator that mutates the binding extent"""
def __init__(self) -> None:
"""Mutator that mutates the binding extent"""
self.__init_handle_by_constructor__(
_ffi_api.MutateThreadBinding, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/mutator/mutate_tile_size.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mutator that mutates the decision of instruction Sample-Perfect-Tile"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateTileSize")
class MutateTileSize(Mutator):
"""Mutator that mutates the decision of instruction Sample-Perfect-Tile"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.MutatorMutateTileSize, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/mutator/mutate_unroll.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mutator that mutates auto unroll step"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateUnroll")
class MutateUnroll(Mutator):
"""Mutator that mutates auto unroll step"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.MutatorMutateUnroll, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/mutator/mutator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule Mutator."""
from typing import TYPE_CHECKING, Callable, Dict, Optional
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir.schedule import Trace
from .. import _ffi_api
from ..utils import _get_default_str
if TYPE_CHECKING:
from ..tune_context import TuneContext
@register_object("meta_schedule.Mutator")
class Mutator(Object):
"""Mutator is designed to mutate the trace to explore the design space."""
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the mutator with a tune context.
Parameters
----------
context : TuneContext
The tuning context for initializing the mutator.
"""
_ffi_api.MutatorInitializeWithTuneContext( # type: ignore # pylint: disable=no-member
self, context
)
def apply(self, trace: Trace) -> Optional[Trace]:
"""Apply the mutator function to the given trace.
Parameters
----------
trace : Trace
The given trace for mutation.
Returns
-------
trace : Optional[Trace]
None if mutator failed, otherwise return the mutated trace.
"""
return _ffi_api.MutatorApply(self, trace, -1) # type: ignore # pylint: disable=no-member
def clone(self) -> "Mutator":
"""Clone the mutator.
Returns
-------
mutator : Mutator
The cloned mutator.
"""
return _ffi_api.MutatorClone(self) # type: ignore # pylint: disable=no-member
@staticmethod
def create(
kind: Literal[
"llvm",
"cuda",
"cuda-tensorcore",
"hexagon",
]
) -> Dict["Mutator", float]:
"""Create a list of default mutators.
Parameters
----------
kind : Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]
The kind of mutators.
Returns
-------
        mutators : Dict[Mutator, float]
            The created mutators and their probabilities.
"""
funcs = {
# pylint: disable=no-member
"llvm": _ffi_api.MutatorDefaultLLVM, # type: ignore
"cuda": _ffi_api.MutatorDefaultCUDA, # type: ignore
"cuda-tensorcore": _ffi_api.MutatorDefaultCUDATensorCore, # type: ignore
"hexagon": _ffi_api.MutatorDefaultHexagon, # type: ignore
# pylint: enable=no-member
}
for k, v in funcs.items():
if k == kind:
return v()
raise ValueError(f"Unsupported kind {kind} for mutator creation.")
create = Mutator.create # pylint: disable=invalid-name
@register_object("meta_schedule.PyMutator")
class _PyMutator(Mutator):
"""
A TVM object mutator to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PyMutator
"""
def __init__(
self,
f_initialize_with_tune_context: Callable = None,
f_apply: Callable = None,
f_clone: Callable = None,
f_as_string: Callable = None,
):
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.MutatorPyMutator, # type: ignore # pylint: disable=no-member
f_initialize_with_tune_context,
f_apply,
f_clone,
f_as_string,
)
class PyMutator:
"""
An abstract mutator with customized methods on the python-side.
This is the user facing class for function overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PyMutator,
"methods": ["_initialize_with_tune_context", "apply", "clone", "__str__"],
}
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the mutator with a tune context.
Parameters
----------
context : TuneContext
The tuning context for initializing the mutator.
"""
raise NotImplementedError
def apply(self, trace: Trace, _) -> Optional[Trace]:
"""Apply the mutator function to the given trace.
Parameters
----------
trace : Trace
The given trace for mutation.
Returns
-------
trace : Optional[Trace]
None if mutator failed, otherwise return the mutated trace.
"""
raise NotImplementedError
def clone(self) -> Mutator:
"""Clone the mutator.
Returns
-------
mutator : Mutator
The cloned mutator.
"""
raise NotImplementedError
def __str__(self) -> str:
"""Get the mutator as string with name.
Return
------
result : str
Get the mutator as string with name.
"""
return _get_default_str(self)
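# A minimal sketch of a user-defined mutator (hypothetical): it declines to
# mutate anything by returning None; as noted, @derived_object is required.
def _make_example_mutator() -> Mutator:
    from ..utils import derived_object  # pylint: disable=import-outside-toplevel
    @derived_object
    class _NoOpMutator(PyMutator):
        def _initialize_with_tune_context(self, context: "TuneContext") -> None:
            pass
        def apply(self, trace: Trace, _) -> Optional[Trace]:
            return None  # signal that the mutation failed
        def clone(self) -> "Mutator":
            return _NoOpMutator()
    return _NoOpMutator()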
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The tvm.meta_schedule.postproc package."""
from .disallow_dynamic_loop import DisallowDynamicLoop
from .postproc import Postproc, PyPostproc
from .rewrite_cooperative_fetch import RewriteCooperativeFetch
from .rewrite_layout import RewriteLayout
from .rewrite_parallel_vectorize_unroll import RewriteParallelVectorizeUnroll
from .rewrite_reduction_block import RewriteReductionBlock
from .rewrite_tensorize import RewriteTensorize
from .rewrite_unbound_block import RewriteUnboundBlock
from .verify_gpu_code import VerifyGPUCode
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/disallow_dynamic_loop.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that checks if the IRModule has any loop with non-constant extent"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.DisallowDynamicLoop")
class DisallowDynamicLoop(Postproc):
"""A postprocessor that checks if the IRModule has any loop with non-constant extent"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.PostprocDisallowDynamicLoop, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/postproc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule Postproc."""
from typing import TYPE_CHECKING, Callable, List
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir.schedule import Schedule
from .. import _ffi_api
from ..utils import _get_default_str
if TYPE_CHECKING:
from ..tune_context import TuneContext
@register_object("meta_schedule.Postproc")
class Postproc(Object):
"""Rules to apply a postprocessor to a schedule."""
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the postprocessor with a tune context.
Parameters
----------
context : TuneContext
The tuning context for initializing the postprocessor.
"""
_ffi_api.PostprocInitializeWithTuneContext( # type: ignore # pylint: disable=no-member
self, context
)
def apply(self, sch: Schedule) -> bool:
"""Apply a postprocessor to the given schedule.
Parameters
----------
sch : Schedule
The schedule to be post processed.
Returns
-------
result : bool
Whether the postprocessor was successfully applied.
"""
return _ffi_api.PostprocApply(self, sch) # type: ignore # pylint: disable=no-member
def clone(self) -> "Postproc":
"""Clone the postprocessor.
Returns
-------
cloned_postproc : Postproc
The cloned postprocessor.
"""
return _ffi_api.PostprocClone(self) # type: ignore # pylint: disable=no-member
@staticmethod
def create(kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]) -> List["Postproc"]:
"""Create a list of default postprocessors.
Parameters
----------
kind : Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]
The kind of the postprocessors.
Returns
-------
        postprocs : List[Postproc]
The list of postprocessors.
"""
funcs = {
# pylint: disable=no-member
"llvm": _ffi_api.PostprocDefaultLLVM, # type: ignore
"cuda": _ffi_api.PostprocDefaultCUDA, # type: ignore
"cuda-tensorcore": _ffi_api.PostprocDefaultCUDATensorCore, # type: ignore
"hexagon": _ffi_api.PostprocDefaultHexagon, # type: ignore
# pylint: enable=no-member
}
for k, v in funcs.items():
if k == kind:
return v()
raise ValueError(f"Unsupported kind {kind} for postproc creation.")
create = Postproc.create # pylint: disable=invalid-name
@register_object("meta_schedule.PyPostproc")
class _PyPostproc(Postproc):
"""
A TVM object post processor to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PyPostproc
"""
def __init__(
self,
f_initialize_with_tune_context: Callable = None,
f_apply: Callable = None,
f_clone: Callable = None,
f_as_string: Callable = None,
):
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.PostprocPyPostproc, # type: ignore # pylint: disable=no-member
f_initialize_with_tune_context,
f_apply,
f_clone,
f_as_string,
)
class PyPostproc:
"""
An abstract post processor with customized methods on the python-side.
This is the user facing class for function overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PyPostproc,
"methods": ["_initialize_with_tune_context", "apply", "clone", "__str__"],
}
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the postprocessor with a tune context.
Parameters
----------
context : TuneContext
The tuning context for initializing the postprocessor.
"""
raise NotImplementedError
def apply(self, sch: Schedule) -> bool:
"""Apply a postprocessor to the given schedule.
Parameters
----------
sch : Schedule
The schedule to be post processed.
Returns
-------
result : bool
Whether the postprocessor was successfully applied.
"""
raise NotImplementedError
def clone(self) -> Postproc:
"""Clone the postprocessor.
Returns
-------
cloned_postproc : Postproc
The cloned postprocessor.
"""
raise NotImplementedError
def __str__(self) -> str:
"""Get the post processor as string with name.
Return
------
result : str
Get the post processor as string with name.
"""
return _get_default_str(self)
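# A minimal sketch of a user-defined postprocessor (hypothetical): it accepts
# every schedule unchanged; as noted above, @derived_object is required.
def _make_example_postproc() -> Postproc:
    from ..utils import derived_object  # pylint: disable=import-outside-toplevel
    @derived_object
    class _AcceptAllPostproc(PyPostproc):
        def _initialize_with_tune_context(self, context: "TuneContext") -> None:
            pass
        def apply(self, sch: Schedule) -> bool:
            return True  # leave the schedule as-is and report success
        def clone(self) -> "Postproc":
            return _AcceptAllPostproc()
    return _AcceptAllPostproc()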
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/rewrite_cooperative_fetch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that rewrites the cooperative fetch annotation to actual
vectorized cooperative fetching in loop bindings."""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteCooperativeFetch")
class RewriteCooperativeFetch(Postproc):
"""A postprocessor that rewrites the cooperative fetch annotation to actual vectorized
cooperative fetching in loop bindings.
"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.PostprocRewriteCooperativeFetch, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/rewrite_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that rewrites the layout of input tensor"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteLayout")
class RewriteLayout(Postproc):
"""A postprocessor that rewrites the layout of input tensor"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.PostprocRewriteLayout, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/rewrite_parallel_vectorize_unroll.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that applies parallelization, vectorization and auto unrolling
according to the annotation of each block"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteParallelVectorizeUnroll")
class RewriteParallelVectorizeUnroll(Postproc):
"""A postprocessor that applies parallelization, vectorization and auto unrolling
according to the annotation of each block"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.PostprocRewriteParallelVectorizeUnroll, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/rewrite_reduction_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that rewrites reduction block by moving the init block out."""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteReductionBlock")
class RewriteReductionBlock(Postproc):
"""A postprocessor that rewrites reduction block by moving the init block out."""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.PostprocRewriteReductionBlock, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/rewrite_tensorize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that tensorize related components."""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteTensorize")
class RewriteTensorize(Postproc):
"""A postprocessor that applies tensorization to annotated blocks.
Parameters
----------
vectorize_init_loop : bool
Whether or not vectorize the initialization loop produced by DecomposeReduction
"""
def __init__(self, vectorize_init_loop=False) -> None:
self.__init_handle_by_constructor__(
_ffi_api.PostprocRewriteTensorize, # type: ignore # pylint: disable=no-member
vectorize_init_loop,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/rewrite_unbound_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that adds thread binding to unbound blocks"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteUnboundBlock")
class RewriteUnboundBlock(Postproc):
"""A postprocessor that adds thread binding to unbound blocks"""
def __init__(self, max_threadblocks: int = 256) -> None:
self.__init_handle_by_constructor__(
_ffi_api.PostprocRewriteUnboundBlock, # type: ignore # pylint: disable=no-member
max_threadblocks,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/postproc/verify_gpu_code.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that verifies if the GPU code is correct"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.VerifyGPUCode")
class VerifyGPUCode(Postproc):
"""A postprocessor that verifies if the GPU code is correct"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.PostprocVerifyGPUCode, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/profiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A context manager that profiles tuning time cost for different parts."""
from contextlib import contextmanager
from typing import Dict, Optional
from tvm._ffi import register_object
from tvm.runtime import Object
from . import _ffi_api
@register_object("meta_schedule.Profiler")
class Profiler(Object):
"""Tuning time profiler."""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.Profiler, # type: ignore # pylint: disable=no-member
)
def get(self) -> Dict[str, float]:
"""Get the profiling results in seconds"""
return _ffi_api.ProfilerGet(self) # type: ignore # pylint: disable=no-member
def table(self) -> str:
"""Get the profiling results in a table format"""
return _ffi_api.ProfilerTable(self) # type: ignore # pylint: disable=no-member
def __enter__(self) -> "Profiler":
"""Entering the scope of the context manager"""
_ffi_api.ProfilerEnterWithScope(self) # type: ignore # pylint: disable=no-member
return self
def __exit__(self, ptype, value, trace) -> None:
"""Exiting the scope of the context manager"""
_ffi_api.ProfilerExitWithScope(self) # type: ignore # pylint: disable=no-member
@staticmethod
def current() -> Optional["Profiler"]:
"""Get the current profiler."""
return _ffi_api.ProfilerCurrent() # type: ignore # pylint: disable=no-member
@staticmethod
def timeit(name: str):
"""Timeit a block of code"""
@contextmanager
def _timeit():
try:
f = _ffi_api.ProfilerTimedScope(name) # type: ignore # pylint: disable=no-member
yield
finally:
if f:
f()
return _timeit()
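# Usage sketch (illustrative): `Profiler` is a context manager, and `Profiler.timeit`
# annotates nested scopes. The scope name "Example/scope" is an arbitrary label made
# up for this sketch.
if __name__ == "__main__":
    with Profiler() as profiler:
        with Profiler.timeit("Example/scope"):
            sum(range(1_000_000))  # stand-in for real tuning work
    print(profiler.table())  # pretty-printed per-scope timing
    print(profiler.get())  # raw Dict[str, float], in seconds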
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/relay_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MetaSchedule-Relay integration"""
from contextlib import contextmanager
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union
# isort: off
from typing_extensions import Literal
# isort: on
import numpy as np # type: ignore
from tvm import nd
from tvm._ffi import get_global_func
from tvm.ir import IRModule, transform
from tvm.runtime import NDArray
from tvm.target import Target
from .builder import Builder
from .cost_model import CostModel
from .database import Database
from .extracted_task import ExtractedTask
from .logging import get_loggers_from_work_dir
from .measure_callback import MeasureCallback
from .profiler import Profiler
from .runner import Runner
from .search_strategy import SearchStrategy
from .space_generator import SpaceGenerator
from .task_scheduler import TaskScheduler
from .tune import tune_tasks
from .tune_context import TuneContext
from .utils import fork_seed
if TYPE_CHECKING:
from tvm import relay
_extract_task = get_global_func( # pylint: disable=invalid-name
"relay.backend.MetaScheduleExtractTask",
allow_missing=True,
)
@contextmanager
def _autotvm_silencer():
"""A context manager that silences autotvm warnings."""
from tvm import autotvm # pylint: disable=import-outside-toplevel
silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
try:
yield
finally:
autotvm.GLOBAL_SCOPE.silent = silent
def _normalize_params(
mod: IRModule,
target: Union[Target, str],
params: Optional[Dict[str, NDArray]],
pass_config: Mapping[str, Any],
executor: Optional["relay.backend.Executor"],
) -> Tuple[
IRModule,
Target,
Dict[str, NDArray],
Dict[str, Any],
Optional["relay.backend.Executor"],
]:
from tvm import relay # pylint: disable=import-outside-toplevel
if isinstance(mod, relay.Function):
mod = IRModule.from_expr(mod)
if not isinstance(target, Target):
target = Target(target)
if params is None:
params = {}
relay_params = {}
for name, param in params.items():
if isinstance(param, np.ndarray):
param = nd.array(param)
relay_params[name] = param
if executor is None:
executor = relay.backend.Executor("graph")
if mod.get_attr("executor") is None:
mod = mod.with_attr("executor", executor)
else:
executor = mod.get_attr("executor")
pass_config = dict(pass_config)
return mod, target, relay_params, pass_config, executor
def extract_tasks(
mod: IRModule,
target: Union[Target, str],
params: Optional[Dict[str, NDArray]],
*,
opt_level: int = 3,
pass_config: Mapping[str, Any] = MappingProxyType(
{
"relay.backend.use_meta_schedule": True,
"relay.backend.tir_converter": "default",
}
),
executor: Optional["relay.backend.Executor"] = None,
module_equality: str = "structural",
) -> List[ExtractedTask]:
"""Extract tuning tasks from a relay program.
Parameters
----------
mod : IRModule
The module or function to tune
target : tvm.target.Target
The compilation target
params : Optional[Dict[str, tvm.runtime.NDArray]]
The associated parameters of the program
opt_level : int
The optimization level of the compilation
pass_config : Mapping[str, Any]
The pass configuration
executor : Optional[relay.backend.Executor]
The executor to use
module_equality : str
A string to specify the module equality testing and hashing method.
It must be one of the following:
- "structural": Use StructuralEqual/Hash
- "ignore-ndarray": Same as "structural", but ignore ndarray raw data during
equality testing and hashing.
- "anchor-block": Apply equality testing and hashing on the anchor block extracted from a
given module. The "ignore-ndarray" varint is used for the extracted
blocks or in case no anchor block is found.
For the definition of the anchor block, see tir/analysis/analysis.py.
Returns
-------
tasks: List[ExtractedTask]
The tasks extracted from this network
"""
# pylint: disable=import-outside-toplevel
from tvm import autotvm
# pylint: enable=import-outside-toplevel
mod, target, params, pass_config, _ = _normalize_params(
mod, target, params, pass_config, executor
)
if target.kind.name != "cuda" and isinstance(
autotvm.DispatchContext.current, autotvm.FallbackContext
):
tophub_context = autotvm.tophub.context(target)
else:
tophub_context = autotvm.utils.EmptyContext()
with Profiler.timeit("TaskExtraction"):
with target, _autotvm_silencer(), tophub_context:
with transform.PassContext(
opt_level=opt_level,
config=pass_config,
):
return list(_extract_task(mod, target, params, module_equality))
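# Illustrative sketch: extracting tasks from a tiny conv2d workload. The shapes and
# the "llvm" target below are arbitrary choices for this sketch, not API requirements.
def _example_extract_tasks() -> List[ExtractedTask]:
    from tvm import relay  # pylint: disable=import-outside-toplevel
    data = relay.var("data", shape=(1, 3, 32, 32), dtype="float32")
    weight = relay.var("weight", shape=(8, 3, 3, 3), dtype="float32")
    mod = IRModule.from_expr(relay.nn.conv2d(data, weight))
    return extract_tasks(mod, target="llvm", params=None)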
def extracted_tasks_to_tune_contexts(
extracted_tasks: List[ExtractedTask],
work_dir: str,
space: SpaceGenerator.SpaceGeneratorType = "post-order-apply",
strategy: SearchStrategy.SearchStrategyType = "evolutionary",
num_threads: Union[Literal["physical", "logical"], int] = "physical",
seed: Optional[int] = None,
) -> Tuple[List[TuneContext], List[float]]:
"""Convert ExtractedTask to TuneContext.
Parameters
----------
extracted_tasks : List[ExtractedTask]
The tasks to be converted
work_dir : str
The working directory to store logs and databases
space : SpaceGenerator.SpaceGeneratorType
The space generator to use.
strategy : SearchStrategy.SearchStrategyType
The search strategy to use.
num_threads : Union[Literal["physical", "logical"], int]
The number of threads to use in multi-threaded search algorithm.
seed : Optional[int]
The random seed to use.
Returns
-------
tasks : List[TuneContext]
The converted tasks
task_weights : List[float]
The weights of the tasks
"""
tasks: List[TuneContext] = []
task_weights: List[float] = []
for task, logger, rand_state in zip(
extracted_tasks,
get_loggers_from_work_dir(work_dir, [t.task_name for t in extracted_tasks]),
fork_seed(seed, n=len(extracted_tasks)),
):
tasks.append(
TuneContext(
mod=task.dispatched[0],
target=task.target,
space_generator=space,
search_strategy=strategy,
task_name=task.task_name,
logger=logger,
rand_state=rand_state,
num_threads=num_threads,
).clone()
)
task_weights.append(task.weight)
return tasks, task_weights
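# Illustrative sketch: converting extracted tasks into tune contexts; "./work_dir"
# is a placeholder path assumed for this sketch.
def _example_to_contexts(extracted: List[ExtractedTask]) -> List[TuneContext]:
    tasks, _task_weights = extracted_tasks_to_tune_contexts(
        extracted_tasks=extracted,
        work_dir="./work_dir",
    )
    return tasks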
def tune_relay(
mod: IRModule,
params: Dict[str, NDArray],
target: Union[str, Target],
work_dir: str,
max_trials_global: int,
*,
max_trials_per_task: Optional[int] = None,
num_trials_per_iter: int = 64,
builder: Builder.BuilderType = "local",
runner: Runner.RunnerType = "local",
database: Database.DatabaseType = "json",
cost_model: CostModel.CostModelType = "xgb",
measure_callbacks: MeasureCallback.CallbackListType = "default",
task_scheduler: TaskScheduler.TaskSchedulerType = "gradient",
space: SpaceGenerator.SpaceGeneratorType = "post-order-apply",
strategy: SearchStrategy.SearchStrategyType = "evolutionary",
seed: Optional[int] = None,
module_equality: str = "structural",
) -> Database:
"""Tune a Relay program.
Parameters
----------
mod : IRModule
The module or function to tune
params : Dict[str, tvm.runtime.NDArray]
The associated parameters of the program
target : Union[Target, str]
The compilation target
work_dir : str
The working directory to store the tuning records
max_trials_global : int
The maximum number of trials to run
max_trials_per_task : Optional[int]
The maximum number of trials to run for each task
num_trials_per_iter : int
The number of trials to run per iteration
builder : BuilderType
The builder to use
runner : RunnerType
The runner to use
database : DatabaseType
The database to use
cost_model : CostModelType
The cost model to use
measure_callbacks : CallbackListType
The measure callbacks to use
task_scheduler : TaskSchedulerType
The task scheduler to use
space : SpaceGeneratorType
The space generator to use
strategy : SearchStrategyType
The search strategy to use
seed : Optional[int]
The random seed
module_equality : str
A string to specify the module equality testing and hashing method.
It must be one of the following:
- "structural": Use StructuralEqual/Hash
- "ignore-ndarray": Same as "structural", but ignore ndarray raw data during
equality testing and hashing.
- "anchor-block": Apply equality testing and hashing on the anchor block extracted from a
given module. The "ignore-ndarray" varint is used for the extracted
blocks or in case no anchor block is found.
For the definition of the anchor block, see tir/analysis/analysis.py.
Returns
-------
database : Database
The database that contains the tuning records
"""
tasks, task_weights = extracted_tasks_to_tune_contexts(
extracted_tasks=extract_tasks(mod, target, params, module_equality=module_equality),
work_dir=work_dir,
space=space,
strategy=strategy,
seed=seed,
)
return tune_tasks(
tasks=tasks,
task_weights=task_weights,
work_dir=work_dir,
max_trials_global=max_trials_global,
max_trials_per_task=max_trials_per_task,
num_trials_per_iter=num_trials_per_iter,
builder=builder,
runner=runner,
database=database,
cost_model=cost_model,
measure_callbacks=measure_callbacks,
task_scheduler=task_scheduler,
module_equality=module_equality,
)
def compile_relay(
database: Database,
mod: IRModule,
target: Union[Target, str],
params: Optional[Dict[str, NDArray]],
*,
backend: Literal["graph", "vm"] = "graph",
opt_level: int = 3,
pass_config: Mapping[str, Any] = MappingProxyType(
{
"relay.backend.use_meta_schedule": True,
"relay.backend.tir_converter": "default",
}
),
executor: Optional["relay.backend.Executor"] = None,
):
"""Compile a relay program with a MetaSchedule database.
Parameters
----------
database : Database
The database to use
mod : IRModule
The Relay program to be compiled
target : tvm.target.Target
The compilation target
params : Optional[Dict[str, tvm.runtime.NDArray]]
The associated parameters of the program
backend : str
The backend to use. Builtin backends:
- "graph"
- "vm"
opt_level : int
The optimization level of the compilation
pass_config : Mapping[str, Any]
The pass configuration
executor : Optional[relay.backend.Executor]
The executor to use in relay.build. It is not supported by RelayVM.
Returns
-------
lib : Union[Module, tvm.runtime.vm.Executable]
The built runtime module or vm Executable for the given relay workload.
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
# pylint: enable=import-outside-toplevel
mod, target, params, pass_config, executor = _normalize_params(
mod, target, params, pass_config, executor
)
pass_config.setdefault("relay.backend.use_meta_schedule_dispatch", True)
with Profiler.timeit("PostTuningCompilation"):
with target, _autotvm_silencer(), database:
with transform.PassContext(
opt_level=opt_level,
config=pass_config,
):
if backend == "graph":
return relay.build(mod, target=target, params=params, executor=executor)
elif backend == "vm":
return relay.vm.compile(mod, target=target, params=params)
else:
raise ValueError(f"Unknown backend: {backend}")
def is_meta_schedule_enabled() -> bool:
"""Return whether the meta-schedule is enabled.
Returns
-------
enabled: bool
Whether the meta schedule is enabled
"""
return transform.PassContext.current().config.get(
"relay.backend.use_meta_schedule",
False,
)
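# End-to-end sketch (illustrative): extract_tasks feeds tune_relay, whose database
# then drives compile_relay. The "llvm" target, the "./work_dir" path, and the trial
# budget are placeholder choices for this sketch.
def _example_tune_and_compile(mod: IRModule, params: Dict[str, NDArray]):
    database = tune_relay(
        mod=mod,
        params=params,
        target="llvm",
        work_dir="./work_dir",
        max_trials_global=64,
    )
    return compile_relay(database, mod, target="llvm", params=params)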
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/runner/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.runner package.
Meta Schedule runners that run an artifact either locally or through the RPC interface
"""
from .config import EvaluatorConfig, RPCConfig
from .local_runner import LocalRunner, LocalRunnerFuture
from .rpc_runner import RPCRunner
from .runner import (
PyRunner,
PyRunnerFuture,
Runner,
RunnerFuture,
RunnerInput,
RunnerResult,
create,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/runner/config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configurations for measurements in the runner"""
import os
from threading import Thread
from typing import NamedTuple, Optional, Union
from tvm import rpc
class EvaluatorConfig(NamedTuple):
"""Config Details of Evaluator
Parameters
----------
number: int
The number of times to run this function for taking the average.
We call these runs one `repeat` of the measurement.
repeat: int
The number of times to repeat the measurement.
In total, the function will be invoked (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int
Minimum repeat time in ms. If the execution latency is too short,
the number of runs is increased to reach the given time (in ms) and reduce measurement error.
enable_cpu_cache_flush: bool
Whether to flush the cache on CPU.
Note
----
The total number of actual executions is 1 + number * repeat, because one warm-up run happens
before the actual runs. The number of runs is increased if the run time is below min_repeat_ms.
"""
number: int = 3
repeat: int = 1
min_repeat_ms: int = 100
enable_cpu_cache_flush: bool = False
@staticmethod
def _normalized(config: Optional["EvaluatorConfig"]) -> "EvaluatorConfig":
if config is None:
return EvaluatorConfig()
config = EvaluatorConfig(
number=config.number,
repeat=config.repeat,
min_repeat_ms=config.min_repeat_ms,
enable_cpu_cache_flush=config.enable_cpu_cache_flush,
)
return config
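# Arithmetic sketch: with the defaults above, one measurement invokes the function
# 1 + number * repeat = 1 + 3 * 1 = 4 times, and the first (warm-up) invocation is
# discarded. The helper name below is made up for this sketch.
def _example_total_invocations(config: EvaluatorConfig) -> int:
    return 1 + config.number * config.repeat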
class RPCConfig(NamedTuple):
"""RPC configuration
Parameters
----------
tracker_host: str
Host of the RPC Tracker
tracker_port: int
Port of the RPC Tracker
tracker_key: str
Key of the Tracker
session_timeout_sec: float
Timeout of the RPC session
session_priority: int
Priority of the RPC session
"""
tracker_host: Optional[str] = None
tracker_port: Union[None, int, str] = None
tracker_key: Optional[str] = None
session_priority: int = 1
session_timeout_sec: float = 10.0
def _sanity_check(self) -> None:
err_str = (
"RPCConfig.{0} is not provided. Please provide it explicitly,"
"or set environment variable {1}"
)
if self.tracker_host is None:
raise ValueError(err_str.format("tracker_host", "TVM_TRACKER_HOST"))
if self.tracker_port is None:
raise ValueError(err_str.format("tracker_port", "TVM_TRACKER_PORT"))
if self.tracker_key is None:
raise ValueError(err_str.format("tracker_key", "TVM_TRACKER_KEY"))
@staticmethod
def _normalized(config: Optional["RPCConfig"]) -> "RPCConfig":
if config is None:
config = RPCConfig()
config = RPCConfig(
tracker_host=config.tracker_host or os.environ.get("TVM_TRACKER_HOST", None),
tracker_port=config.tracker_port or os.environ.get("TVM_TRACKER_PORT", None),
tracker_key=config.tracker_key or os.environ.get("TVM_TRACKER_KEY", None),
session_priority=config.session_priority,
session_timeout_sec=config.session_timeout_sec,
)
config._sanity_check() # pylint: disable=protected-access
return config
def connect_tracker(self) -> rpc.TrackerSession:
"""Connect to the tracker
Returns
-------
tracker : TrackerSession
The connected tracker session
"""
tracker: Optional[rpc.TrackerSession] = None
def _connect():
nonlocal tracker
tracker = rpc.connect_tracker(self.tracker_host, self.tracker_port)
t = Thread(target=_connect)
t.start()
t.join(self.session_timeout_sec)
if t.is_alive() or tracker is None:
raise ValueError(
"Unable to connect to the tracker using the following configuration:\n"
f" tracker host: {self.tracker_host}\n"
f" tracker port: {self.tracker_port}\n"
f" timeout (sec): {self.session_timeout_sec}\n"
"Please check the tracker status via the following command:\n"
" python3 -m tvm.exec.query_rpc_tracker "
f"--host {self.tracker_host} --port {self.tracker_port}"
)
return tracker
def connect_server(self) -> rpc.RPCSession:
"""Connect to the server
Returns
-------
session : RPCSession
The connected rpc session
"""
tracker = self.connect_tracker()
session: rpc.RPCSession = tracker.request(
key=self.tracker_key,
priority=self.session_priority,
session_timeout=self.session_timeout_sec,
)
return session
def count_num_servers(self, allow_missing=True) -> int:
"""Count the number of servers available in the tracker
Parameters
----------
allow_missing : bool
Whether to allow no server to be found.
Returns
-------
num_servers : int
The number of servers
"""
tracker = self.connect_tracker()
tracker_summary = tracker.summary()
result: int = 0
for item in tracker_summary["server_info"]:
_, item_key = item["key"].split(":")
if item_key == self.tracker_key:
result += 1
if result == 0 and not allow_missing:
raise ValueError(
"Unable to find servers with the specific key using the following configuration:\n"
f" tracker host: {self.tracker_host}\n"
f" tracker port: {self.tracker_port}\n"
f" tracker key: {self.tracker_key}\n"
f" timeout (sec): {self.session_timeout_sec}\n"
"Please check the tracker status via the following command:\n"
" python3 -m tvm.exec.query_rpc_tracker "
f"--host {self.tracker_host} --port {self.tracker_port}\n"
f'and look for key: "{self.tracker_key}"'
)
return result
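# Illustrative sketch: fields left as None fall back to the TVM_TRACKER_HOST,
# TVM_TRACKER_PORT, and TVM_TRACKER_KEY environment variables during normalization;
# the tracker values below are placeholders made up for this sketch.
def _example_rpc_config() -> "RPCConfig":
    config = RPCConfig(
        tracker_host="127.0.0.1",  # placeholder host
        tracker_port=9190,  # placeholder port
        tracker_key="local",  # placeholder key
    )
    return RPCConfig._normalized(config)  # pylint: disable=protected-access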
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/runner/local_runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Local Runner"""
from contextlib import contextmanager
from typing import Callable, List, Optional, Union
import subprocess
import tvm
from ...contrib.popen_pool import PopenPoolExecutor
from ...runtime import Device, Module
from ..logging import get_logger
from ..profiler import Profiler
from ..utils import derived_object, get_global_func_with_default_on_worker
from .config import EvaluatorConfig
from .runner import PyRunner, PyRunnerFuture, RunnerFuture, RunnerInput, RunnerResult
from .utils import (
T_ARG_INFO_JSON_OBJ_LIST,
T_ARGUMENT_LIST,
alloc_argument_common,
run_evaluator_common,
)
logger = get_logger(__name__) # pylint: disable=invalid-name
T_ALLOC_ARGUMENT = Callable[ # pylint: disable=invalid-name
[
Device, # The device on the remote
T_ARG_INFO_JSON_OBJ_LIST, # The metadata information of the arguments to be allocated
int, # The number of repeated allocations to be done
],
List[T_ARGUMENT_LIST], # A list of argument lists
]
T_RUN_EVALUATOR = Callable[ # pylint: disable=invalid-name
[
Module, # The Module opened on the remote
Device, # The device on the remote
EvaluatorConfig, # The evaluator configuration
List[T_ARGUMENT_LIST], # A list of argument lists
],
List[float], # A list of running time
]
T_CLEANUP = Callable[ # pylint: disable=invalid-name
[],
None,
]
@derived_object
class LocalRunnerFuture(PyRunnerFuture):
"""Local based runner future
Parameters
----------
res: Optional[List[float]]
The optional result as a list of float.
error_message: Optional[str]
The optional error message.
Note
----
Exactly one of the two parameters must be None upon the creation
of a LocalRunnerFuture object
"""
res: Optional[List[float]]
error_message: Optional[str]
def __init__(
self, res: Optional[List[float]] = None, error_message: Optional[str] = None
) -> None:
"""Constructor
Parameters
----------
res: Optional[List[float]]
The result of this LocalRunnerFuture
error_message: Optional[str]
The stringified error message of any exception during execution
"""
super().__init__()
self.res = res
self.error_message = error_message
# sanity check upon the creation of LocalRunnerFuture object
if (res is None and error_message is None) or (
res is not None and error_message is not None
):
raise AttributeError(
"Only one of the two parameters should be None upon the creation"
"of LocalRunnerFuture object."
)
def done(self) -> bool:
return True
def result(self) -> RunnerResult:
return RunnerResult(self.res, self.error_message)
def _worker_func(
_f_alloc_argument: Optional[str],
_f_run_evaluator: Optional[str],
_f_cleanup: Optional[str],
evaluator_config: EvaluatorConfig,
alloc_repeat: int,
artifact_path: str,
device_type: str,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
) -> List[float]:
f_alloc_argument: T_ALLOC_ARGUMENT = get_global_func_with_default_on_worker(
_f_alloc_argument, default_alloc_argument
)
f_run_evaluator: T_RUN_EVALUATOR = get_global_func_with_default_on_worker(
_f_run_evaluator, default_run_evaluator
)
f_cleanup: T_CLEANUP = get_global_func_with_default_on_worker(_f_cleanup, default_cleanup)
@contextmanager
def resource_handler():
try:
yield
finally:
# Final step. Always clean up
with Profiler.timeit("LocalRunner/cleanup"):
f_cleanup()
with resource_handler():
# Step 1: create the local runtime module
with Profiler.timeit("LocalRunner/load_module"):
rt_mod = tvm.runtime.load_module(artifact_path)
# Step 2: Allocate input arguments
with Profiler.timeit("LocalRunner/alloc_argument"):
device = tvm.runtime.device(dev_type=device_type, dev_id=0)
repeated_args: List[T_ARGUMENT_LIST] = f_alloc_argument(
device,
args_info,
alloc_repeat,
)
# Step 3: Run time_evaluator
with Profiler.timeit("LocalRunner/run_evaluator"):
costs: List[float] = f_run_evaluator(
rt_mod,
device,
evaluator_config,
repeated_args,
)
return costs
@derived_object
class LocalRunner(PyRunner):
"""Local runner
Parameters
----------
evaluator_config: EvaluatorConfig
The evaluator configuration.
cooldown_sec: float
The cooldown in seconds.
alloc_repeat: int
The number of times to repeat the allocation.
f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
The function name to allocate the arguments, or the function itself.
f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
The function name to run the evaluator, or the function itself.
f_cleanup: Union[T_CLEANUP, str, None]
The function name to clean up the session, or the function itself.
pool: PopenPoolExecutor
The popen pool executor.
Attributes
----------
T_ALLOC_ARGUMENT : typing._GenericAlias
The signature of the function `f_alloc_argument`, which is:
.. code-block:: python
def default_alloc_argument(
device: Device,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
alloc_repeat: int,
) -> List[T_ARGUMENT_LIST]:
...
T_RUN_EVALUATOR : typing._GenericAlias
The signature of the function `f_run_evaluator`, which is:
.. code-block:: python
def default_run_evaluator(
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[T_ARGUMENT_LIST],
) -> List[float]:
...
T_CLEANUP : typing._GenericAlias
The signature of the function `f_cleanup`, which is:
.. code-block:: python
def default_cleanup() -> None:
...
"""
timeout_sec: float
evaluator_config: EvaluatorConfig
cooldown_sec: float
alloc_repeat: int
f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
f_cleanup: Union[T_CLEANUP, str, None]
pool: PopenPoolExecutor
def __init__(
self,
timeout_sec: float = 30,
evaluator_config: Optional[EvaluatorConfig] = None,
cooldown_sec: float = 0.0,
alloc_repeat: int = 1,
f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None] = None,
f_run_evaluator: Union[T_RUN_EVALUATOR, str, None] = None,
f_cleanup: Union[T_CLEANUP, str, None] = None,
initializer: Optional[Callable[[], None]] = None,
) -> None:
"""Constructor
Parameters
----------
timeout_sec: float
The timeout setting.
evaluator_config: EvaluatorConfig
The evaluator configuration.
cooldown_sec: float
The cooldown in seconds.
alloc_repeat: int
The number of times to repeat the allocation, each randomly filled.
f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
The function name to allocate the arguments or the function itself.
f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
The function name to run the evaluator or the function itself.
f_cleanup: Union[T_CLEANUP, str, None]
The function name to cleanup the session or the function itself.
initializer: Optional[Callable[[], None]]
The initializer function.
"""
super().__init__()
self.timeout_sec = timeout_sec
self.evaluator_config = EvaluatorConfig._normalized(evaluator_config)
self.cooldown_sec = cooldown_sec
self.alloc_repeat = alloc_repeat
self.f_alloc_argument = f_alloc_argument
self.f_run_evaluator = f_run_evaluator
self.f_cleanup = f_cleanup
logger.info("LocalRunner: max_workers = 1")
self.pool = PopenPoolExecutor(
max_workers=1, # one local worker
timeout=timeout_sec,
initializer=initializer,
stderr=subprocess.DEVNULL, # suppress the stderr output
)
self._sanity_check()
def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
results: List[RunnerFuture] = []
for runner_input in runner_inputs:
future = self.pool.submit(
_worker_func,
self.f_alloc_argument,
self.f_run_evaluator,
self.f_cleanup,
self.evaluator_config,
self.alloc_repeat,
str(runner_input.artifact_path),
str(runner_input.device_type),
tuple(arg_info.as_json() for arg_info in runner_input.args_info),
)
try:
result: List[float] = future.result()
error_message: Optional[str] = None
except TimeoutError:
result = None
error_message = f"LocalRunner: Timeout, killed after {self.timeout_sec} seconds\n"
except Exception as exception: # pylint: disable=broad-except
result = None
error_message = "LocalRunner: An exception occurred\n" + str(exception)
local_future = LocalRunnerFuture(res=result, error_message=error_message)
results.append(local_future) # type: ignore
return results
def _sanity_check(self) -> None:
def _check(
f_alloc_argument,
f_run_evaluator,
f_cleanup,
) -> None:
get_global_func_with_default_on_worker(name=f_alloc_argument, default=None)
get_global_func_with_default_on_worker(name=f_run_evaluator, default=None)
get_global_func_with_default_on_worker(name=f_cleanup, default=None)
value = self.pool.submit(
_check,
self.f_alloc_argument,
self.f_run_evaluator,
self.f_cleanup,
)
value.result()
def default_alloc_argument(
device: Device,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
alloc_repeat: int,
) -> List[T_ARGUMENT_LIST]:
"""Default function to allocate the arguments
Parameters
----------
device: Device
The device to allocate the arguments
args_info: T_ARG_INFO_JSON_OBJ_LIST
The arguments info
alloc_repeat: int
The number of times to repeat the allocation
Returns
-------
repeated_args: List[T_ARGUMENT_LIST]
The allocation args
"""
f_random_fill = get_global_func_with_default_on_worker(
name="tvm.contrib.random.random_fill_for_measure", default=None
)
return alloc_argument_common(f_random_fill, device, args_info, alloc_repeat)
def default_run_evaluator(
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[T_ARGUMENT_LIST],
) -> List[float]:
"""Default function to run the evaluator
Parameters
----------
rt_mod: Module
The runtime module
device: Device
The device to run the evaluator
evaluator_config: EvaluatorConfig
The evaluator config
repeated_args: List[T_ARGUMENT_LIST]
The repeated arguments
Returns
-------
costs: List[float]
The evaluator results
"""
return run_evaluator_common(rt_mod, device, evaluator_config, repeated_args)
def default_cleanup() -> None:
"""Default function to clean up the session"""
pass # pylint: disable=unnecessary-pass
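# Usage sketch (illustrative): running a built artifact locally. The artifact path
# and args_info are placeholders; in practice they come from a Builder result and
# the argument info of the tuned function.
def _example_local_run(artifact_path: str, args_info) -> RunnerResult:
    runner = LocalRunner(timeout_sec=30)
    (future,) = runner.run([RunnerInput(artifact_path, "cpu", args_info)])
    return future.result()  # LocalRunnerFuture resolves immediately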
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/runner/rpc_runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC Runner"""
import concurrent.futures
import os.path as osp
from contextlib import contextmanager
from typing import Callable, List, Optional, Union
from tvm.contrib.popen_pool import PopenPoolExecutor
from tvm.rpc import RPCSession
from tvm.runtime import Device, Module
from ..logging import get_logger
from ..profiler import Profiler
from ..utils import (
cpu_count,
derived_object,
get_global_func_on_rpc_session,
get_global_func_with_default_on_worker,
)
from .config import EvaluatorConfig, RPCConfig
from .runner import PyRunner, PyRunnerFuture, RunnerFuture, RunnerInput, RunnerResult
from .utils import (
T_ARG_INFO_JSON_OBJ_LIST,
T_ARGUMENT_LIST,
alloc_argument_common,
run_evaluator_common,
)
logger = get_logger(__name__) # pylint: disable=invalid-name
T_CREATE_SESSION = Callable[ # pylint: disable=invalid-name
[RPCConfig], # The RPC configuration
RPCSession, # The RPC Session
]
T_UPLOAD_MODULE = Callable[ # pylint: disable=invalid-name
[
RPCSession, # The RPC Session
str, # local path to the artifact
str, # remote path to the artifact
],
Module, # the Module opened on the remote
]
T_ALLOC_ARGUMENT = Callable[ # pylint: disable=invalid-name
[
RPCSession, # The RPC Session
Device, # The device on the remote
T_ARG_INFO_JSON_OBJ_LIST, # The metadata information of the arguments to be allocated
int, # The number of repeated allocations to be done
],
List[T_ARGUMENT_LIST], # A list of argument lists
]
T_RUN_EVALUATOR = Callable[ # pylint: disable=invalid-name
[
RPCSession, # The RPC Session
Module, # The Module opened on the remote
Device, # The device on the remote
EvaluatorConfig, # The evaluator configuration
List[T_ARGUMENT_LIST], # A list of argument lists
],
List[float], # A list of running time
]
T_CLEANUP = Callable[ # pylint: disable=invalid-name
[
Optional[RPCSession], # The RPC Session to be cleaned up
Optional[str], # remote path to the artifact
],
None,
]
@derived_object
class RPCRunnerFuture(PyRunnerFuture):
"""RPC based runner future
Parameters
----------
future: concurrent.futures.Future
The concurrent function to check when the function is done and to return the result.
timeout_sec: float
The timeout in seconds.
"""
future: concurrent.futures.Future
timeout_sec: float
def __init__(self, future: concurrent.futures.Future, timeout_sec: float) -> None:
"""Constructor
Parameters
----------
future: concurrent.futures.Future
The concurrent function to check when the function is done and to return the result.
timeout_sec: float
The timeout in seconds.
"""
super().__init__()
self.future = future
self.timeout_sec = timeout_sec
def done(self) -> bool:
return self.future.done()
def result(self) -> RunnerResult:
try:
run_secs: List[float] = self.future.result()
except TimeoutError:
return RunnerResult(
None,
error_msg=f"RPCRunner: Timeout, killed after {self.timeout_sec} seconds",
)
except Exception as exception: # pylint: disable=broad-except
return RunnerResult(
None,
error_msg="RPCRunner: An exception occurred\n" + str(exception),
)
return RunnerResult(run_secs, None)
@derived_object
class RPCRunner(PyRunner):
"""RPC based runner
Parameters
----------
rpc_config: RPCConfig
The rpc configuration.
evaluator_config: EvaluatorConfig
The evaluator configuration.
cooldown_sec: float
The cooldown in seconds. TODO(@junrushao1994,@zxybazh): This is not used yet.
alloc_repeat: int
The number of times to repeat the allocation.
f_create_session: Union[T_CREATE_SESSION, str, None]
The function name to create the session, or the function itself.
f_upload_module: Union[T_UPLOAD_MODULE, str, None]
The function name to upload the module, or the function itself.
f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
The function name to allocate the arguments, or the function itself.
f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
The function name to run the evaluator, or the function itself.
f_cleanup: Union[T_CLEANUP, str, None]
The function name to clean up the session, or the function itself.
pool: PopenPoolExecutor
The popen pool executor.
Attributes
----------
T_CREATE_SESSION : typing._GenericAlias
The signature of the function `f_create_session`, which is:
.. code-block:: python
def default_create_session(rpc_config: RPCConfig) -> RPCSession:
...
T_UPLOAD_MODULE : typing._GenericAlias
The signature of the function `f_upload_module`, which is:
.. code-block:: python
def default_upload_module(
session: RPCSession,
local_path: str,
remote_path: str,
) -> Module:
...
T_ALLOC_ARGUMENT : typing._GenericAlias
The signature of the function `f_alloc_argument`, which is:
.. code-block:: python
def default_alloc_argument(
session: RPCSession,
device: Device,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
alloc_repeat: int,
) -> List[T_ARGUMENT_LIST]:
...
T_RUN_EVALUATOR : typing._GenericAlias
The signature of the function `f_run_evaluator`, which is:
.. code-block:: python
def default_run_evaluator(
session: RPCSession,
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[T_ARGUMENT_LIST],
) -> List[float]:
...
T_CLEANUP : typing._GenericAlias
The signature of the function `f_cleanup`, which is:
.. code-block:: python
def default_cleanup(
session: Optional[RPCSession],
remote_path: Optional[str],
) -> None:
...
"""
rpc_config: RPCConfig
evaluator_config: EvaluatorConfig
cooldown_sec: float
alloc_repeat: int
f_create_session: Union[T_CREATE_SESSION, str, None]
f_upload_module: Union[T_UPLOAD_MODULE, str, None]
f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
f_cleanup: Union[T_CLEANUP, str, None]
pool: PopenPoolExecutor
def __init__(
self,
rpc_config: Optional[RPCConfig] = None,
evaluator_config: Optional[EvaluatorConfig] = None,
cooldown_sec: float = 0.0,
alloc_repeat: int = 1,
f_create_session: Union[T_CREATE_SESSION, str, None] = None,
f_upload_module: Union[T_UPLOAD_MODULE, str, None] = None,
f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None] = None,
f_run_evaluator: Union[T_RUN_EVALUATOR, str, None] = None,
f_cleanup: Union[T_CLEANUP, str, None] = None,
max_workers: Optional[int] = None,
initializer: Optional[Callable[[], None]] = None,
) -> None:
"""Constructor
Parameters
----------
rpc_config: RPCConfig
The rpc configuration.
evaluator_config: EvaluatorConfig
The evaluator configuration.
cooldown_sec: float
The cooldown in seconds.
alloc_repeat: int
The number of times to repeat the allocation, each randomly filled.
f_create_session: Union[T_CREATE_SESSION, str, None]
The function name to create the session or the function itself.
f_upload_module: Union[T_UPLOAD_MODULE, str, None]
The function name to upload the module or the function itself.
f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
The function name to allocate the arguments or the function itself.
f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
The function name to run the evaluator or the function itself.
f_cleanup: Union[T_CLEANUP, str, None]
The function name to cleanup the session or the function itself.
max_workers: Optional[int] = None
The maximum number of connections. Defaults to number of logical CPU cores.
initializer: Optional[Callable[[], None]]
The initializer function.
"""
super().__init__()
self.rpc_config = RPCConfig._normalized(rpc_config)
self.evaluator_config = EvaluatorConfig._normalized(evaluator_config)
self.cooldown_sec = cooldown_sec
self.alloc_repeat = alloc_repeat
self.f_create_session = f_create_session
self.f_upload_module = f_upload_module
self.f_alloc_argument = f_alloc_argument
self.f_run_evaluator = f_run_evaluator
self.f_cleanup = f_cleanup
if max_workers is None:
max_workers = cpu_count(logical=True)
logger.info("RPCRunner: max_workers = %d", max_workers)
self.pool = PopenPoolExecutor(
max_workers=max_workers,
timeout=rpc_config.session_timeout_sec,
initializer=initializer,
)
self._sanity_check()
def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
results: List[RunnerFuture] = []
for runner_input in runner_inputs:
future = RPCRunnerFuture(
future=self.pool.submit(
_worker_func,
self.f_create_session,
self.f_upload_module,
self.f_alloc_argument,
self.f_run_evaluator,
self.f_cleanup,
self.rpc_config,
self.evaluator_config,
self.alloc_repeat,
str(runner_input.artifact_path),
str(runner_input.device_type),
tuple(arg_info.as_json() for arg_info in runner_input.args_info),
),
timeout_sec=self.rpc_config.session_timeout_sec,
)
results.append(future) # type: ignore
return results
def _sanity_check(self) -> None:
def _check(
f_create_session,
f_upload_module,
f_alloc_argument,
f_run_evaluator,
f_cleanup,
) -> None:
get_global_func_with_default_on_worker(name=f_create_session, default=None)
get_global_func_with_default_on_worker(name=f_upload_module, default=None)
get_global_func_with_default_on_worker(name=f_alloc_argument, default=None)
get_global_func_with_default_on_worker(name=f_run_evaluator, default=None)
get_global_func_with_default_on_worker(name=f_cleanup, default=None)
value = self.pool.submit(
_check,
self.f_create_session,
self.f_upload_module,
self.f_alloc_argument,
self.f_run_evaluator,
self.f_cleanup,
)
value.result()
def _worker_func(
_f_create_session: Union[T_CREATE_SESSION, str, None],
_f_upload_module: Union[T_UPLOAD_MODULE, str, None],
_f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None],
_f_run_evaluator: Union[T_RUN_EVALUATOR, str, None],
_f_cleanup: Union[T_CLEANUP, str, None],
rpc_config: RPCConfig,
evaluator_config: EvaluatorConfig,
alloc_repeat: int,
artifact_path: str,
device_type: str,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
) -> List[float]:
# Step 0. Get the registered functions
f_create_session: T_CREATE_SESSION = get_global_func_with_default_on_worker(
_f_create_session, default_create_session
)
f_upload_module: T_UPLOAD_MODULE = get_global_func_with_default_on_worker(
_f_upload_module, default_upload_module
)
f_alloc_argument: T_ALLOC_ARGUMENT = get_global_func_with_default_on_worker(
_f_alloc_argument, default_alloc_argument
)
f_run_evaluator: T_RUN_EVALUATOR = get_global_func_with_default_on_worker(
_f_run_evaluator, default_run_evaluator
)
f_cleanup: T_CLEANUP = get_global_func_with_default_on_worker(_f_cleanup, default_cleanup)
# Managed resources
session: Optional[RPCSession] = None
remote_path: Optional[str] = None
@contextmanager
def resource_handler():
try:
yield
finally:
# Final step. Always clean up
with Profiler.timeit("RPCRunner/cleanup"):
f_cleanup(session, remote_path)
with resource_handler():
# Step 1. Create session
with Profiler.timeit("RPCRunner/create_session"):
session = f_create_session(rpc_config)
device = session.device(dev_type=device_type, dev_id=0)
# Step 2. Upload the module
with Profiler.timeit("RPCRunner/upload_module"):
_, remote_path = osp.split(artifact_path)
local_path: str = artifact_path
rt_mod: Module = f_upload_module(session, local_path, remote_path)
# Step 3: Allocate input arguments
with Profiler.timeit("RPCRunner/alloc_argument"):
repeated_args: List[T_ARGUMENT_LIST] = f_alloc_argument(
session,
device,
args_info,
alloc_repeat,
)
# Step 4: Run time_evaluator
with Profiler.timeit("LocalRunner/run_evaluator"):
costs: List[float] = f_run_evaluator(
session,
rt_mod,
device,
evaluator_config,
repeated_args,
)
return costs
def default_create_session(rpc_config: RPCConfig) -> RPCSession:
"""Default function to create the session
Parameters
----------
rpc_config : RPCConfig
The configuration of the RPC session
Returns
-------
session : RPCSession
The created rpc session
"""
return rpc_config.connect_server()
def default_upload_module(
session: RPCSession,
local_path: str,
remote_path: str,
) -> Module:
"""Default function to upload the module
Parameters
----------
session: RPCSession
The session to upload the module
local_path: str
The local path of the module
remote_path: str
The remote path to place the module
Returns
-------
rt_mod : Module
The runtime module
"""
session.upload(local_path, remote_path)
rt_mod: Module = session.load_module(remote_path)
return rt_mod
def default_alloc_argument(
session: RPCSession,
device: Device,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
alloc_repeat: int,
) -> List[T_ARGUMENT_LIST]:
"""Default function to allocate the arguments
Parameters
----------
session: RPCSession
The session to allocate the arguments
device: Device
The device to allocate the arguments
args_info: T_ARG_INFO_JSON_OBJ_LIST
The arguments info
alloc_repeat: int
The number of times to repeat the allocation
Returns
-------
repeated_args: List[T_ARGUMENT_LIST]
The allocation args
"""
f_random_fill = get_global_func_on_rpc_session(
session,
"tvm.contrib.random.random_fill_for_measure",
"Please make sure 'USE_RANDOM' is turned ON in the config.cmake on the RPC server.",
)
return alloc_argument_common(f_random_fill, device, args_info, alloc_repeat)
def default_run_evaluator(
session: RPCSession, # pylint: disable=unused-argument
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[T_ARGUMENT_LIST],
) -> List[float]:
"""Default function to run the evaluator
Parameters
----------
session: RPCSession
The session to run the evaluator
rt_mod: Module
The runtime module
device: Device
The device to run the evaluator
evaluator_config: EvaluatorConfig
The evaluator config
repeated_args: List[T_ARGUMENT_LIST]
The repeated arguments
Returns
-------
costs: List[float]
The evaluator results
"""
return run_evaluator_common(rt_mod, device, evaluator_config, repeated_args)
def default_cleanup(
session: Optional[RPCSession],
remote_path: Optional[str],
) -> None:
"""Default function to clean up the session
Parameters
----------
session: RPCSession
The session to clean up
remote_path: str
The remote path to clean up
"""
if session is not None and remote_path is not None:
session.remove(remote_path)
session.remove(remote_path + ".so")
session.remove("")
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/runner/runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Runners"""
from typing import Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from .. import _ffi_api
from ..arg_info import ArgInfo
@register_object("meta_schedule.RunnerInput")
class RunnerInput(Object):
"""The runner's input
Parameters
----------
artifact_path : str
The path to the built artifact.
device_type : str
The device type.
args_info : List[ArgInfo]
The argument information.
"""
artifact_path: str
device_type: str
args_info: List[ArgInfo]
def __init__(
self,
artifact_path: str,
device_type: str,
args_info: List[ArgInfo],
) -> None:
"""Constructor
Parameters
----------
artifact_path : str
The path to the built artifact.
device_type : str
The device type.
args_info : List[ArgInfo]
The argument information.
"""
self.__init_handle_by_constructor__(
_ffi_api.RunnerInput, # type: ignore # pylint: disable=no-member
artifact_path,
device_type,
args_info,
)
@register_object("meta_schedule.RunnerResult")
class RunnerResult(Object):
"""The runner's result
Parameters
----------
run_secs : Optional[List[float]]
The run time in seconds.
error_msg : Optional[str]
The error message, if any.
"""
run_secs: Optional[List[float]]
error_msg: Optional[str]
def __init__(
self,
run_secs: Optional[List[float]],
error_msg: Optional[str],
) -> None:
"""Constructor
Parameters
----------
run_secs : Optional[List[float]]
The run time in seconds.
error_msg : Optional[str]
The error message, if any.
"""
self.__init_handle_by_constructor__(
_ffi_api.RunnerResult, # type: ignore # pylint: disable=no-member
run_secs,
error_msg,
)
@register_object("meta_schedule.RunnerFuture")
class RunnerFuture(Object):
"""
A class to fetch an asynchronous runner's output.
This is NOT the user-facing class for function-overloading inheritance,
but it can be used as the general return type of a runner.
See also: PyRunnerFuture
"""
def __init__(self, f_done: Callable, f_result: Optional[Callable] = None) -> None:
"""Constructor"""
self.__init_handle_by_constructor__(
_ffi_api.RunnerFuture, # type: ignore # pylint: disable=no-member
f_done,
f_result,
)
def done(self) -> bool:
"""Check whether the runner has finished."""
return _ffi_api.RunnerFutureDone(self) # type: ignore # pylint: disable=no-member
def result(self) -> RunnerResult:
"""Fetch the runner's output if it is ready."""
return _ffi_api.RunnerFutureResult(self) # type: ignore # pylint: disable=no-member
class PyRunnerFuture:
"""
A class to fetch an asynchronous runner's output, with customizable methods on the Python side.
This is the user-facing class for function-overloading inheritance.
It can NOT be used as the general return type of a runner.
Note: @derived_object is required for proper usage of any inherited class.
Example:
@derived_object
class LocalRunnerFuture(PyRunnerFuture):
...
"""
_tvm_metadata = {
"cls": RunnerFuture,
"methods": ["done", "result"],
}
def done(self) -> bool:
"""Check whether the runner has finished."""
raise NotImplementedError
def result(self) -> RunnerResult:
"""Fetch the runner's output if it is ready."""
raise NotImplementedError
@register_object("meta_schedule.Runner")
class Runner(Object):
"""The abstract runner interface"""
RunnerType = Union["Runner", Literal["local", "rpc"]]
def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
"""Run the built artifact and get runner futures.
Parameters
----------
runner_inputs : List[RunnerInput]
The inputs to the runner.
Returns
-------
runner_futures: List[RunnerFuture]
The runner futures.
"""
return _ffi_api.RunnerRun(self, runner_inputs) # type: ignore # pylint: disable=no-member
@staticmethod
def create( # pylint: disable=keyword-arg-before-vararg
kind: Literal["local", "rpc"] = "local",
*args,
**kwargs,
) -> "Runner":
"""Create a Runner."""
from . import LocalRunner, RPCRunner # pylint: disable=import-outside-toplevel
if kind == "local":
return LocalRunner(*args, **kwargs) # type: ignore
elif kind == "rpc":
return RPCRunner(*args, **kwargs) # type: ignore
raise ValueError(f"Unknown Runner: {kind}")
create = Runner.create # pylint: disable=invalid-name
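# Usage sketch (illustrative): the factory dispatches on the kind string; "local"
# and "rpc" are the built-in kinds, and extra arguments are forwarded to the
# corresponding constructor. The timeout value below is arbitrary.
def _example_create_runner() -> "Runner":
    return create("local", timeout_sec=30)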
@register_object("meta_schedule.PyRunner")
class _PyRunner(Runner):
"""
A TVM object runner to support customization on the Python side.
This is NOT the user-facing class for function-overloading inheritance.
See also: PyRunner
"""
def __init__(self, f_run: Optional[Callable] = None) -> None:
"""Constructor"""
self.__init_handle_by_constructor__(
_ffi_api.RunnerPyRunner, # type: ignore # pylint: disable=no-member
f_run,
)
class PyRunner:
"""
An abstract runner with a customized run method on the Python side.
This is the user-facing class for function-overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PyRunner,
"methods": ["run"],
}
def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
"""Run the built artifact and get runner futures.
Parameters
----------
runner_inputs : List[RunnerInput]
The inputs to the runner.
Returns
-------
runner_futures: List[RunnerFuture]
The runner futures.
"""
raise NotImplementedError
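# Sketch of a customized runner (illustrative): it reports a fixed one-second cost
# for every input via raw RunnerFuture callbacks. The class name and the constant
# cost are made up for this sketch; @derived_object is required for TVM to accept
# the subclass.
def _example_custom_runner() -> "Runner":
    from ..utils import derived_object  # pylint: disable=import-outside-toplevel
    @derived_object
    class EchoRunner(PyRunner):
        """Toy runner that reports a fixed one-second cost for every input."""
        def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
            return [
                RunnerFuture(
                    f_done=lambda: True,
                    f_result=lambda: RunnerResult([1.0], None),
                )
                for _ in runner_inputs
            ]
    return EchoRunner()  # type: ignore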
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/runner/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Runner utility functions"""
import itertools
from typing import Any, Callable, Dict, List
from ...runtime import Device, Module, ndarray
from .config import EvaluatorConfig
T_ARG_INFO_JSON_OBJ = List[Any] # pylint: disable=invalid-name
T_ARG_INFO_JSON_OBJ_LIST = List[T_ARG_INFO_JSON_OBJ] # pylint: disable=invalid-name
T_ARGUMENT = Any # pylint: disable=invalid-name
T_ARGUMENT_LIST = List[T_ARGUMENT] # pylint: disable=invalid-name
def alloc_argument_common(
f_random_fill: Callable,
device: Device,
args_info: T_ARG_INFO_JSON_OBJ_LIST,
alloc_repeat: int,
) -> List[T_ARGUMENT_LIST]:
"""Common function to allocate the arguments
Parameters
----------
f_random_fill: Callable
The callable function for random fill
device: Device
The device to allocate the arguments
args_info: T_ARG_INFO_JSON_OBJ_LIST
The arguments info
alloc_repeat: int
The number of times to repeat the allocation
Returns
-------
repeated_args: List[T_ARGUMENT_LIST]
The allocation args
"""
def alloc_tensor(_, dtype, shape) -> ndarray.NDArray:
arg = ndarray.empty(shape=shape, dtype=dtype, device=device)
f_random_fill(arg)
return arg
def alloc_fail(*arg_info) -> None:
raise NotImplementedError(arg_info)
dispatcher: Dict[Any, Callable] = {
"TENSOR": alloc_tensor,
None: alloc_fail,
}
repeated_args: List[T_ARGUMENT_LIST] = []
for _ in range(alloc_repeat):
args: T_ARGUMENT_LIST = []
arg_info: T_ARG_INFO_JSON_OBJ
for arg_info in args_info:
arg_type = arg_info[0]
arg: Any = dispatcher.get(arg_type, None)(*arg_info)
args.append(arg)
repeated_args.append(args)
return repeated_args
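# Illustrative sketch: each entry of args_info is a JSON-style list headed by its
# kind tag, e.g. ["TENSOR", "float32", [4, 4]]. The no-op fill below is a
# placeholder for tvm.contrib.random.random_fill_for_measure.
def _example_alloc(device: Device) -> List[T_ARGUMENT_LIST]:
    def _noop_fill(_arg: ndarray.NDArray) -> None:
        pass  # placeholder: real callers randomly fill the tensor in place
    args_info: T_ARG_INFO_JSON_OBJ_LIST = [["TENSOR", "float32", [4, 4]]]
    return alloc_argument_common(_noop_fill, device, args_info, alloc_repeat=2)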
def run_evaluator_common(
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[T_ARGUMENT_LIST],
) -> List[float]:
"""Common function to run the evaluator
Parameters
----------
rt_mod: Module
The runtime module
device: Device
The device to run the evaluator
evaluator_config: EvaluatorConfig
The evaluator config
repeated_args: List[T_ARGUMENT_LIST]
The repeated arguments
Returns
-------
costs: List[float]
The evaluator results
"""
evaluator = rt_mod.time_evaluator(
func_name=rt_mod.entry_name,
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
f_preproc="cache_flush_cpu_non_first_arg"
if evaluator_config.enable_cpu_cache_flush
else "",
)
repeated_costs: List[List[float]] = []
for args in repeated_args:
device.sync()
profile_result = evaluator(*args)
repeated_costs.append(profile_result.results)
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
return costs
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule"""
from . import cpu, cuda, generic, x86
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule/cpu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule for target key 'cpu'"""
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule/cuda/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule for target key 'cuda'"""
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule/generic/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule for generic cases"""
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule/x86/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule for target key 'x86'"""
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.schedule_rule package.
Meta Schedule schedule rules are used for modification of
blocks in a schedule. See also PostOrderApply.
"""
from .add_rfactor import AddRFactor
from .apply_custom_rule import ApplyCustomRule
from .auto_bind import AutoBind
from .auto_inline import AutoInline, InlineConstantScalars
from .cross_thread_reduction import CrossThreadReduction
from .multi_level_tiling import (
MultiLevelTiling,
MultiLevelTilingTensorCore,
MultiLevelTilingWideVector,
MultiLevelTilingWithIntrin,
ReuseType,
)
from .parallel_vectorize_unroll import ParallelizeVectorizeUnroll
from .random_compute_location import RandomComputeLocation
from .schedule_rule import PyScheduleRule, ScheduleRule
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/add_rfactor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add-rfactor Rule that add-rfactor to some blocks if needed"""
from typing import Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.AddRFactor")
class AddRFactor(ScheduleRule):
"""Rules for add-rfactor to some blocks if needed.
Parameters
----------
max_jobs_per_core: int
The maximum number of jobs to be launched per CPU core. It sets the upper limit of CPU
parallelism, i.e. `num_cores * max_jobs_per_core`.
Use -1 to disable parallelism.
max_innermost_factor: Optional[int] = None
The maximum size of the innermost factor. None means no limit.
"""
def __init__(
self,
max_jobs_per_core: int = 16,
max_innermost_factor: Optional[int] = None,
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleAddRFactor, # type: ignore # pylint: disable=no-member
max_jobs_per_core,
max_innermost_factor,
)
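# A minimal usage sketch; the parameter values are illustrative assumptions,
# not the library's defaults.
def _example_add_rfactor() -> "AddRFactor":
    """Cap CPU parallelism at 8 jobs per core and the innermost factor at 64."""
    return AddRFactor(max_jobs_per_core=8, max_innermost_factor=64)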
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/apply_custom_rule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Create a rule that applies customized rules registered using block attribute `schedule_rule`.
The rule will be dispatched according to target keys."""
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.ApplyCustomRule")
class ApplyCustomRule(ScheduleRule):
"""A rule that applies customized rules registered using block attribute `schedule_rule`.
The rule will be dispatched according to target keys."""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleApplyCustomRule, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/auto_bind.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Auto-bind Rule that binds blocks to threads if needed"""
from typing import List, Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.AutoBind")
class AutoBind(ScheduleRule):
"""Auto bind loops around the block to BlockIdx and ThreadIdx
Parameters
----------
max_threadblocks: int
The maximum number of threadblocks on GPU.
thread_extents: Optional[List[int]]
Candidates of thread axis extent.
"""
def __init__(
self,
max_threadblocks: int = 256,
thread_extents: Optional[List[int]] = None,
) -> None:
if thread_extents is None:
thread_extents = [32, 64, 128, 256, 512, 1024]
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleAutoBind, # type: ignore # pylint: disable=no-member
max_threadblocks,
thread_extents,
)
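# A minimal usage sketch; the values are illustrative assumptions, not defaults.
def _example_auto_bind() -> "AutoBind":
    """Allow at most 128 threadblocks and a restricted set of thread extents."""
    return AutoBind(max_threadblocks=128, thread_extents=[64, 128, 256])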
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/auto_inline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Auto-Inline. Rule that inlines spatial blocks if it satisfies some conditions"""
from typing import List, Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.AutoInline")
class AutoInline(ScheduleRule):
"""Rule that inlines spatial blocks if it satisfies some conditions
Parameters
----------
into_producer : bool
    Whether to allow inlining a block into its producer
into_consumer : bool
    Whether to allow inlining a block into its consumer
inline_const_tensor : bool
Always inline constant tensors
disallow_if_then_else : bool
Always disallow if-then-else-like constructs
require_injective : bool
    Always require the read-to-write mapping to be injective
require_ordered : bool
    Always require the read-to-write mapping to be ordered
disallow_op : Optional[List[str]]
The operators that are disallowed in auto inline
"""
def __init__(
self,
into_producer: bool,
into_consumer: bool,
inline_const_tensor: bool,
disallow_if_then_else: bool,
require_injective: bool,
require_ordered: bool,
disallow_op: Optional[List[str]] = None,
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleAutoInline, # type: ignore # pylint: disable=no-member
into_producer,
into_consumer,
inline_const_tensor,
disallow_if_then_else,
require_injective,
require_ordered,
disallow_op,
)
@register_object("meta_schedule.InlineConstantScalars")
class InlineConstantScalars(ScheduleRule):
"""Inline blocks that produce a constant scalar.
Such blocks get in the way of ReverseComputeInline during AutoInline, since they are also
counted as a producer block unless they are inlined first. So it is recommended to run
InlineConstantScalars before AutoInline.
"""
def __init__(
self,
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleInlineConstantScalars, # type: ignore # pylint: disable=no-member
)
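# A minimal usage sketch of AutoInline; this flag combination is an
# illustrative assumption, not the default for any particular target.
def _example_auto_inline() -> "AutoInline":
    """A permissive inline rule that still rejects if-then-else bodies."""
    return AutoInline(
        into_producer=False,
        into_consumer=True,
        inline_const_tensor=True,
        disallow_if_then_else=True,
        require_injective=True,
        require_ordered=True,
        disallow_op=None,
    )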
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/cross_thread_reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rules which apply cross-thread reduction to some reduction blocks correspondingly when needed"""
from typing import List
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.CrossThreadReduction")
class CrossThreadReduction(ScheduleRule):
"""A schedule rule which applies cross-thread reduction to some reduction blocks
when needed
Parameters
----------
thread_extents: List[int]
Candidates of thread axis extent (values are required to be positive).
"""
def __init__(self, thread_extents: List[int]) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleCrossThreadReduction, # type: ignore # pylint: disable=no-member
thread_extents,
)
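# A minimal usage sketch; the extents are illustrative assumptions.
def _example_cross_thread_reduction() -> "CrossThreadReduction":
    """Try power-of-two thread extents from 4 to 512."""
    return CrossThreadReduction(thread_extents=[4, 8, 16, 32, 64, 128, 256, 512])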
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/multi_level_tiling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Multi-level tiling with reuse."""
from typing import Any, Dict, List, Mapping, NamedTuple, Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
class ReuseType(NamedTuple):
"""Reuse type."""
req: str
levels: List[int]
scope: str
def as_dict(self) -> Dict[str, Any]:
"""Return the dict representation of the reuse type."""
return {
"req": self.req,
"levels": self.levels,
"scope": self.scope,
}
@register_object("meta_schedule.MultiLevelTiling")
class MultiLevelTiling(ScheduleRule):
"""Multi-level tiling with reuse.
Parameters
----------
structure : str
The tiling structure. Recommended:
- 'SSRSRS' on CPU
- 'SSSRRSRS' on GPU
tile_binds : Optional[List[str]]
For each level of tiles, which thread axis it is bound to. Recommended:
- None on CPU
- [blockIdx.x, vthread.x, threadIdx.x] on GPU
max_innermost_factor : Optional[int]
The maximum size of the innermost factor. None means no limit
vector_load_lens : Optional[List[int]]
The length of vector lane in vectorized cooperative fetching.
None means disable vectorization
reuse_read : Optional[ReuseType]
Data reuse configuration for reading. None means no reuse.
reuse_write : Optional[ReuseType]
Data reuse configuration for writing. None means no reuse.
"""
def __init__(
self,
structure: str,
tile_binds: Optional[List[str]] = None,
max_innermost_factor: Optional[int] = None,
vector_load_lens: Optional[List[int]] = None,
reuse_read: Optional[ReuseType] = None,
reuse_write: Optional[ReuseType] = None,
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleMultiLevelTiling, # type: ignore # pylint: disable=no-member
structure,
tile_binds,
max_innermost_factor,
vector_load_lens,
reuse_read.as_dict() if reuse_read is not None else None,
reuse_write.as_dict() if reuse_write is not None else None,
)
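# A minimal GPU-style usage sketch following the recommendations above; the
# concrete values are illustrative assumptions, not library defaults.
def _example_multi_level_tiling_gpu() -> "MultiLevelTiling":
    """'SSSRRSRS' tiling with shared-memory read reuse and local write reuse."""
    return MultiLevelTiling(
        structure="SSSRRSRS",
        tile_binds=["blockIdx.x", "vthread.x", "threadIdx.x"],
        max_innermost_factor=64,
        vector_load_lens=[1, 2, 4],
        reuse_read=ReuseType(req="must", levels=[4], scope="shared"),
        reuse_write=ReuseType(req="must", levels=[3], scope="local"),
    )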
@register_object("meta_schedule.MultiLevelTilingWithIntrin")
class MultiLevelTilingWithIntrin(ScheduleRule):
"""Extension of MultiLevelTiling for auto-tensorizing with a single intrinsic.
Parameters
----------
intrin_name : str
The name of a tensor intrinsic; it must be registered via TensorIntrin.register(...) beforehand
structure : str
The tiling structure. Recommended:
- 'SSRSRS' on CPU
- 'SSSRRSRS' on GPU
tile_binds : Optional[List[str]]
For each level of tiles, which thread axis it is bound to. Recommended:
- None on CPU
- [blockIdx.x, vthread.x, threadIdx.x] on GPU
max_innermost_factor : Optional[int]
The maximum size of the innermost factor. None means no limit
vector_load_lens : Optional[List[int]]
The length of vector lane in vectorized cooperative fetching.
None means disable vectorization
reuse_read : Optional[ReuseType]
Data reuse configuration for reading. None means no reuse.
reuse_write : Optional[ReuseType]
Data reuse configuration for writing. None means no reuse.
"""
def __init__(
self,
intrin_name: str,
structure: str,
tile_binds: Optional[List[str]] = None,
max_innermost_factor: Optional[int] = None,
vector_load_lens: Optional[List[int]] = None,
reuse_read: Optional[ReuseType] = None,
reuse_write: Optional[ReuseType] = None,
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleMultiLevelTilingWithIntrin, # type: ignore # pylint: disable=no-member
intrin_name,
structure,
tile_binds,
max_innermost_factor,
vector_load_lens,
reuse_read.as_dict() if reuse_read is not None else None,
reuse_write.as_dict() if reuse_write is not None else None,
)
@register_object("meta_schedule.MultiLevelTilingTensorCore")
class MultiLevelTilingTensorCore(ScheduleRule):
"""Extension of MultiLevelTiling for auto-tensorizing with multiple groups of candidate tensor
core intrinsics.
Parameters
----------
intrin_groups : List[Mapping[str, str]]
A list of groups of tensor core intrinsics. Each map should contain the keys "init",
"load_a", "load_b", "compute", and "store", which represent the tensor intrins for
initialization, loading operand A, loading operand B, tensor core computation, and
storing the result. The values of the map should be names of tensor intrinsics that
must be registered via TensorIntrin.register(...) beforehand
structure : str
The tiling structure. Recommended:
- 'SSSRRSRS' on GPU
tile_binds : Optional[List[str]]
For each level of tiles, which thread axis it is bound to. Recommended:
- [blockIdx.y, vthread.x, threadIdx.y] on GPU
max_innermost_factor : Optional[int]
The maximum size of the innermost factor. None means no limit
vector_load_lens : Optional[List[int]]
The length of vector lane in vectorized cooperative fetching.
None means disable vectorization
reuse_read : Optional[ReuseType]
Data reuse configuration for reading. None means no reuse.
reuse_write : Optional[ReuseType]
Data reuse configuration for writing. None means no reuse.
use_software_pipeline : bool
Whether to use the software pipeline.
"""
def __init__(
self,
intrin_groups: List[Mapping[str, str]],
structure: str,
tile_binds: Optional[List[str]] = None,
max_innermost_factor: Optional[int] = None,
vector_load_lens: Optional[List[int]] = None,
reuse_read: Optional[ReuseType] = None,
reuse_write: Optional[ReuseType] = None,
use_software_pipeline: bool = False,
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleMultiLevelTilingTensorCore, # type: ignore # pylint: disable=no-member
intrin_groups,
structure,
tile_binds,
max_innermost_factor,
vector_load_lens,
reuse_read.as_dict() if reuse_read is not None else None,
reuse_write.as_dict() if reuse_write is not None else None,
use_software_pipeline,
)
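# A sketch of how an intrin group might be assembled. Every intrinsic name
# below is a hypothetical placeholder; real names must first be registered
# via TensorIntrin.register(...), as the docstring above requires.
def _example_tensor_core_tiling() -> "MultiLevelTilingTensorCore":
    intrin_group = {
        "init": "my_wmma_fill",      # hypothetical intrinsic name
        "load_a": "my_wmma_load_a",  # hypothetical intrinsic name
        "load_b": "my_wmma_load_b",  # hypothetical intrinsic name
        "compute": "my_wmma_sync",   # hypothetical intrinsic name
        "store": "my_wmma_store",    # hypothetical intrinsic name
    }
    return MultiLevelTilingTensorCore(
        intrin_groups=[intrin_group],
        structure="SSSRRSRS",
        tile_binds=["blockIdx.y", "vthread.x", "threadIdx.y"],
        use_software_pipeline=False,
    )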
@register_object("meta_schedule.MultiLevelTilingWideVector")
class MultiLevelTilingWideVector(ScheduleRule):
"""Extension of MultiLevelTiling for backends with wide vectors. The loop over the innermost
spatial axis of the output buffer is always vectorized with the maximum vector length.
Parameters
----------
structure : str
The tiling structure. 'SSRSRS' is recommended.
vector_length_in_bits: int
The length of a vector register in bits.
max_innermost_factor : Optional[int]
The maximum size of the innermost factor. None means no limit
reuse_read : Optional[ReuseType]
Data reuse configuration for reading. None means no reuse.
reuse_write : Optional[ReuseType]
Data reuse configuration for writing. None means no reuse.
"""
def __init__(
self,
structure: str,
vector_length_in_bits: int,
max_innermost_factor: Optional[int] = None,
reuse_read: Optional[ReuseType] = None,
reuse_write: Optional[ReuseType] = None,
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleMultiLevelTilingWideVector, # type: ignore # pylint: disable=no-member
structure,
vector_length_in_bits,
max_innermost_factor,
reuse_read.as_dict() if reuse_read is not None else None,
reuse_write.as_dict() if reuse_write is not None else None,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/parallel_vectorize_unroll.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rule that mark parallelize, vectorize and unroll to the root block. The mark will be applied to
each block in a follow-up post processor"""
from typing import List, Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.ParallelizeVectorizeUnroll")
class ParallelizeVectorizeUnroll(ScheduleRule):
"""Rule that mark parallelize, vectorize and unroll to the root block. The mark will be applied
to each block in a follow-up post processor
Parameters
----------
max_jobs_per_core: int
The maximum number of jobs to be launched per CPU core. It sets the upper limit of CPU
parallelism, i.e. `num_cores * max_jobs_per_core`.
Use -1 to disable parallelism.
max_vectorize_extent: int
The maximum extent to be vectorized. It sets the upper limit of the hardware target
vectorization.
Use -1 to disable vectorization.
unroll_max_steps: Optional[List[int]]
The candidate values for the maximum number of unroll steps.
Use None to disable unrolling
unroll_explicit: bool
Whether to explicitly unroll the loop, or just add an "unroll" pragma
"""
def __init__(
self,
max_jobs_per_core: int = 16,
max_vectorize_extent: int = 16,
unroll_max_steps: Optional[List[int]] = None,
unroll_explicit: bool = True,
) -> None:
if unroll_max_steps is None:
unroll_max_steps = []
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleParallelizeVectorizeUnroll, # type: ignore # pylint: disable=no-member
max_jobs_per_core,
max_vectorize_extent,
unroll_max_steps,
unroll_explicit,
)
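# A minimal usage sketch; the values are illustrative assumptions.
def _example_parallelize_vectorize_unroll() -> "ParallelizeVectorizeUnroll":
    """Cap vectorization at 64 lanes and offer a few explicit-unroll options."""
    return ParallelizeVectorizeUnroll(
        max_jobs_per_core=16,
        max_vectorize_extent=64,
        unroll_max_steps=[0, 16, 64, 512],
        unroll_explicit=True,
    )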
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/random_compute_location.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rule that randomly select a compute-at location for a free block"""
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.RandomComputeLocation")
class RandomComputeLocation(ScheduleRule):
"""A rule that randomly select a compute-at location for a free block"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRuleRandomComputeLocation, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/schedule_rule/schedule_rule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Meta Schedule schedule rules are used for modification of
blocks in a schedule. See also PostOrderApply.
"""
from typing import TYPE_CHECKING, Callable, List
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir.schedule import BlockRV, Schedule
from .. import _ffi_api
from ..utils import _get_default_str
if TYPE_CHECKING:
from ..tune_context import TuneContext
@register_object("meta_schedule.ScheduleRule")
class ScheduleRule(Object):
"""Rules to modify a block in a schedule."""
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the schedule rule with a tune context.
Parameters
----------
context : TuneContext
The tuning context for initializing the schedule rule.
"""
_ffi_api.ScheduleRuleInitializeWithTuneContext( # type: ignore # pylint: disable=no-member
self, context
)
def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
"""Apply a schedule rule to the specific block in the given schedule.
Parameters
----------
sch : Schedule
The schedule to be modified.
block : BlockRV
The specific block to apply the schedule rule.
Returns
-------
design_spaces : List[Schedule]
The list of schedules generated by applying the schedule rule.
"""
return _ffi_api.ScheduleRuleApply( # type: ignore # pylint: disable=no-member
self, sch, block
)
def clone(self) -> "ScheduleRule":
"""Deep clone the schedule rule.
Returns
-------
cloned_rule : ScheduleRule
The cloned schedule rule.
"""
return _ffi_api.ScheduleRuleClone(self) # type: ignore # pylint: disable=no-member
@staticmethod
def create(kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]) -> List["ScheduleRule"]:
"""Create a list of schedule rules for the given kind.
Parameters
----------
kind : Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]
The kind of the schedule rules.
Returns
-------
rules : List[ScheduleRule]
The list of schedule rules.
"""
funcs = {
# pylint: disable=no-member
"llvm": _ffi_api.ScheduleRuleDefaultLLVM, # type: ignore
"cuda": _ffi_api.ScheduleRuleDefaultCUDA, # type: ignore
"cuda-tensorcore": _ffi_api.ScheduleRuleDefaultCUDATensorCore, # type: ignore
"hexagon": _ffi_api.ScheduleRuleDefaultHexagon, # type: ignore
# pylint: enable=no-member
}
for k, v in funcs.items():
if k == kind:
return v()
raise ValueError(f"Unsupported kind {kind} for schedule rule creation.")
create = ScheduleRule.create # pylint: disable=invalid-name
@register_object("meta_schedule.PyScheduleRule")
class _PyScheduleRule(ScheduleRule):
"""
A TVM object schedule rule to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PyScheduleRule
"""
def __init__(
self,
f_initialize_with_tune_context: Callable = None,
f_apply: Callable = None,
f_clone: Callable = None,
f_as_string: Callable = None,
):
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.ScheduleRulePyScheduleRule, # type: ignore # pylint: disable=no-member
f_initialize_with_tune_context,
f_apply,
f_clone,
f_as_string,
)
class PyScheduleRule:
"""
An abstract schedule rule with customized methods on the python-side.
This is the user facing class for function overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PyScheduleRule,
"methods": ["_initialize_with_tune_context", "apply", "clone", "__str__"],
}
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the schedule rule with a tune context.
Parameters
----------
context : TuneContext
The tuning context for initializing the schedule rule.
"""
raise NotImplementedError
def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
"""Apply a schedule rule to the specific block in the given schedule.
Parameters
----------
sch : Schedule
The schedule to be modified.
block : BlockRV
The specific block to apply the schedule rule.
Returns
-------
design_spaces : List[Schedule]
The list of schedules generated by applying the schedule rule.
"""
raise NotImplementedError
def clone(self) -> ScheduleRule:
"""Deep clone the schedule rule.
Returns
-------
cloned_rule : ScheduleRule
The cloned schedule rule.
"""
raise NotImplementedError
def __str__(self) -> str:
"""Get the schedule rule as string with name.
Return
------
result : str
Get the schedule rule as string with name.
"""
return _get_default_str(self)
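# A minimal sketch of the smallest useful PyScheduleRule subclass. `EchoRule`
# is a hypothetical name introduced only for illustration; @derived_object
# (from ..utils) is required, as noted in the class docstring above.
def _example_py_schedule_rule() -> ScheduleRule:
    from ..utils import derived_object  # pylint: disable=import-outside-toplevel

    @derived_object
    class EchoRule(PyScheduleRule):
        """A rule that returns the input schedule unchanged."""

        def _initialize_with_tune_context(self, context: "TuneContext") -> None:
            pass

        def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
            return [sch]  # no transformation: echo the input schedule

        def clone(self) -> ScheduleRule:
            return EchoRule()

    return EchoRule()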
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/search_strategy/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.search_strategy package.
Meta Schedule search strategy utilizes the design spaces given
to generate measure candidates.
"""
from .evolutionary_search import EvolutionarySearch
from .replay_func import ReplayFunc
from .replay_trace import ReplayTrace
from .search_strategy import MeasureCandidate, PySearchStrategy, SearchStrategy, create
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/search_strategy/evolutionary_search.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Evolutionary Search Strategy"""
from tvm._ffi import register_object
from .. import _ffi_api
from .search_strategy import SearchStrategy
@register_object("meta_schedule.EvolutionarySearch")
class EvolutionarySearch(SearchStrategy):
"""
Replay Trace Search Strategy is a search strategy that always replays the trace by removing its
decisions so that the decisions would be randomly re-generated.
Parameters
----------
population_size : int
The initial population of traces from measured samples and randomly generated samples.
init_measured_ratio : float
The ratio of measured samples in the initial population.
init_min_unmeasured : int
The minimal size of unmeasured population in the initial sampling.
max_fail_count : int
The maximum number of failures during initial sampling.
genetic_num_iters : int
The number of iterations for genetic algorithm.
genetic_mutate_prob : float
The probability of mutation.
genetic_max_fail_count : int
The maximum number to retry mutation.
eps_greedy : float
The ratio of greedy selected samples in the final picks.
"""
population_size: int
init_measured_ratio: float
init_min_unmeasured: int
max_fail_count: int
genetic_num_iters: int
genetic_mutate_prob: float
genetic_max_fail_count: int
eps_greedy: float
def __init__(
self,
*,
population_size: int = 512,
init_measured_ratio: float = 0.2,
init_min_unmeasured: int = 50,
max_fail_count: int = 5,
genetic_num_iters: int = 4,
genetic_mutate_prob: float = 0.85,
genetic_max_fail_count: int = 10,
eps_greedy: float = 0.05,
) -> None:
"""Constructor"""
self.__init_handle_by_constructor__(
_ffi_api.SearchStrategyEvolutionarySearch, # type: ignore # pylint: disable=no-member
population_size,
init_measured_ratio,
init_min_unmeasured,
max_fail_count,
genetic_num_iters,
genetic_mutate_prob,
genetic_max_fail_count,
eps_greedy,
)
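# A minimal usage sketch; a smaller-than-default population for quick
# experiments (illustrative values, not a tuned configuration).
def _example_evolutionary_search() -> "EvolutionarySearch":
    return EvolutionarySearch(population_size=128, genetic_num_iters=2)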
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/search_strategy/replay_func.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Replay Trace Search Strategy"""
from tvm._ffi import register_object
from .. import _ffi_api
from .search_strategy import SearchStrategy
@register_object("meta_schedule.ReplayFunc")
class ReplayFunc(SearchStrategy):
"""
Replay Func Search Strategy is a search strategy that generates measure candidates by
calling a design space generator and transform the design space.
Parameters
----------
num_trials_per_iter : int
Number of trials per iteration.
max_trials_per_task : int
Total number of trials for one task
"""
def __init__(self):
"""Constructor"""
self.__init_handle_by_constructor__(
_ffi_api.SearchStrategyReplayFunc, # type: ignore # pylint: disable=no-member
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/search_strategy/replay_trace.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Replay Trace Search Strategy"""
from tvm._ffi import register_object
from .. import _ffi_api
from .search_strategy import SearchStrategy
@register_object("meta_schedule.ReplayTrace")
class ReplayTrace(SearchStrategy):
"""
Replay Trace Search Strategy is a search strategy that always replays the trace by removing its
decisions so that the decisions are randomly re-generated.
Parameters
----------
max_fail_count : int
Max number of failures during trace replaying.
"""
max_fail_count: int
def __init__(self, max_fail_count: int = 100):
"""Constructor"""
self.__init_handle_by_constructor__(
_ffi_api.SearchStrategyReplayTrace, # type: ignore # pylint: disable=no-member
max_fail_count,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/search_strategy/search_strategy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Meta Schedule search strategy that generates the measure
candidates for measurement.
"""
from typing import TYPE_CHECKING, Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir.schedule import Schedule
from .. import _ffi_api
from ..arg_info import ArgInfo
from ..runner import RunnerResult
if TYPE_CHECKING:
from ..cost_model import CostModel
from ..database import Database
from ..tune_context import TuneContext
@register_object("meta_schedule.MeasureCandidate")
class MeasureCandidate(Object):
"""Measure candidate class.
Parameters
----------
sch : Schedule
The schedule to be measured.
args_info : List[ArgInfo]
The argument information.
"""
sch: Schedule
args_info: List[ArgInfo]
def __init__(
self,
sch: Schedule,
args_info: List[ArgInfo],
) -> None:
"""Constructor.
Parameters
----------
sch : Schedule
The schedule to be measured.
args_info : List[ArgInfo]
The argument information.
"""
self.__init_handle_by_constructor__(
_ffi_api.MeasureCandidate, # type: ignore # pylint: disable=no-member
sch,
args_info,
)
@register_object("meta_schedule.SearchStrategy")
class SearchStrategy(Object):
"""Search strategy is the class that generates the measure candidates."""
SearchStrategyType = Union[
"SearchStrategy",
Literal[
"replay-func",
"replay-trace",
"evolutionary",
],
]
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the search strategy with tuning context.
Parameters
----------
context : TuneContext
The tuning context for initialization.
"""
_ffi_api.SearchStrategyInitializeWithTuneContext( # type: ignore # pylint: disable=no-member
self, context
)
def pre_tuning(
self,
max_trials: int,
num_trials_per_iter: int,
design_spaces: List[Schedule],
database: Optional["Database"] = None,
cost_model: Optional["CostModel"] = None,
) -> None:
"""Pre-tuning for the search strategy.
Parameters
----------
max_trials : int
The maximum number of trials.
num_trials_per_iter : int
The number of trials per iteration.
design_spaces : List[Schedule]
The design spaces used during tuning process.
database : Optional[Database] = None
The database used during tuning process.
cost_model : Optional[CostModel] = None
The cost model used during tuning process.
"""
_ffi_api.SearchStrategyPreTuning( # type: ignore # pylint: disable=no-member
self,
max_trials,
num_trials_per_iter,
design_spaces,
database,
cost_model,
)
def post_tuning(self) -> None:
"""Post-tuning for the search strategy."""
_ffi_api.SearchStrategyPostTuning(self) # type: ignore # pylint: disable=no-member
def generate_measure_candidates(self) -> Optional[List[MeasureCandidate]]:
"""Generate measure candidates from design spaces for measurement.
Returns
-------
measure_candidates : Optional[List[MeasureCandidate]]
The measure candidates generated, None if finished.
"""
return _ffi_api.SearchStrategyGenerateMeasureCandidates(self) # type: ignore # pylint: disable=no-member
def notify_runner_results(
self,
measure_candidates: List[MeasureCandidate],
results: List[RunnerResult],
) -> None:
"""Update the search strategy with profiling results.
Parameters
----------
measure_candidates : List[MeasureCandidate]
The measure candidates for update.
results : List[RunnerResult]
The profiling results from the runner.
"""
_ffi_api.SearchStrategyNotifyRunnerResults( # type: ignore # pylint: disable=no-member
self,
measure_candidates,
results,
)
def clone(self) -> "SearchStrategy":
"""Clone the search strategy.
Returns
-------
cloned : SearchStrategy
The cloned search strategy.
"""
return _ffi_api.SearchStrategyClone(self) # type: ignore # pylint: disable=no-member
@staticmethod
def create( # pylint: disable=keyword-arg-before-vararg
kind: Literal[
"evolutionary",
"replay-trace",
"replay-func",
] = "evolutionary",
*args,
**kwargs,
) -> "SearchStrategy":
"""Create a search strategy."""
from . import ( # pylint: disable=import-outside-toplevel
EvolutionarySearch,
ReplayFunc,
ReplayTrace,
)
if kind == "evolutionary":
return EvolutionarySearch(*args, **kwargs)
if kind == "replay-trace":
return ReplayTrace(*args, **kwargs)
if kind == "replay-func":
return ReplayFunc(*args, **kwargs) # type: ignore
raise ValueError(f"Unknown SearchStrategy: {kind}")
create = SearchStrategy.create # pylint: disable=invalid-name
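# A minimal sketch of the factory: it dispatches on the kind string and
# forwards keyword arguments to the chosen strategy (values are illustrative).
def _example_create_search_strategy() -> "SearchStrategy":
    return create("evolutionary", population_size=128, eps_greedy=0.1)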
@register_object("meta_schedule.PySearchStrategy")
class _PySearchStrategy(SearchStrategy):
"""
A TVM object search strategy to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PySearchStrategy
"""
def __init__(
self,
f_initialize_with_tune_context: Callable = None,
f_pre_tuning: Callable = None,
f_post_tuning: Callable = None,
f_generate_measure_candidates: Callable = None,
f_notify_runner_results: Callable = None,
f_clone: Callable = None,
):
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.SearchStrategyPySearchStrategy, # type: ignore # pylint: disable=no-member
f_initialize_with_tune_context,
f_pre_tuning,
f_post_tuning,
f_generate_measure_candidates,
f_notify_runner_results,
f_clone,
)
class PySearchStrategy:
"""
An abstract search strategy with customized methods on the python-side.
This is the user facing class for function overloading inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PySearchStrategy,
"methods": [
"_initialize_with_tune_context",
"pre_tuning",
"post_tuning",
"generate_measure_candidates",
"notify_runner_results",
"clone",
],
}
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the search strategy with tuning context.
Parameters
----------
context : TuneContext
The tuning context for initialization.
"""
raise NotImplementedError
def pre_tuning(
self,
max_trials: int,
num_trials_per_iter: int,
design_spaces: List[Schedule],
database: Optional["Database"] = None,
cost_model: Optional["CostModel"] = None,
) -> None:
"""Pre-tuning for the search strategy.
Parameters
----------
design_spaces : List[Schedule]
The design spaces for pre-tuning.
"""
raise NotImplementedError
def post_tuning(self) -> None:
"""Post-tuning for the search strategy."""
raise NotImplementedError
def generate_measure_candidates(self) -> Optional[List[MeasureCandidate]]:
"""Generate measure candidates from design spaces for measurement.
Returns
-------
measure_candidates : Optional[List[MeasureCandidate]]
The measure candidates generated, None if finished.
"""
raise NotImplementedError
def notify_runner_results(
self,
measure_candidates: List[MeasureCandidate],
results: List[RunnerResult],
) -> None:
"""Update the search strategy with profiling results.
Parameters
----------
measure_candidates : List[MeasureCandidate]
The measure candidates for update.
results : List[RunnerResult]
The profiling results from the runner.
"""
raise NotImplementedError
def clone(self) -> SearchStrategy:
"""Clone the search strategy.
Returns
-------
strategy : SearchStrategy
The cloned search strategy.
"""
raise NotImplementedError
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/space_generator/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.space_generator package.
Meta Schedule design space generators that generate design
spaces for the generation of measure candidates.
"""
from .post_order_apply import PostOrderApply
from .schedule_fn import ScheduleFn
from .space_generator import PySpaceGenerator, ScheduleFnType, SpaceGenerator, create
from .space_generator_union import SpaceGeneratorUnion
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/space_generator/post_order_apply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Post Order Apply Space Generator."""
from tvm._ffi import register_object
from .. import _ffi_api
from .space_generator import (
MutatorProbType,
PostprocType,
ScheduleRuleType,
SpaceGenerator,
_normalize_rules,
)
@register_object("meta_schedule.PostOrderApply")
class PostOrderApply(SpaceGenerator):
"""
PostOrderApply is the design space generator that generates design spaces by applying schedule
rules to blocks in post-DFS order.
Parameters
----------
f_block_filter : Optional[function]
An optional callback function that is used to filter which blocks have schedules generated
for them. The function should take in a block and return True if a schedule should
be generated or False if that block should be skipped. If no function is provided
all blocks will have schedules generated.
"""
def __init__(
self,
f_block_filter=None,
sch_rules: ScheduleRuleType = "from-target",
postprocs: PostprocType = "from-target",
mutator_probs: MutatorProbType = "from-target",
):
"""Constructor"""
sch_rules, postprocs, mutator_probs = _normalize_rules(sch_rules, postprocs, mutator_probs)
self.__init_handle_by_constructor__(
_ffi_api.SpaceGeneratorPostOrderApply, # type: ignore # pylint: disable=no-member
f_block_filter,
sch_rules,
postprocs,
mutator_probs,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/space_generator/schedule_fn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Union of meta Schedule design space generators."""
from tvm._ffi import register_object
from .. import _ffi_api
from .space_generator import (
MutatorProbType,
PostprocType,
ScheduleRuleType,
SpaceGenerator,
_normalize_rules,
)
@register_object("meta_schedule.ScheduleFn")
class ScheduleFn(SpaceGenerator):
"""Create a design space generator with customized schedule function.
The schedule function can have the following signatures:
- 1) [Schedule] -> None
- 2) [Schedule] -> Schedule
- 3) [Schedule] -> List[Schedule]
"""
def __init__(
self,
sch_fn: SpaceGenerator.ScheduleFnType,
sch_rules: ScheduleRuleType = "from-target",
postprocs: PostprocType = "from-target",
mutator_probs: MutatorProbType = "from-target",
):
"""Constructor.
Parameters
----------
sch_fn : SpaceGenerator.ScheduleFnType
The schedule function, which can have the following signatures:
- 1) [Schedule] -> None
- 2) [Schedule] -> Schedule
- 3) [Schedule] -> List[Schedule]
"""
sch_rules, postprocs, mutator_probs = _normalize_rules(sch_rules, postprocs, mutator_probs)
self.__init_handle_by_constructor__(
_ffi_api.SpaceGeneratorScheduleFn, # type: ignore # pylint: disable=no-member
sch_fn,
sch_rules,
postprocs,
mutator_probs,
)
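# A minimal usage sketch wrapping a trivial schedule function of signature
# (1): [Schedule] -> None. The no-op body is an illustrative placeholder.
def _example_schedule_fn_generator() -> "ScheduleFn":
    def _do_nothing(sch):
        """Inspect or mutate `sch` in place; returning None selects signature (1)."""
        return None

    return ScheduleFn(_do_nothing)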
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/space_generator/space_generator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Meta Schedule design space generators that generate design
spaces for the generation of measure candidates.
"""
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import Object
from tvm.tir.schedule import Schedule
from .. import _ffi_api
if TYPE_CHECKING:
from ..mutator import Mutator
from ..postproc import Postproc
from ..schedule_rule import ScheduleRule
from ..tune_context import TuneContext
@register_object("meta_schedule.SpaceGenerator")
class SpaceGenerator(Object):
"""The abstract design space generator interface."""
ScheduleFnType = Union[
Callable[[Schedule], None], # No output
Callable[[Schedule], Schedule], # Single output
Callable[[Schedule], List[Schedule]], # Multiple outputs
]
SpaceGeneratorType = Union[
"SpaceGenerator",
ScheduleFnType,
Literal["post-order-apply", "union"],
]
sch_rules: Optional[List["ScheduleRule"]]
postprocs: Optional[List["Postproc"]]
mutator_probs: Optional[Dict["Mutator", float]]
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the design space generator with tuning context.
Parameters
----------
context : TuneContext
The tuning context for initializing the design space generator.
"""
_ffi_api.SpaceGeneratorInitializeWithTuneContext( # type: ignore # pylint: disable=no-member
self, context
)
def generate_design_space(self, mod: IRModule) -> List[Schedule]:
"""Generate design spaces given a module.
Parameters
----------
mod : IRModule
The module used for design space generation.
Returns
-------
design_spaces : List[Schedule]
The generated design spaces, i.e., schedules.
"""
return _ffi_api.SpaceGeneratorGenerateDesignSpace(self, mod) # type: ignore # pylint: disable=no-member
def clone(self) -> "SpaceGenerator":
"""Clone the design space generator.
Returns
-------
cloned_sg : SpaceGenerator
The cloned design space generator.
"""
return _ffi_api.SpaceGeneratorClone(self) # type: ignore # pylint: disable=no-member
@staticmethod
def create( # pylint: disable=keyword-arg-before-vararg
kind: Union[
Literal["post-order-apply", "union"],
ScheduleFnType,
] = "post-order-apply",
*args,
**kwargs,
) -> "SpaceGenerator":
"""Create a design space generator."""
from . import ( # pylint: disable=import-outside-toplevel
PostOrderApply,
ScheduleFn,
SpaceGeneratorUnion,
)
if callable(kind):
def create_schedule_fn(
func,
sch_rules=[],
postprocs=[],
mutator_probs={},
): # pylint: disable=dangerous-default-value
return ScheduleFn(func, sch_rules, postprocs, mutator_probs)
return create_schedule_fn(kind, *args, **kwargs) # type: ignore
if kind == "post-order-apply":
return PostOrderApply(*args, **kwargs)
if kind == "union":
return SpaceGeneratorUnion(*args, **kwargs)
raise ValueError(f"Unknown SpaceGenerator: {kind}")
ScheduleFnType = SpaceGenerator.ScheduleFnType
ScheduleRuleType = Union[
List["ScheduleRule"],
Literal["llvm", "cuda", "cuda-tensorcore", "hexagon", "from-target"],
]
PostprocType = Union[
List["Postproc"],
Literal["llvm", "cuda", "cuda-tensorcore", "hexagon", "from-target"],
]
MutatorProbType = Union[
Dict["Mutator", float],
Literal["llvm", "cuda", "cuda-tensorcore", "hexagon", "from-target"],
]
create = SpaceGenerator.create # pylint: disable=invalid-name
def _normalize_rules(
sch_rules: ScheduleRuleType,
postprocs: PostprocType,
mutator_probs: MutatorProbType,
) -> Tuple[
Optional[List["ScheduleRule"]],
Optional[List["Postproc"]],
Optional[Dict["Mutator", float]],
]:
# pylint: disable=import-outside-toplevel
from ..mutator import Mutator
from ..postproc import Postproc
from ..schedule_rule import ScheduleRule
# pylint: enable=import-outside-toplevel
assert sch_rules is not None
assert postprocs is not None
assert mutator_probs is not None
if isinstance(sch_rules, str):
if sch_rules == "from-target":
sch_rules = None
else:
sch_rules = ScheduleRule.create(sch_rules)
if isinstance(postprocs, str):
if postprocs == "from-target":
postprocs = None
else:
postprocs = Postproc.create(postprocs)
if isinstance(mutator_probs, str):
if mutator_probs == "from-target":
mutator_probs = None
else:
mutator_probs = Mutator.create(mutator_probs)
return sch_rules, postprocs, mutator_probs # type: ignore
@register_object("meta_schedule.PySpaceGenerator")
class _PySpaceGenerator(SpaceGenerator):
"""
A TVM object space generator to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PySpaceGenerator
"""
def __init__(
self,
sch_rules: ScheduleRuleType = "from-target",
postprocs: PostprocType = "from-target",
mutator_probs: MutatorProbType = "from-target",
f_initialize_with_tune_context: Optional[Callable] = None,
f_generate_design_space: Optional[Callable] = None,
f_clone: Optional[Callable] = None,
):
"""Constructor."""
sch_rules, postprocs, mutator_probs = _normalize_rules(sch_rules, postprocs, mutator_probs)
self.__init_handle_by_constructor__(
_ffi_api.SpaceGeneratorPySpaceGenerator, # type: ignore # pylint: disable=no-member
sch_rules,
postprocs,
mutator_probs,
f_initialize_with_tune_context,
f_generate_design_space,
f_clone,
)
class PySpaceGenerator:
"""
An abstract space generator with customized methods on the Python side.
This is the user-facing class for method-overriding inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PySpaceGenerator,
"fields": ["sch_rules", "postprocs", "mutator_probs"],
"methods": ["_initialize_with_tune_context", "generate_design_space", "clone"],
}
def __init__(
self,
sch_rules: ScheduleRuleType = "from-target",
postprocs: PostprocType = "from-target",
mutator_probs: MutatorProbType = "from-target",
):
"""Constructor."""
# These three attributes back the entries in `_tvm_metadata["fields"]`
# and are forwarded to `_PySpaceGenerator` by @derived_object.
self.sch_rules = sch_rules
self.postprocs = postprocs
self.mutator_probs = mutator_probs
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the design space generator with tuning context.
Parameters
----------
context : TuneContext
The tuning context for initializing the design space generator.
"""
raise NotImplementedError
def generate_design_space(self, mod: IRModule) -> List[Schedule]:
"""Generate design spaces given a module.
Parameters
----------
mod : IRModule
The module used for design space generation.
Returns
-------
design_spaces : List[Schedule]
The generated design spaces, i.e., schedules.
"""
raise NotImplementedError
def clone(self) -> SpaceGenerator:
"""Clone the design space generator.
Returns
-------
cloned_sg : SpaceGenerator
The cloned design space generator.
"""
raise NotImplementedError
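# Inheritance sketch (illustrative; `TrivialSpaceGenerator` is a hypothetical
# example, not part of the module):
#     from tvm.tir import Schedule
#     from tvm.meta_schedule.utils import derived_object
#     @derived_object
#     class TrivialSpaceGenerator(PySpaceGenerator):
#         def _initialize_with_tune_context(self, context):
#             pass
#         def generate_design_space(self, mod):
#             return [Schedule(mod)]  # a single unmodified schedule as the space
#         def clone(self):
#             return TrivialSpaceGenerator()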
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/space_generator/space_generator_union.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Union of meta Schedule design space generators."""
from typing import List
from tvm._ffi import register_object
from .. import _ffi_api
from .space_generator import (
MutatorProbType,
PostprocType,
ScheduleRuleType,
SpaceGenerator,
_normalize_rules,
)
@register_object("meta_schedule.SpaceGeneratorUnion")
class SpaceGeneratorUnion(SpaceGenerator):
"""Union of design space generators."""
def __init__(
self,
space_generators: List[SpaceGenerator],
sch_rules: ScheduleRuleType = "from-target",
postprocs: PostprocType = "from-target",
mutator_probs: MutatorProbType = "from-target",
):
"""Constructor.
Parameters
----------
space_generators : List[SpaceGenerator]
The list of design space generators to be unioned.
"""
sch_rules, postprocs, mutator_probs = _normalize_rules(sch_rules, postprocs, mutator_probs)
self.__init_handle_by_constructor__(
_ffi_api.SpaceGeneratorSpaceGeneratorUnion, # type: ignore # pylint: disable=no-member
space_generators,
sch_rules,
postprocs,
mutator_probs,
)
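# Usage sketch (illustrative; `sg_a` and `sg_b` stand for any two existing
# SpaceGenerator instances):
#     union = SpaceGeneratorUnion(space_generators=[sg_a, sg_b])
# The union concatenates the design spaces produced by each member generator.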
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/task_scheduler/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.task_scheduler package.
Meta Schedule task schedulers manage task scheduling for
measure-candidate generation and measurement, and then save the
records to the database.
"""
from .gradient_based import GradientBased
from .round_robin import RoundRobin
from .task_scheduler import PyTaskScheduler, TaskScheduler, create
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/task_scheduler/gradient_based.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Gradient Based Task Scheduler"""
from tvm._ffi import register_object
from .. import _ffi_api
from ..logging import get_logger, get_logging_func
from .task_scheduler import TaskScheduler
logger = get_logger(__name__) # pylint: disable=invalid-name
@register_object("meta_schedule.GradientBased")
class GradientBased(TaskScheduler):
"""Gradient Based Task Scheduler"""
def __init__(
self,
*,
alpha: float = 0.2,
window_size: int = 3,
seed: int = -1,
) -> None:
"""Constructor.
Parameters
----------
alpha : float = 0.2
The parameter alpha in gradient computation.
window_size : int = 3
The parameter to control backward window size in gradient computation.
seed : int = -1
The random seed.
"""
self.__init_handle_by_constructor__(
_ffi_api.TaskSchedulerGradientBased, # type: ignore # pylint: disable=no-member
get_logging_func(logger),
alpha,
window_size,
seed,
)
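# Usage sketch (illustrative):
#     scheduler = GradientBased(alpha=0.2, window_size=3, seed=42)
# Roughly, each round the scheduler scores every task by the expected change
# in weighted end-to-end latency from spending further trials on it, and
# allocates the next batch of trials to the task with the best score.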
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/task_scheduler/round_robin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Round Robin Task Scheduler"""
from tvm._ffi import register_object
from .. import _ffi_api
from ..logging import get_logger, get_logging_func
from .task_scheduler import TaskScheduler
logger = get_logger(__name__) # pylint: disable=invalid-name
@register_object("meta_schedule.RoundRobin")
class RoundRobin(TaskScheduler):
"""Round Robin Task Scheduler"""
def __init__(self) -> None:
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.TaskSchedulerRoundRobin, # type: ignore # pylint: disable=no-member
get_logging_func(logger),
)
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/task_scheduler/task_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Auto-tuning Task Scheduler"""
from typing import Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from .. import _ffi_api
from ..builder import Builder, BuilderResult
from ..cost_model import CostModel
from ..database import Database
from ..logging import get_logger, get_logging_func
from ..measure_callback import MeasureCallback
from ..runner import Runner, RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
logger = get_logger(__name__) # pylint: disable=invalid-name
@register_object("meta_schedule.TaskRecord")
class TaskRecord(Object):
"""The running record of a task."""
ctx: TuneContext
task_weight: float
flop: float
is_terminated: bool
build_error_count: int
run_error_count: int
measure_candidates: List[MeasureCandidate]
builder_results: List[BuilderResult]
runner_results: List[RunnerResult]
@register_object("meta_schedule.TaskScheduler")
class TaskScheduler(Object):
"""The abstract task scheduler interface."""
tasks_: List[TaskRecord]
measure_callbacks_: List[MeasureCallback]
database_: Optional[Database]
cost_model_: Optional[CostModel]
remaining_tasks_: int
TaskSchedulerType = Union["TaskScheduler", Literal["gradient", "round-robin"]]
def next_task_id(self) -> int:
"""Fetch the next task id.
Returns
-------
next_task_id : int
The next task id.
"""
return _ffi_api.TaskSchedulerNextTaskId(self) # type: ignore # pylint: disable=no-member
def join_running_task(self, task_id: int) -> List[RunnerResult]:
"""Wait until the task is finished.
Parameters
----------
task_id : int
The task id to be joined.
Returns
-------
results : List[RunnerResult]
The list of results.
"""
return _ffi_api.TaskSchedulerJoinRunningTask(self, task_id) # type: ignore # pylint: disable=no-member
def tune(
self,
tasks: List[TuneContext],
task_weights: List[float],
max_trials_global: int,
max_trials_per_task: int,
num_trials_per_iter: int,
builder: Builder,
runner: Runner,
measure_callbacks: List[MeasureCallback],
database: Optional[Database],
cost_model: Optional[CostModel],
) -> None:
"""Auto-tuning.
Parameters
----------
tasks : List[TuneContext]
The list of tuning contexts as tasks.
task_weights : List[float]
The list of task weights.
max_trials_global : int
The maximum number of trials globally.
max_trials_per_task : int
The maximum number of trials per task.
num_trials_per_iter : int
The number of trials per iteration.
builder : Builder
The builder.
runner : Runner
The runner.
measure_callbacks : List[MeasureCallback]
The list of measure callbacks.
database : Optional[Database]
The database.
cost_model : Optional[CostModel]
The cost model.
"""
task_weights = [float(w) for w in task_weights]
_ffi_api.TaskSchedulerTune( # type: ignore # pylint: disable=no-member
self,
tasks,
task_weights,
max_trials_global,
max_trials_per_task,
num_trials_per_iter,
builder,
runner,
measure_callbacks,
database,
cost_model,
)
def terminate_task(self, task_id: int) -> None:
"""Terminate the task
Parameters
----------
task_id : int
The task id to be terminated.
"""
_ffi_api.TaskSchedulerTerminateTask(self, task_id) # type: ignore # pylint: disable=no-member
def touch_task(self, task_id: int) -> None:
"""Touch the task and update its status
Parameters
----------
task_id : int
The task id to be checked.
"""
_ffi_api.TaskSchedulerTouchTask(self, task_id) # type: ignore # pylint: disable=no-member
def print_tuning_statistics(self) -> None:
"""Print out a human-readable format of the tuning statistics."""
return _ffi_api.TaskSchedulerPrintTuningStatistics(self) # type: ignore # pylint: disable=no-member
@staticmethod
def create( # pylint: disable=keyword-arg-before-vararg
kind: Literal["round-robin", "gradient"] = "gradient",
*args,
**kwargs,
) -> "TaskScheduler":
"""Create a task scheduler."""
from . import ( # pylint: disable=import-outside-toplevel
GradientBased,
RoundRobin,
)
if kind == "round-robin":
return RoundRobin(*args, **kwargs) # type: ignore
if kind == "gradient":
return GradientBased(*args, **kwargs)
raise ValueError(f"Unknown TaskScheduler name: {kind}")
create = TaskScheduler.create # pylint: disable=invalid-name
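# Usage sketch (illustrative):
#     scheduler = TaskScheduler.create("gradient")  # the default kind
#     round_robin = TaskScheduler.create("round-robin")
# Either scheduler is then driven through `tune(...)` with the tasks, task
# weights, builder, runner, measure callbacks, database, and cost model.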
@register_object("meta_schedule.PyTaskScheduler")
class _PyTaskScheduler(TaskScheduler):
"""
A TVM object task scheduler to support customization on the Python side.
This is NOT the user-facing class for method-overriding inheritance.
See also: PyTaskScheduler
"""
def __init__(
self,
f_next_task_id: Callable,
f_join_running_task: Callable,
f_tune: Callable,
):
"""Constructor."""
self.__init_handle_by_constructor__(
_ffi_api.TaskSchedulerPyTaskScheduler, # type: ignore # pylint: disable=no-member
get_logging_func(logger),
f_next_task_id,
f_join_running_task,
f_tune,
)
class PyTaskScheduler:
"""
An abstract task scheduler with customized methods on the Python side.
This is the user-facing class for method-overriding inheritance.
Note: @derived_object is required for proper usage of any inherited class.
"""
_tvm_metadata = {
"cls": _PyTaskScheduler,
"fields": [],
"methods": ["next_task_id", "join_running_task", "tune"],
}
def __init__(self):
...
def tune(
self,
tasks: List[TuneContext],
task_weights: List[float],
max_trials_global: int,
max_trials_per_task: int,
num_trials_per_iter: int,
builder: Builder,
runner: Runner,
measure_callbacks: List[MeasureCallback],
database: Optional[Database],
cost_model: Optional[CostModel],
) -> None:
"""Auto-tuning."""
# Using self._outer to replace the self pointer
_ffi_api.TaskSchedulerTune( # type: ignore # pylint: disable=no-member
self._outer(), # type: ignore # pylint: disable=no-member
tasks,
task_weights,
max_trials_global,
max_trials_per_task,
num_trials_per_iter,
builder,
runner,
measure_callbacks,
database,
cost_model,
)
def next_task_id(self) -> int:
"""Fetch the next task id.
Returns
-------
next_task_id : int
The next task id.
"""
raise NotImplementedError
def join_running_task(self, task_id: int) -> List[RunnerResult]:
"""Wait until the task is finished.
Parameters
----------
task_id : int
The task id to be joined.
Returns
-------
results : List[RunnerResult]
The list of results.
"""
# Using self._outer to replace the self pointer
return _ffi_api.TaskSchedulerJoinRunningTask(self._outer(), task_id) # type: ignore # pylint: disable=no-member
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities in meta schedule"""
# NOTE: Do not import any module here by default
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/custom_builder_runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Customized builder and runner methods"""
# pylint: disable=import-outside-toplevel
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union
if TYPE_CHECKING:
import numpy as np # type: ignore
from tvm.ir import IRModule
from tvm.meta_schedule.runner import EvaluatorConfig, RPCConfig
from tvm.runtime import Device, Module, NDArray
from tvm.target import Target
from tvm.runtime.vm import Executable
def build_relay(
mod: "IRModule",
target: "Target",
params: Dict[str, "NDArray"],
) -> "Module":
"""Build a Relay IRModule
Parameters
----------
mod : IRModule
The Relay IRModule to build.
target : Target
The target to build the module for.
params : Dict[str, NDArray]
The parameter dict to build the module with.
Returns
-------
mod : runtime.Module
The built module.
"""
from tvm.relay.build_module import _build_module_no_factory as relay_build
from tvm.runtime import Module
result = relay_build(mod, target=target, target_host=None, params=params)
assert isinstance(result, Module)
return result
def build_relay_with_tensorrt(
mod: "IRModule",
target: "Target",
params: Dict[str, "NDArray"],
) -> "Module":
"""Build a Relay IRModule with TensorRT BYOC
Parameters
----------
mod : IRModule
The Relay IRModule to build.
target : Target
The target to build the module for.
params : Dict[str, NDArray]
The parameter dict to build the module with.
Returns
-------
mod : runtime.Module
The built module.
"""
from tvm.ir.transform import PassContext
from tvm.relay.build_module import _build_module_no_factory as relay_build
from tvm.relay.op.contrib import tensorrt
from tvm.runtime import Module
mod = tensorrt.partition_for_tensorrt(mod, params)
with PassContext(opt_level=3):
result = relay_build(mod, target=target, target_host=None, params=params)
assert isinstance(result, Module)
return result
def run_with_graph_executor(
rt_mod: "Module",
device: "Device",
evaluator_config: "EvaluatorConfig",
repeated_args: List["NDArray"],
) -> List[float]:
"""Run a Relay module with GraphExecutor
Parameters
----------
rt_mod : Module
The Relay module to run.
device : Device
The device to run the module on.
evaluator_config : EvaluatorConfig
The evaluator configuration to run the module with.
repeated_args : List[NDArray]
The list of repeated arguments to run the module with.
Returns
-------
results : List[float]
The list of results.
"""
import itertools
from tvm.contrib.graph_executor import GraphModule
graph_mod = GraphModule(rt_mod["default"](device))
evaluator = graph_mod.module.time_evaluator(
func_name="run",
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
f_preproc="cache_flush_cpu_non_first_arg"
if evaluator_config.enable_cpu_cache_flush
else "",
)
repeated_costs = []
for args in repeated_args:
profile_result = evaluator(*args)
repeated_costs.append(profile_result.results)
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
return costs
def run_module_via_rpc(
rpc_config: "RPCConfig",
lib: Union["Module", "Executable"],
dev_type: str,
args: Union[Dict[int, "np.ndarray"], Dict[str, "np.ndarray"]],
continuation: Callable,
backend: Optional[str] = "graph",
):
"""Execute a tvm.runtime.Module on RPC remote"""
# pylint: disable=import-outside-toplevel
import os
import tempfile
from tvm.contrib.tar import tar
from tvm.runtime import ndarray
# pylint: enable=import-outside-toplevel
with tempfile.TemporaryDirectory() as tmp_dir:
filename = os.path.join(tmp_dir, "tvm_tmp_mod." + tar.output_format)
if backend == "vm":
code, lib = lib.save()
lib.export_library(filename, tar)
session = rpc_config.connect_server()
session.upload(filename)
_, filename = os.path.split(filename)
rt_mod = session.load_module(filename)
if backend == "vm":
rt_mod = session.get_function("runtime.Load_Executable")(code, rt_mod)
dev = session.device(dev_type=dev_type, dev_id=0)
nd_args = {k: ndarray.array(v, dev) for k, v in args.items()}
return continuation(rt_mod, dev, nd_args)
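# Usage sketch (illustrative; `rpc_config`, the built `lib`, the NumPy input
# dict `np_args`, and the continuation `f_measure(rt_mod, dev, nd_args)` are
# all assumed to be prepared by the caller):
#     costs = run_module_via_rpc(
#         rpc_config=rpc_config,
#         lib=lib,
#         dev_type="cuda",
#         args=np_args,
#         continuation=f_measure,
#     )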
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/dataset_collect_models.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import os
from typing import List, Tuple
from tqdm import tqdm # type: ignore
from tvm.meta_schedule.testing.relay_workload import get_network
# pylint: disable=too-many-branches
def _build_dataset() -> List[Tuple[str, List[int]]]:
network_keys = []
for name in [
"resnet_18",
"resnet_50",
"mobilenet_v2",
"mobilenet_v3",
"wide_resnet_50",
"resnext_50",
"densenet_121",
"vgg_16",
]:
for batch_size in [1, 4, 8]:
for image_size in [224, 240, 256]:
network_keys.append((name, [batch_size, 3, image_size, image_size]))
# inception-v3
for name in ["inception_v3"]:
for batch_size in [1, 2, 4]:
for image_size in [299]:
network_keys.append((name, [batch_size, 3, image_size, image_size]))
# resnet3d
for name in ["resnet3d_18"]:
for batch_size in [1, 2, 4]:
for image_size in [112, 128, 144]:
network_keys.append((name, [batch_size, 3, image_size, image_size, 16]))
# bert
for name in ["bert_tiny", "bert_base", "bert_medium", "bert_large"]:
for batch_size in [1, 2, 4]:
for seq_length in [64, 128, 256]:
network_keys.append((name, [batch_size, seq_length]))
# dcgan
for name in ["dcgan"]:
for batch_size in [1, 4, 8]:
for image_size in [64]:
network_keys.append((name, [batch_size, 3, image_size, image_size]))
return network_keys
def main():
model_cache_dir = args.model_cache_dir
try:
os.makedirs(model_cache_dir, exist_ok=True)
except OSError:
print(f"Directory {model_cache_dir} cannot be created successfully.")
keys = _build_dataset()
for name, input_shape in tqdm(keys):
get_network(name=name, input_shape=input_shape, cache_dir=model_cache_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser() # pylint: disable=invalid-name
parser.add_argument(
"--model_cache_dir",
type=str,
help="Please provide the full path to the model cache dir.",
)
args = parser.parse_args() # pylint: disable=invalid-name
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/dataset_extract_tasks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import glob
import json
import os
import tvm
from tqdm import tqdm # type: ignore
from tvm import meta_schedule as ms
from tvm.ir import save_json
from tvm.meta_schedule.testing.relay_workload import _load_cache
from tvm.runtime import load_param_dict
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_cache_dir", type=str, help="Please provide the full path to the model cache dir."
)
parser.add_argument(
"--task_cache_dir", type=str, help="Please provide the full path to save extracted tasks."
)
parser.add_argument(
"--target", type=str, default="cuda", help="Please specify the target hardware for tuning."
)
return parser.parse_args()
# pylint: disable=too-many-locals
def extract_and_save_tasks(cache_file):
"""Extract tuning tasks and cache the nonspatial ones in the given directory.
Parameters
----------
cache_file : str
The filename of the cached model.
Returns
-------
None
"""
mod, params_bytearray, _ = _load_cache(args.model_cache_dir, cache_file)
params = load_param_dict(params_bytearray)
try:
extracted_tasks = ms.relay_integration.extract_tasks(mod, target=args.target, params=params)
except tvm.error.TVMError as error:
print(str(error))
return
task_cache_path = os.path.join(
args.task_cache_dir, cache_file.split(".")[0] + "_extracted_tasks.json"
)
is_spatial = tvm.get_global_func("tir.schedule.IsSpatialPrimFunc")
with open(task_cache_path, "w", encoding="utf8") as file:
for i, task in enumerate(extracted_tasks):
subgraph = task.dispatched[0]
prim_func = subgraph[subgraph.get_global_vars()[0]]
if not is_spatial(prim_func):
subgraph_str = save_json(subgraph)
json_obj = [task.task_name, json.loads(subgraph_str)]
json_str = json.dumps(json_obj)
assert "\n" not in json_str, "Failed to generate single line string."
if i == len(extracted_tasks) - 1:
file.write(json_str)
else:
file.write(json_str + "\n")
args = _parse_args() # pylint: disable=invalid-name
def main():
if not os.path.isdir(args.model_cache_dir):
raise Exception("Please provide a correct model cache dir.")
try:
os.makedirs(args.task_cache_dir, exist_ok=True)
except OSError:
print(f"Directory {args.task_cache_dir} cannot be created successfully.")
paths = glob.glob(os.path.join(args.model_cache_dir, "*.json")) # pylint: disable=invalid-name
for path in tqdm(paths):
filename = path.split("/")[-1]
extract_and_save_tasks(filename)
if __name__ == "__main__":
main()
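# Example invocation (sketch; the paths are placeholders):
#     python dataset_extract_tasks.py \
#         --model_cache_dir=/path/to/model_cache \
#         --task_cache_dir=/path/to/task_cache \
#         --target=cuda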
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/dataset_sample_candidates.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import glob
import json
import os
from typing import List
import tvm
from tqdm import tqdm # type: ignore
from tvm import meta_schedule as ms
from tvm.ir import load_json
from tvm.target import Target
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--task_cache_dir", type=str, help="Please provide the full path to the extracted tasks."
)
parser.add_argument(
"--candidate_cache_dir",
type=str,
help="Please provide the full path to save the sampled candidates.",
)
parser.add_argument(
"--target",
type=str,
default="nvidia/geforce-rtx-3070",
help="Please specify the target hardware for tuning.\
Note: for generating dataset, the hardware does not need to be present.",
)
parser.add_argument(
"--init_population_size",
type=int,
default=256,
help="The initial population size used in evolutionary search.",
)
parser.add_argument(
"--num_samples_per_task",
type=int,
default=400,
help="The number of samples to gather per tuning task.",
)
parser.add_argument(
"--num_trials_per_iter",
type=int,
default=64,
help="The number of trials per iteration in evolutionary search.",
)
parser.add_argument(
"--max_trials_per_task",
type=int,
default=400,
help="The maximum number of trials per task in evolutionary search.",
)
parser.add_argument(
"--max_retry_per_task",
type=int,
default=10,
help="The maximum number of retry attempts allowed.",
)
parser.add_argument(
"--file_group",
type=int,
default=0,
help="To enable running multiple scripts in parallel, files [idx * 10 : (idx + 1) * 10]\
in the sorted file list from the given directory will be run.",
)
return parser.parse_args()
# pylint: disable=too-many-locals
def sample_candidates(task, task_name, model_name):
"""Randomly sample candidates for a task and save the candidates in the given directory.
Parameters
----------
task : IRModule
The initial ir module used for generating the search space.
task_name : str
The name of the task.
model_name : str
The name of the model.
Returns
-------
None
"""
candidate_path = os.path.join(
args.candidate_cache_dir, model_name, task_name + "_candidates.json"
)
workload_path = os.path.join(args.candidate_cache_dir, model_name, task_name + "_workload.json")
database = ms.database.JSONDatabase(
path_workload=workload_path,
path_tuning_record=candidate_path,
)
sample_init_population = tvm.get_global_func(
"meta_schedule.SearchStrategyEvolutionarySearchSampleInitPopulation"
)
evolve_with_cost_model = tvm.get_global_func(
"meta_schedule.SearchStrategyEvolutionarySearchEvolveWithCostModel"
)
strategy = ms.search_strategy.EvolutionarySearch(init_measured_ratio=0.0)
target = Target(args.target)
context = ms.TuneContext(
mod=task,
target=target,
space_generator="post-order-apply",
search_strategy=strategy,
task_name=task_name,
)
context.initialize()
context.pre_tuning(
max_trials=args.max_trials_per_task,
num_trials_per_iter=args.num_trials_per_iter,
design_spaces=context.generate_design_space(),
database=database,
cost_model=ms.cost_model.RandomModel(), # type: ignore
)
all_states: List[tvm.tir.Schedule] = []
num_retry, itr = 0, 0
states = sample_init_population(strategy, args.init_population_size)
while len(all_states) < args.num_samples_per_task and num_retry < args.max_retry_per_task:
states = evolve_with_cost_model(strategy, states, len(states))
all_states += states
if len(states) == 0:
states = sample_init_population(strategy, args.init_population_size)
num_retry += 1
else:
num_retry = 0
print(f"iter: {itr}, number of states sampled: {len(all_states)}")
itr += 1
all_states = all_states[: args.num_samples_per_task]
workload = ms.database.Workload(context.mod)
database.commit_workload(context.mod)
for state in all_states:
database.commit_tuning_record(ms.database.TuningRecord(state.trace, workload))
args = _parse_args() # pylint: disable=invalid-name
def main():
if not os.path.isdir(args.task_cache_dir):
raise Exception("Please provide a correct task cache dir.")
try:
os.makedirs(args.candidate_cache_dir, exist_ok=True)
except OSError:
print(f"Directory {args.candidate_cache_dir} cannot be created successfully.")
task_paths = sorted(glob.glob(os.path.join(args.task_cache_dir, "*.json")))[
args.file_group * 10 : (args.file_group + 1) * 10
]
print(f"Selected models: {task_paths}")
for num, task_path in enumerate(task_paths):
print(f"Processing model {num} ...")
with open(task_path, "rb") as file:
tasks = file.readlines()
model_name = task_path.split("/")[-1][len("relay-") :][: -len("_extracted_tasks.json")]
os.makedirs(os.path.join(args.candidate_cache_dir, model_name), exist_ok=True)
for task_str in tqdm(tasks):
task_name, task_mod = json.loads(task_str)
task_mod = load_json(json.dumps(task_mod))
sample_candidates(task_mod, task_name, model_name)
if __name__ == "__main__":
main()
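# Example invocation (sketch; the paths are placeholders):
#     python dataset_sample_candidates.py \
#         --task_cache_dir=/path/to/task_cache \
#         --candidate_cache_dir=/path/to/candidate_cache \
#         --target=nvidia/geforce-rtx-3070 \
#         --file_group=0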
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/distributed_measure_candidates.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import glob
import os
from tqdm import tqdm # type: ignore
from tvm import meta_schedule as ms
from tvm.target import Target
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--candidate_cache_dir", type=str, help="Please provide the full path to the candidates."
)
parser.add_argument(
"--result_cache_dir", type=str, help="Please provide the full path to the result database."
)
parser.add_argument(
"--target",
type=str,
default="nvidia/nvidia-v100",
help="Please specify the target hardware for tuning context.",
)
parser.add_argument(
"--rpc_host", type=str, help="Please provide the private IPv4 address for the tracker."
)
parser.add_argument(
"--rpc_port", type=int, default=4445, help="Please provide the port for the tracker."
)
parser.add_argument(
"--rpc_key",
type=str,
default="p3.2xlarge",
help="Please provide the key for the rpc servers.",
)
parser.add_argument(
"--builder_timeout_sec",
type=int,
default=10,
help="The time for the builder session to time out.",
)
parser.add_argument(
"--min_repeat_ms", type=int, default=100, help="The time for preheating the gpu."
)
parser.add_argument(
"--runner_timeout_sec",
type=int,
default=100,
help="The time for the runner session to time out.",
)
parser.add_argument(
"--cpu_flush", type=bool, default=False, help="Whether to enable cpu cache flush or not."
)
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size of candidates sent to builder and runner each time.",
)
return parser.parse_args()
# pylint: disable=too-many-locals
def measure_candidates(database, builder, runner):
"""Send the candidates to builder and runner for distributed measurement,
and save the results in a new JSON database.
Parameters
----------
database : JSONDatabase
The database for candidates to be measured.
builder : Builder
The builder for building the candidates.
runner : Runner
The runner for measuring the candidates.
Returns
-------
None
"""
candidates, runner_results, build_fail_indices, run_fail_indices = [], [], [], []
context = ms.TuneContext(target=Target(args.target))
tuning_records = database.get_all_tuning_records()
for record in tuning_records:
candidates.append(record.as_measure_candidate())
with ms.Profiler() as profiler:
for idx in range(0, len(candidates), args.batch_size):
batch_candidates = candidates[idx : idx + args.batch_size]
context._set_measure_candidates(batch_candidates) # pylint: disable=protected-access
with ms.Profiler.timeit("build"):
context._send_to_builder(builder) # pylint: disable=protected-access
with ms.Profiler.timeit("run"):
context._send_to_runner(runner) # pylint: disable=protected-access
batch_runner_results = context._join() # pylint: disable=protected-access
runner_results.extend(batch_runner_results)
for i, result in enumerate(context.builder_results):
if result.error_msg is None:
ms.utils.remove_build_dir(result.artifact_path)
else:
build_fail_indices.append(i + idx)
context._clear_measure_state() # pylint: disable=protected-access
model_name, workload_name = database.path_workload.split("/")[-2:]
record_name = database.path_tuning_record.split("/")[-1]
new_database = ms.database.JSONDatabase(
path_workload=os.path.join(args.result_cache_dir, model_name, workload_name),
path_tuning_record=os.path.join(args.result_cache_dir, model_name, record_name),
)
workload = tuning_records[0].workload
new_database.commit_workload(workload.mod)
for i, (record, result) in enumerate(zip(tuning_records, runner_results)):
if result.error_msg is None:
new_database.commit_tuning_record(
ms.database.TuningRecord(
trace=record.trace,
workload=workload,
run_secs=[v.value for v in result.run_secs],
target=Target(args.target),
)
)
else:
run_fail_indices.append(i)
fail_indices_name = workload_name.replace("_workload.json", "_failed_indices.txt")
with open(
os.path.join(args.result_cache_dir, model_name, fail_indices_name), "w", encoding="utf8"
) as file:
file.write(" ".join([str(n) for n in run_fail_indices]))
print(
f"Builder time: {profiler.get()['build']}, Runner time: {profiler.get()['run']}\n\
Failed number of builds: {len(build_fail_indices)},\
Failed number of runs: {len(run_fail_indices)}"
)
args = _parse_args() # pylint: disable=invalid-name
def main():
builder = ms.builder.LocalBuilder(timeout_sec=args.builder_timeout_sec)
runner = ms.runner.RPCRunner(
rpc_config=ms.runner.RPCConfig(
tracker_host=args.rpc_host,
tracker_port=args.rpc_port,
tracker_key=args.rpc_key,
session_timeout_sec=args.runner_timeout_sec,
),
evaluator_config=ms.runner.EvaluatorConfig(
number=3,
repeat=1,
min_repeat_ms=args.min_repeat_ms,
enable_cpu_cache_flush=args.cpu_flush,
),
max_workers=os.cpu_count(),
)
if not os.path.isdir(args.candidate_cache_dir):
raise Exception("Please provide a correct candidate cache dir.")
try:
os.makedirs(args.result_cache_dir, exist_ok=True)
except OSError:
print(f"Directory {args.result_cache_dir} cannot be created successfully.")
model_dirs = glob.glob(os.path.join(args.candidate_cache_dir, "*"))
for model_dir in model_dirs:
model_name = model_dir.split("/")[-1]
os.makedirs(os.path.join(args.result_cache_dir, model_name), exist_ok=True)
all_tasks = glob.glob(os.path.join(model_dir, "*.json"))
workload_paths = []
for path in all_tasks:
if path.endswith("_workload.json"):
workload_paths.append(path)
for workload_path in tqdm(workload_paths):
candidate_path = workload_path.replace("_workload.json", "_candidates.json")
database = ms.database.JSONDatabase(
path_workload=workload_path,
path_tuning_record=candidate_path,
)
measure_candidates(database, builder, runner)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/dummy_object.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Dummy objects for testing."""
import random
from typing import List, Optional
from tvm.tir.schedule import Trace
from ..builder import BuilderInput, BuilderResult, PyBuilder
from ..mutator import PyMutator
from ..runner import PyRunner, PyRunnerFuture, RunnerFuture, RunnerInput, RunnerResult
from ..tune_context import TuneContext # pylint: disable=unused-import
from ..utils import derived_object
@derived_object
class DummyRunnerFuture(PyRunnerFuture):
def done(self) -> bool:
return True
def result(self) -> RunnerResult:
run_secs = [random.uniform(5, 30) for _ in range(random.randint(1, 10))]
return RunnerResult(run_secs, None)
@derived_object
class DummyBuilder(PyBuilder):
def build(self, build_inputs: List[BuilderInput]) -> List[BuilderResult]:
return [BuilderResult("test_path", None) for _ in build_inputs]
@derived_object
class DummyRunner(PyRunner):
def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
return [DummyRunnerFuture() for _ in runner_inputs] # type: ignore
@derived_object
class DummyMutator(PyMutator):
"""Dummy Mutator for testing"""
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
pass
def apply(self, trace: Trace, _) -> Optional[Trace]:
return Trace(trace.insts, {})
def clone(self):
return DummyMutator()
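# Usage sketch (illustrative): the dummies stand in for real builders and
# runners in unit tests, e.g.
#     builder = DummyBuilder()  # type: ignore
#     runner = DummyRunner()  # type: ignore
# so that scheduler and search-strategy logic can be exercised without
# compiling or executing any generated code.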
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/local_rpc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC tracker and server running locally"""
from tvm.rpc.tracker import Tracker
from tvm.rpc.server import Server
class LocalRPC:
"""A pair of RPC tracker/server running locally
Parameters
----------
tracker_host : str
The host URL of the tracker
tracker_port : int
The port of the tracker
tracker_key: str
The key used in the tracker to refer to a worker
"""
tracker_host: str
tracker_port: int
tracker_key: str
def __init__(
self,
tracker_key: str = "key",
silent: bool = False,
no_fork: bool = False,
) -> None:
self.tracker = Tracker(
silent=silent,
port=9190,
port_end=12345,
)
self.server = Server(
host="0.0.0.0",
is_proxy=False,
tracker_addr=(self.tracker.host, self.tracker.port),
key=tracker_key,
silent=silent,
no_fork=no_fork,
port=9190,
port_end=12345,
)
self.tracker_host = self.tracker.host
self.tracker_port = self.tracker.port
self.tracker_key = tracker_key
def __enter__(self):
return self
def __exit__(self, _type, _value, _traceback):
if hasattr(self, "server"):
del self.server
if hasattr(self, "tracker"):
del self.tracker
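# Usage sketch (illustrative):
#     with LocalRPC(tracker_key="local") as rpc:
#         ...  # rpc.tracker_host, rpc.tracker_port, and rpc.tracker_key can
#              # be plugged into an RPCConfig to drive an RPCRunner against
#              # the locally spawned tracker/server pair.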
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/relay_workload.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Workloads in Relay IR"""
# pylint: disable=import-outside-toplevel
import logging
import multiprocessing
import os
import pickle
from typing import Any, Dict, List, Optional, Tuple
import tvm
import tvm.relay.testing
from tvm import meta_schedule as ms
from tvm import relay
from tvm.ir import IRModule
from tvm.runtime import NDArray, load_param_dict, save_param_dict
from tvm.target import Target
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _get_network(
args: Tuple[str, List[int], Optional[str]]
) -> Tuple[IRModule, bytearray, Tuple[str, List[int], str]]:
name: str
input_shape: List[int]
layout: Optional[str]
name, input_shape, layout = args
if layout == "None":
layout = None
mod: IRModule
if name in [
"resnet_18",
"resnet_50",
"wide_resnet_50",
"resnext_50",
"mobilenet_v2",
"mobilenet_v3",
"inception_v3",
"densenet_121",
"resnet3d_18",
"vgg_16",
]:
import torch # type: ignore
from torchvision import models # type: ignore
assert layout is None or layout in ["NCHW", "NHWC"]
params: Dict[str, Any] = {}
if name in ["resnet_18", "resnet_50"]:
model = getattr(models, name.replace("_", ""))
elif name == "wide_resnet_50":
model = getattr(models, "wide_resnet50_2")
elif name == "resnext_50":
model = getattr(models, "resnext50_32x4d")
elif name == "mobilenet_v2":
model = getattr(models, name)
elif name == "mobilenet_v3":
model = getattr(models, name + "_large")
elif name == "inception_v3":
model = getattr(models, name)
params["aux_logits"] = False
elif name == "densenet_121":
model = getattr(models, name.replace("_", ""))
elif name == "resnet3d_18":
model = models.video.r3d_18
elif name == "vgg_16":
model = getattr(models, name.replace("_", ""))
try:
model = model(**params, weights=None)
except TypeError:
model = model(**params, pretrained=False)
dtype = "float32"
input_data = torch.randn(input_shape).type( # pylint: disable=no-member
{
"float32": torch.float32, # pylint: disable=no-member
}[dtype]
)
scripted_model = torch.jit.trace(model, input_data).eval() # type: ignore
input_name = "input0"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
passes = [relay.transform.RemoveUnusedFunctions()]
if layout is None or layout == "NHWC":
# PyTorch is imported as NCHW by default
passes.append(
relay.transform.ConvertLayout(
{
"nn.conv2d": ["NHWC", "default"],
"nn.conv3d": ["NDHWC", "default"],
"nn.max_pool2d": ["NHWC", "default"],
"nn.avg_pool2d": ["NHWC", "default"],
}
)
)
with tvm.transform.PassContext(opt_level=3):
mod = tvm.transform.Sequential(passes)(mod)
inputs = (input_name, input_shape, dtype)
elif name in ["bert_tiny", "bert_base", "bert_medium", "bert_large"]:
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# pip3 install transformers==3.5 torch==1.7
import torch # type: ignore
import transformers # type: ignore
assert layout is None
config_dict = {
"bert_tiny": transformers.BertConfig(
num_hidden_layers=6,
hidden_size=512,
intermediate_size=2048,
num_attention_heads=8,
return_dict=False,
),
"bert_base": transformers.BertConfig(
num_hidden_layers=12,
hidden_size=768,
intermediate_size=3072,
num_attention_heads=12,
return_dict=False,
),
"bert_medium": transformers.BertConfig(
num_hidden_layers=12,
hidden_size=1024,
intermediate_size=4096,
num_attention_heads=16,
return_dict=False,
),
"bert_large": transformers.BertConfig(
num_hidden_layers=24,
hidden_size=1024,
intermediate_size=4096,
num_attention_heads=16,
return_dict=False,
),
}
configuration = config_dict[name]
model = transformers.BertModel(configuration)
input_name = "input_ids"
input_dtype = "int64"
a = torch.randint(10000, input_shape) # pylint: disable=no-member
model.eval()
scripted_model = torch.jit.trace(model, [a], strict=False) # type: ignore
input_name = "input_ids"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
mod = relay.transform.FastMath()(mod)
mod = relay.transform.CombineParallelBatchMatmul()(mod)
inputs = (input_name, input_shape, input_dtype)
elif name == "dcgan":
assert layout is None
output_shape = input_shape
batch_size = output_shape[0]
oshape = output_shape[1:]
mod, params = relay.testing.dcgan.get_workload(
batch_size=batch_size,
oshape=oshape,
layout="NHWC",
)
inputs = ("data", [100], "float32")
else:
raise ValueError("Invalid name: " + name)
params_bytearray: bytearray = save_param_dict(params)
return mod, params_bytearray, inputs
def _load_cache(cache_dir: Optional[str], filename: str) -> Optional[List[Any]]:
if cache_dir is None:
return None
path = os.path.join(os.path.expanduser(cache_dir), filename)
if not os.path.exists(path):
return None
logger.info("Loaded from cached: %s", path)
with open(path, "rb") as i_f:
return pickle.load(i_f)
def _save_cache(cache_dir: Optional[str], filename: str, objects: List[Any]) -> None:
if cache_dir is None:
return
path = os.path.join(os.path.expanduser(cache_dir), filename)
with open(path, "wb") as o_f:
pickle.dump(objects, o_f)
def get_network(
name: str,
input_shape: List[int],
*,
layout: Optional[str] = None,
cache_dir: Optional[str] = None,
) -> Tuple[IRModule, Dict[str, NDArray], Tuple[str, List[int], str]]:
"""Get the symbol definition and random weight of a network
Parameters
----------
name : str
The name of the network.
input_shape : List[int]
The shape of the input tensor.
layout : Optional[str]
The layout of the input tensor. For vision models, the layout is by default NHWC.
cache_dir : Optional[str], optional
The directory to cache the generated network.
If not specified, the cache will be disabled.
Returns
-------
mod : IRModule
The IRModule representing the network.
params : Dict[str, NDArray]
The parameters of the network.
inputs : Tuple[str, List[int], str]
The name, shape and dtype of the input tensor.
"""
mod: IRModule
params: Dict[str, NDArray]
inputs: Tuple[str, List[int], str]
params_bytearray: bytearray
filename = f'relay-{name}-{layout}-{",".join(str(i) for i in input_shape)}.json'
cached = _load_cache(cache_dir, filename)
if cached is None:
with multiprocessing.Pool(processes=1) as pool:
result = pool.map(_get_network, [(name, input_shape, layout)])
((mod, params_bytearray, inputs),) = result
cached = [mod, params_bytearray, inputs]
_save_cache(cache_dir, filename, cached)
mod, params_bytearray, inputs = cached
params = load_param_dict(params_bytearray)
return mod, params, inputs
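# Usage sketch (illustrative; `cache_dir` is a placeholder path):
#     mod, params, (input_name, input_shape, input_dtype) = get_network(
#         "resnet_18", [1, 3, 224, 224], cache_dir="/path/to/model_cache"
#     )
# The first call constructs the model in a subprocess; later calls with the
# same arguments are served from the on-disk cache.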
def extract_from_relay(
mod: IRModule,
target: Target,
params: Optional[Dict[str, NDArray]],
name: str,
input_shape: List[int],
*,
cache_dir: Optional[str] = None,
) -> List[ms.ExtractedTask]:
"""Extract the tasks from a network.
Parameters
----------
mod : IRModule
The IRModule representing the network.
target : Target
The target that the network will be deployed to.
params : Optional[Dict[str, NDArray]]
The parameters of the network.
name : str
The name of the network.
input_shape : List[int]
The shape of the input tensor.
cache_dir : Optional[str]
The directory to cache the generated network.
If not specified, the cache will be disabled.
Returns
-------
extracted_tasks : List[ExtractedTask]
The extracted tasks.
"""
filename = f'tasks-{target.kind.name}-{name}-{",".join(str(i) for i in input_shape)}.json'
extracted_tasks = _load_cache(cache_dir, filename)
if extracted_tasks is None:
extracted_tasks = ms.relay_integration.extract_tasks(
mod=mod,
target=target,
params=params,
)
extracted_tasks = list(extracted_tasks)
_save_cache(cache_dir, filename, extracted_tasks)
return extracted_tasks
SUPPORTED = [
# TorchVision
"resnet_18",
"resnet_50",
"mobilenet_v2",
"mobilenet_v3",
"wide_resnet_50",
"resnext_50",
"resnet3d_18",
"inception_v3",
"densenet_121",
"vgg_16",
# Transformer
"bert_tiny",
"bert_base",
"bert_medium",
"bert_large",
# Relay testing
"dcgan",
]
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/space_generation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from typing import List, Optional, Tuple, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm import meta_schedule as ms
from tvm.ir import IRModule, structural_equal
from tvm.target import Target
from tvm.tir import Schedule
from tvm.tir.schedule import Trace
from tvm.tir.schedule.testing import verify_trace_roundtrip
def get_rules(
kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"],
types: Union[type, Tuple[type, ...]],
) -> List[ms.ScheduleRule]:
"""Get default schedule rules"""
rules = ms.ScheduleRule.create(kind)
return [rule for rule in rules if isinstance(rule, types)]
def generate_design_space(
kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"],
mod: IRModule,
target: Target,
types: Union[type, Tuple[type, ...]],
sch_rules: Optional[List[ms.ScheduleRule]] = None,
) -> List[Schedule]:
if sch_rules is None:
sch_rules = get_rules(kind, types)
else:
assert types is None
return ms.TuneContext(
mod=mod,
target=target,
space_generator=ms.space_generator.PostOrderApply(
sch_rules=sch_rules,
postprocs=[],
mutator_probs={},
),
task_name="test",
).generate_design_space()
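# Usage sketch (illustrative; assumes `mod` is a schedulable IRModule):
#     spaces = generate_design_space(
#         "llvm",
#         mod,
#         Target("llvm --num-cores=16"),
#         types=ms.schedule_rule.AutoInline,
#     )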
def _find_match_sketch_id(
mod: IRModule,
sketches: List[Schedule],
expected_mod: IRModule,
expected_decision: List[Tuple[str, List[int]]],
*,
debug_mask="all",
) -> Optional[int]:
for sketch_id, sketch in enumerate(sketches):
i = 0
new_decisions = {}
for inst in sketch.trace.insts:
if not inst.kind.name.startswith("Sample"):
continue
assert i < len(expected_decision)
if inst.kind.name == expected_decision[i][0]:
new_decisions[inst] = expected_decision[i][1]
i += 1
if len(new_decisions) != len(expected_decision):
continue
sch = Schedule(mod, debug_mask=debug_mask)
Trace(
insts=sketch.trace.insts,
decisions=new_decisions,
).apply_to_schedule(sch, remove_postproc=True)
if structural_equal(sch.mod, expected_mod):
verify_trace_roundtrip(sch=sch, mod=mod, debug_mask=debug_mask)
return sketch_id
return None
def check_sketches(
mod: IRModule,
sketches: List[Schedule],
expected_mods: List[IRModule],
expected_decisions: List[List[Tuple[str, List[int]]]],
*,
debug_mask="all",
):
assert len(expected_mods) == len(expected_decisions)
assert len(sketches) == len(expected_mods)
expected_mods = [
IRModule({"main": m}) if not isinstance(m, IRModule) else m for m in expected_mods
]
sketches = list(sketches)
for expected_id, (expected_mod, expected_decision) in enumerate(
zip(expected_mods, expected_decisions)
):
sketch_id = _find_match_sketch_id(
mod,
sketches,
expected_mod,
expected_decision,
debug_mask=debug_mask,
)
if sketch_id is None:
raise AssertionError(
f"Expected sketch #{expected_id} doesn't exist in the generated sketches."
)
sketches.pop(sketch_id)
def print_sketches(sketches: List[Schedule]):
for i, sch in enumerate(sketches):
print(f"###### {i}")
sch.mod.show()
for inst in sch.trace.insts:
if inst in sch.trace.decisions:
print(f'("{inst.kind.name}", {sch.trace.decisions[inst]}),')
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/te_workload.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Workloads in TE"""
# pylint: disable=missing-docstring
from typing import Tuple
from tvm import te, tir, topi
from tvm.target import Target
def batch_matmul_nkkm( # pylint: disable=invalid-name,missing-docstring
B: int,
N: int,
M: int,
K: int,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
x = te.placeholder((B, N, K), name="X", dtype=in_dtype)
y = te.placeholder((B, K, M), name="Y", dtype=in_dtype)
k = te.reduce_axis((0, K), name="k")
z = te.compute( # pylint: disable=invalid-name
(B, N, M),
lambda b, i, j: te.sum(
x[b][i][k].astype(out_dtype) * y[b][k][j].astype(out_dtype),
axis=[k],
),
name="Z",
)
return (x, y, z)
def conv1d_nlc( # pylint: disable=invalid-name,missing-docstring
N: int,
L: int,
CI: int,
CO: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
inputs = te.placeholder((N, L, CI), name="inputs", dtype=in_dtype)
weight = te.placeholder((kernel_size, CI // groups, CO), name="weight", dtype=in_dtype)
batch_size, in_len, _ = inputs.shape
k_len, channel_per_group, out_channel = weight.shape
out_channel_per_group = out_channel // groups
out_len = (in_len + 2 * padding - dilation * (k_len - 1) - 1) // stride + 1
rc = te.reduce_axis((0, channel_per_group), name="rc")
rl = te.reduce_axis((0, k_len), name="rl")
padded = topi.nn.pad(inputs, [0, padding, 0])
output = te.compute(
(batch_size, out_len, out_channel),
lambda n, l, co: te.sum(
(
padded[
n,
l * stride + rl * dilation,
co // out_channel_per_group * channel_per_group + rc,
].astype(out_dtype)
* weight[rl, rc, co].astype(out_dtype)
),
axis=[rl, rc],
),
name="conv1d_nlc",
)
return (inputs, weight, output)
def conv2d_nhwc( # pylint: disable=invalid-name,missing-docstring
N: int,
H: int,
W: int,
CI: int,
CO: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
inputs = te.placeholder((N, H, W, CI), name="inputs", dtype=in_dtype)
weight = te.placeholder(
(kernel_size, kernel_size, CI // groups, CO), name="weight", dtype=in_dtype
)
batch_size, in_h, in_w, _ = inputs.shape
k_h, k_w, channel_per_group, out_channel = weight.shape
out_channel_per_group = out_channel // groups
out_h = (in_h + 2 * padding - dilation * (k_h - 1) - 1) // stride + 1
out_w = (in_w + 2 * padding - dilation * (k_w - 1) - 1) // stride + 1
rh = te.reduce_axis((0, k_h), name="rh")
rw = te.reduce_axis((0, k_w), name="rw")
rc = te.reduce_axis((0, channel_per_group), name="rc")
padded = topi.nn.pad(inputs, [0, padding, padding, 0])
output = te.compute(
(batch_size, out_h, out_w, out_channel),
lambda n, h, w, co: te.sum(
(
padded[
n,
h * stride + rh * dilation,
w * stride + rw * dilation,
co // out_channel_per_group * channel_per_group + rc,
].astype(out_dtype)
* weight[rh, rw, rc, co].astype(out_dtype)
),
axis=[rh, rw, rc],
),
name="conv2d_nhwc",
)
return (inputs, weight, output)
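# Shape sanity check: with H = W = 224, padding = 3, kernel_size = 7, stride = 2,
# and dilation = 1 (ResNet's first conv),
# out_h = (224 + 2*3 - 1*(7-1) - 1) // 2 + 1 = 112.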
def conv3d_ndhwc( # pylint: disable=invalid-name,missing-docstring
N: int,
D: int,
H: int,
W: int,
CI: int,
CO: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
inputs = te.placeholder((N, D, H, W, CI), name="inputs", dtype=in_dtype)
weight = te.placeholder(
(kernel_size, kernel_size, kernel_size, CI // groups, CO), name="weight", dtype=in_dtype
)
batch_size, in_d, in_h, in_w, _ = inputs.shape
k_d, k_h, k_w, channel_per_group, out_channel = weight.shape
out_channel_per_group = out_channel // groups
out_d = (in_d + 2 * padding - dilation * (k_d - 1) - 1) // stride + 1
out_h = (in_h + 2 * padding - dilation * (k_h - 1) - 1) // stride + 1
out_w = (in_w + 2 * padding - dilation * (k_w - 1) - 1) // stride + 1
rd = te.reduce_axis((0, k_d), name="rd")
rh = te.reduce_axis((0, k_h), name="rh")
rw = te.reduce_axis((0, k_w), name="rw")
rc = te.reduce_axis((0, channel_per_group), name="rc")
padded = topi.nn.pad(inputs, [0, padding, padding, padding, 0])
output = te.compute(
(batch_size, out_d, out_h, out_w, out_channel),
lambda n, d, h, w, co: te.sum(
(
padded[
n,
d * stride + rd * dilation,
h * stride + rh * dilation,
w * stride + rw * dilation,
co // out_channel_per_group * channel_per_group + rc,
].astype(out_dtype)
* weight[rd, rh, rw, rc, co].astype(out_dtype)
),
axis=[rd, rh, rw, rc],
),
name="conv3d_ndhwc",
)
return (inputs, weight, output)
def depthwise_conv2d_nhwc( # pylint: disable=invalid-name,missing-docstring
N: int,
H: int,
W: int,
C: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
factor: int = 1,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
inputs = te.placeholder((N, H, W, C), dtype=in_dtype)
weight = te.placeholder((factor, kernel_size, kernel_size, C), dtype=in_dtype)
batch_size, in_h, in_w, in_channel = inputs.shape
factor, k_h, k_w, in_channel = weight.shape
out_channel = in_channel * factor
assert int(factor) == 1, "Not optimized for factor != 1"
out_h = (in_h + 2 * padding - dilation * (k_h - 1) - 1) // stride + 1
out_w = (in_w + 2 * padding - dilation * (k_w - 1) - 1) // stride + 1
rh = te.reduce_axis((0, k_h), name="rh")
rw = te.reduce_axis((0, k_w), name="rw")
padded = topi.nn.pad(inputs, [0, padding, padding, 0])
output = te.compute(
(batch_size, out_h, out_w, out_channel),
lambda n, h, w, c: te.sum(
(
padded[
n,
h * stride + rh * dilation,
w * stride + rw * dilation,
c // factor,
].astype(out_dtype)
* weight[c % factor, rh, rw, c // factor].astype(out_dtype)
),
axis=[rh, rw],
),
name="depth_conv2d_nhwc",
)
return (inputs, weight, output)
def conv2d_transpose_nhwc( # pylint: disable=invalid-name,missing-docstring
N: int,
H: int,
W: int,
CI: int,
CO: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
inputs = te.placeholder((N, H, W, CI), name="inputs", dtype=in_dtype)
weight = te.placeholder((kernel_size, kernel_size, CI, CO), name="weight", dtype=in_dtype)
batch, in_h, in_w, in_c = inputs.shape
filter_h, filter_w, in_c, out_c = weight.shape
stride_h, stride_w = (stride, stride)
# compute padding
fpad_top, fpad_left, fpad_bottom, fpad_right = topi.nn.get_pad_tuple(
padding, (filter_h, filter_w)
)
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right
# padding stage
padded = topi.nn.pad(
inputs,
[
0,
(bpad_top + stride_h - 1) // stride_h,
(bpad_left + stride_w - 1) // stride_w,
0,
],
[
0,
(bpad_bottom + stride_h - 1) // stride_h,
(bpad_right + stride_w - 1) // stride_w,
0,
],
)
# remove extra padding introduced by dilation
idx_div = te.indexdiv
idx_mod = te.indexmod
border_h = idx_mod(stride_h - idx_mod(bpad_top, stride_h), stride_h)
border_w = idx_mod(stride_w - idx_mod(bpad_left, stride_w), stride_w)
# dilation stage
strides = [1, stride_h, stride_w, 1]
n = len(padded.shape)
# We should embed this dilation directly into te.compute rather than creating a new te.compute.
# Only in this way can we use unroll to eliminate the multiplication of zeros.
def _dilate(*indices):
not_zero = []
index_tuple = []
for i in range(n):
if not strides[i] == 1:
index_tuple.append(idx_div(indices[i], strides[i]))
not_zero.append(idx_mod(indices[i], strides[i]).equal(0))
else:
index_tuple.append(indices[i])
if not_zero:
not_zero = te.all(*not_zero)
return te.if_then_else(not_zero, padded(*index_tuple), tir.const(0.0, padded.dtype))
return padded(*index_tuple)
# convolution stage
out_h = (in_h - 1) * stride_h - fpad_top - fpad_bottom + filter_h
out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w
rc = te.reduce_axis((0, in_c), name="rc")
rh = te.reduce_axis((0, filter_h), name="rh")
rw = te.reduce_axis((0, filter_w), name="rw")
output = te.compute(
(batch, out_h, out_w, out_c),
lambda n, h, w, co: te.sum(
_dilate(n, h + rh + border_h, w + rw + border_w, rc).astype(out_dtype)
* weight[filter_h - 1 - rh, filter_w - 1 - rw, rc, co].astype(out_dtype),
axis=[rh, rw, rc],
),
name="conv2d_transpose_nhwc",
)
return (inputs, weight, output)
def conv2d_capsule_nhwijc( # pylint: disable=invalid-name,missing-docstring
N: int,
H: int,
W: int,
CI: int,
CO: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
capsule_size: int = 4,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
inputs = te.placeholder(
(N, H, W, capsule_size, capsule_size, CI), name="inputs", dtype=in_dtype
)
weight = te.placeholder(
(kernel_size, kernel_size, capsule_size, capsule_size, CI, CO),
name="weight",
dtype=in_dtype,
)
batch_size, in_h, in_w, _, _, in_channel = inputs.shape
k_h, k_w, _, _, _, out_channel = weight.shape
out_h = (in_h + 2 * padding - kernel_size) // stride + 1
out_w = (in_w + 2 * padding - kernel_size) // stride + 1
rh = te.reduce_axis((0, k_h), name="rh")
rw = te.reduce_axis((0, k_w), name="rw")
cap_k = te.reduce_axis((0, capsule_size), name="cap_k")
rc = te.reduce_axis((0, in_channel), name="rc")
padded = topi.nn.pad(inputs, [0, padding, padding, 0, 0, 0])
output = te.compute(
(batch_size, out_h, out_w, capsule_size, capsule_size, out_channel),
lambda n, h, w, cap_i, cap_j, co: te.sum(
(
padded[n, h * stride + rh, w * stride + rw, cap_i, cap_k, rc].astype(out_dtype)
* weight[rh, rw, cap_k, cap_j, rc, co].astype(out_dtype)
),
axis=[rh, rw, cap_k, rc],
),
name="conv2d_capsule_nhwijc",
)
return (inputs, weight, output)
def norm_bmn( # pylint: disable=invalid-name,missing-docstring
B: int,
M: int,
N: int,
) -> Tuple[te.Tensor, te.Tensor]:
a = te.placeholder((B, M, N), name="A")
i = te.reduce_axis((0, M), name="i")
j = te.reduce_axis((0, N), name="j")
c = te.compute(
(B,),
lambda b: te.sum(a[b][i][j] * a[b][i][j], axis=[i, j]),
name="C",
)
d = te.compute((B,), lambda b: te.sqrt(c[b]), name="D")
return (a, d)
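# norm_bmn computes the per-batch Frobenius norm: D[b] = sqrt(sum_{i,j} A[b,i,j]^2).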
def conv2d_nhwc_without_layout_rewrite( # pylint: disable=invalid-name
Input: te.Tensor,
Filter: te.Tensor,
stride: int,
padding: int,
dilation: int,
out_dtype="float32",
):
"""A copy of `topi.nn.conv2d_nhwc` but without the 'layout_free` attribute.
We use this in single op and subgraph evaluation
because we don't want to introduce graph level optimization.
"""
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_height, in_width, in_channel = Input.shape # type: ignore
kernel_h, kernel_w, _channel, num_filter = Filter.shape # type: ignore
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = topi.nn.get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_channel = num_filter
out_height = topi.utils.simplify(
(in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1
)
out_width = topi.utils.simplify(
(in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1
)
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
PaddedInput = topi.nn.pad(Input, pad_before, pad_after, name="PaddedInput")
rc = te.reduce_axis((0, in_channel), name="rc")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
Output = te.compute(
(batch, out_height, out_width, out_channel),
lambda nn, yy, xx, ff: te.sum(
PaddedInput[
nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rc
].astype(out_dtype)
* Filter[ry, rx, rc, ff].astype(out_dtype), # type: ignore
axis=[ry, rx, rc],
),
name="Conv2dOutput",
tag="conv2d_nhwc",
)
return Output
def conv2d_nhwc_bn_relu( # pylint: disable=invalid-name,missing-docstring
N: int,
H: int,
W: int,
CI: int,
CO: int,
kernel_size: int,
strides: int,
padding: int,
dilation: int = 1,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor, te.Tensor, te.Tensor, te.Tensor]:
data = te.placeholder((N, H, W, CI), name="data", dtype=in_dtype)
kernel = te.placeholder((kernel_size, kernel_size, CI, CO), name="kernel", dtype=in_dtype)
bias = te.placeholder((CO,), name="bias")
bn_scale = te.placeholder((CO,), name="bn_scale")
bn_offset = te.placeholder((CO,), name="bn_offset")
OH = (H + 2 * padding - (kernel_size - 1) * dilation - 1) // strides + 1
OW = (W + 2 * padding - (kernel_size - 1) * dilation - 1) // strides + 1
conv = conv2d_nhwc_without_layout_rewrite(data, kernel, strides, padding, dilation, out_dtype)
conv = te.compute(
(N, OH, OW, CO), lambda i, j, k, l: conv[i, j, k, l] + bias[l], name="bias_add"
)
conv = te.compute(
(N, OH, OW, CO), lambda i, j, k, l: conv[i, j, k, l] * bn_scale[l], name="bn_mul"
)
conv = te.compute(
(N, OH, OW, CO), lambda i, j, k, l: conv[i, j, k, l] + bn_offset[l], name="bn_add"
)
out = topi.nn.relu(conv)
return (data, kernel, bias, bn_offset, bn_scale, out)
def transpose_batch_matmul( # pylint: disable=invalid-name,missing-docstring
batch: int,
seq_len: int,
n_head: int,
n_dim: int,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
query = te.placeholder((batch, seq_len, n_head, n_dim), name="query", dtype=in_dtype)
value = te.placeholder((batch, seq_len, n_head, n_dim), name="value", dtype=in_dtype)
query_T = te.compute(
(batch, n_head, seq_len, n_dim),
lambda b, h, l, d: query[b, l, h, d],
name="query_T",
)
value_T = te.compute(
(batch, n_head, n_dim, seq_len),
lambda b, h, d, l: value[b, l, h, d],
name="value_T",
)
k = te.reduce_axis((0, n_dim), name="k")
out = te.compute(
(batch, n_head, seq_len, seq_len),
lambda b, h, i, j: te.sum(
query_T[b, h, i, k].astype(out_dtype) * value_T[b, h, k, j].astype(out_dtype), axis=[k]
),
name="C",
)
return (query, value, out)
def conv2d_winograd_nhwc( # pylint: disable=invalid-name,missing-docstring
N: int,
H: int,
W: int,
CI: int,
CO: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
tile_size: int = 4,
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
from tvm.topi.nn.conv2d import ( # pylint: disable=import-outside-toplevel
_conv2d_winograd_nhwc_impl,
)
target = Target.current(allow_none=True)
if target is not None and target.kind.name == "cuda":
write_cache_level = 3
else:
write_cache_level = 2
data = te.placeholder((N, H, W, CI), "float32", name="data")
weight = te.placeholder((kernel_size, kernel_size, CO, CI), "float32", name="weight")
out = _conv2d_winograd_nhwc_impl(
data,
weight,
stride,
padding,
dilation,
"float32",
pre_computed=True,
auto_scheduler_rewritten_layout="",
meta_schedule_original_shape=None,
tile_size=tile_size,
write_cache_level=write_cache_level,
)
return (data, weight, out)
def conv2d_winograd_nchw( # pylint: disable=invalid-name,missing-docstring
N: int,
H: int,
W: int,
CI: int,
CO: int,
kernel_size: int,
stride: int = 1,
padding: int = 1,
dilation: int = 1,
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
from tvm.topi.cuda.conv2d_winograd import ( # pylint: disable=import-outside-toplevel
_infer_tile_size,
)
from tvm.topi.nn.conv2d import ( # pylint: disable=import-outside-toplevel
_conv2d_winograd_nchw_impl,
)
data = te.placeholder((N, CI, H, W), "float32", name="data")
weight = te.placeholder((kernel_size, kernel_size, CI, CO), "float32", name="weight")
out = _conv2d_winograd_nchw_impl(
data,
weight,
stride,
padding,
dilation,
"float32",
pre_computed=True,
auto_scheduler_rewritten_layout="",
meta_schedule_original_shape=None,
tile_size=_infer_tile_size(data, weight),
)
return (data, weight, out)
def matmul(
n: int, m: int, k: int, in_dtype: str = "float32", out_dtype: str = "float32"
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
a = te.placeholder((n, k), name="A", dtype=in_dtype)
b = te.placeholder((k, m), name="B", dtype=in_dtype)
k = te.reduce_axis((0, k), name="k")
c = te.compute(
(n, m),
lambda i, j: te.sum(a[i, k].astype(out_dtype) * b[k, j].astype(out_dtype), axis=[k]),
name="C",
)
return (a, b, c)
def matmul_relu(
n: int, m: int, k: int, in_dtype: str = "float32", out_dtype: str = "float32"
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
a = te.placeholder((n, k), name="A", dtype=in_dtype)
b = te.placeholder((k, m), name="B", dtype=in_dtype)
k = te.reduce_axis((0, k), name="k")
c = te.compute(
(n, m),
lambda i, j: te.sum(a[i, k].astype(out_dtype) * b[k, j].astype(out_dtype), axis=[k]),
name="C",
)
d = topi.nn.relu(c) # pylint: disable=invalid-name
return (a, b, d)
def conv2d_nchw( # pylint: disable=invalid-name
n: int,
h: int,
w: int,
ci: int,
co: int,
kh: int,
kw: int,
stride: int,
padding: int,
dilation: int = 1,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
x = te.placeholder((n, ci, h, w), name="X", dtype=in_dtype)
w = te.placeholder((co, ci, kh, kw), name="W", dtype=in_dtype)
y = topi.nn.conv2d_nchw(
Input=x, Filter=w, stride=stride, padding=padding, dilation=dilation, out_dtype=out_dtype
)
return (x, w, y)
def conv2d_nchw_bias_bn_relu( # pylint: disable=invalid-name
n: int,
h: int,
w: int,
ci: int,
co: int,
kh: int,
kw: int,
stride: int,
padding: int,
dilation: int = 1,
in_dtype: str = "float32",
out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor, te.Tensor, te.Tensor, te.Tensor]:
oh = (h + 2 * padding - (kh - 1) * dilation - 1) // stride + 1 # pylint: disable=invalid-name
ow = (w + 2 * padding - (kw - 1) * dilation - 1) // stride + 1 # pylint: disable=invalid-name
x = te.placeholder((n, ci, h, w), name="X", dtype=in_dtype)
w = te.placeholder((co, ci, kh, kw), name="W", dtype=in_dtype)
b = te.placeholder((co, 1, 1), name="B", dtype=out_dtype)
bn_scale = te.placeholder((co, 1, 1), name="bn_scale", dtype=out_dtype)
bn_offset = te.placeholder((co, 1, 1), name="bn_offset", dtype=out_dtype)
y = topi.nn.conv2d_nchw(
Input=x, Filter=w, stride=stride, padding=padding, dilation=dilation, out_dtype=out_dtype
)
y = te.compute((n, co, oh, ow), lambda i, j, k, l: y[i, j, k, l] + b[j, 0, 0], name="bias_add")
y = te.compute(
(n, co, oh, ow), lambda i, j, k, l: y[i, j, k, l] * bn_scale[j, 0, 0], name="bn_mul"
)
y = te.compute(
(n, co, oh, ow), lambda i, j, k, l: y[i, j, k, l] + bn_offset[j, 0, 0], name="bn_add"
)
y = topi.nn.relu(y)
return (x, w, b, bn_scale, bn_offset, y)
def max_pool2d_nchw( # pylint: disable=invalid-name
n: int,
h: int,
w: int,
ci: int,
padding: int,
) -> Tuple[te.Tensor, te.Tensor]: # pylint: disable=invalid-name
x = te.placeholder((n, ci, h, w), name="X")
y = topi.nn.pool2d(x, [2, 2], [1, 1], [1, 1], [padding, padding, padding, padding], "max")
return (x, y)
def softmax_mn(m, n) -> Tuple[te.Tensor, te.Tensor]: # pylint: disable=invalid-name
a = te.placeholder((m, n), name="A")
b = topi.nn.softmax(a, axis=1)
return (a, b)
def create_te_workload(name: str, idx: int) -> tir.PrimFunc:
workload_func, params = CONFIGS[name]
return te.create_prim_func(workload_func(*params[idx])) # type: ignore
CONFIGS = {
"C1D": (
conv1d_nlc,
[
# derived from conv2d_shapes
(1, 256, 64, 128, 3, 2, 1),
# (1, 256, 64, 128, 1, 2, 0),
# (1, 256, 64, 64, 1, 1, 0),
# (1, 128, 128, 256, 3, 2, 1),
(1, 128, 128, 256, 1, 2, 0),
# (1, 128, 128, 128, 3, 1, 1),
# (1, 64, 256, 512, 3, 2, 1),
# (1, 64, 256, 512, 1, 2, 0),
(1, 64, 256, 256, 5, 1, 2),
(1, 32, 512, 512, 3, 1, 1),
],
),
"C2D": (
conv2d_nhwc,
[
# all conv2d layers in resnet-18
(1, 224, 224, 3, 64, 7, 2, 3),
# (1, 56, 56, 64, 128, 3, 2, 1),
# (1, 56, 56, 64, 128, 1, 2, 0),
# (1, 56, 56, 64, 64, 3, 1, 1),
(1, 56, 56, 64, 64, 1, 1, 0),
# (1, 28, 28, 128, 256, 3, 2, 1),
# (1, 28, 28, 128, 256, 1, 2, 0),
# (1, 28, 28, 128, 128, 3, 1, 1),
# (1, 14, 14, 256, 512, 3, 2, 1),
# (1, 14, 14, 256, 512, 1, 2, 0),
(1, 14, 14, 256, 256, 3, 1, 1),
(1, 7, 7, 512, 512, 3, 1, 1),
],
),
"C3D": (
conv3d_ndhwc,
[
# Derived from conv2d_shapes. Use depth=16 for all configurations
(1, 16, 224, 224, 3, 64, 7, 2, 3),
# (1, 16, 56, 56, 64, 128, 3, 2, 1),
# (1, 16, 56, 56, 64, 128, 1, 2, 0),
# (1, 16, 56, 56, 64, 64, 3, 1, 1),
(1, 16, 56, 56, 64, 64, 1, 1, 0),
# (1, 16, 28, 28, 128, 256, 3, 2, 1),
# (1, 16, 28, 28, 128, 256, 1, 2, 0),
# (1, 16, 28, 28, 128, 128, 3, 1, 1),
# (1, 16, 14, 14, 256, 512, 3, 2, 1),
# (1, 16, 14, 14, 256, 512, 1, 2, 0),
(1, 16, 14, 14, 256, 256, 3, 1, 1),
(1, 16, 7, 7, 512, 512, 3, 1, 1),
],
),
"GMM": (
batch_matmul_nkkm,
[
(1, 128, 128, 128),
(1, 512, 32, 512),
(1, 512, 512, 512),
(1, 1024, 1024, 1024),
],
),
"GRP": (
conv2d_nhwc,
[
# Derived from conv2d_shapes. Use group=4 for all configurations
(1, 56, 56, 64, 128, 3, 2, 1, 1, 4),
# (1, 56, 56, 64, 128, 1, 2, 0 , 1, 4),
# (1, 56, 56, 64, 64, 3, 1, 1 , 1, 4),
(1, 56, 56, 64, 64, 1, 1, 0, 1, 4),
# (1, 28, 28, 128, 256, 3, 2, 1, 1, 4),
# (1, 28, 28, 128, 256, 1, 2, 0, 1, 4),
# (1, 28, 28, 128, 128, 3, 1, 1, 1, 4),
# (1, 14, 14, 256, 512, 3, 2, 1, 1, 4),
# (1, 14, 14, 256, 512, 1, 2, 0, 1, 4),
(1, 14, 14, 256, 256, 3, 1, 1, 1, 4),
(1, 7, 7, 512, 512, 3, 1, 1, 1, 4),
],
),
"DIL": (
conv2d_nhwc,
[
# Derived from conv2d_shapes. Use dilation=2 for all configurations
(1, 224, 224, 3, 64, 7, 2, 3, 2),
# (1, 56, 56, 64, 128, 3, 2, 1 , 2),
# (1, 56, 56, 64, 128, 1, 2, 0 , 2),
# (1, 56, 56, 64, 64, 3, 1, 1 , 2),
(1, 56, 56, 64, 64, 1, 1, 0, 2),
# (1, 28, 28, 128, 256, 3, 2, 1, 2),
# (1, 28, 28, 128, 256, 1, 2, 0, 2),
# (1, 28, 28, 128, 128, 3, 1, 1, 2),
# (1, 14, 14, 256, 512, 3, 2, 1, 2),
# (1, 14, 14, 256, 512, 1, 2, 0, 2),
(1, 14, 14, 256, 256, 3, 1, 1, 2),
(1, 7, 7, 512, 512, 3, 1, 1, 2),
],
),
"DEP": (
depthwise_conv2d_nhwc,
[
# all depthwise conv2d layers in mobilenet
(1, 112, 112, 32, 3, 1, 1),
(1, 112, 112, 64, 3, 2, 1),
# (1, 56, 56, 128, 3, 1, 1),
# (1, 56, 56, 128, 3, 2, 1),
# (1, 28, 28, 256, 3, 1, 1),
# (1, 28, 28, 256, 3, 2, 1),
# (1, 14, 14, 512, 3, 1, 1),
(1, 14, 14, 512, 3, 2, 1),
(1, 7, 7, 1024, 3, 1, 1),
],
),
"T2D": (
conv2d_transpose_nhwc,
[
# all conv2d transpose layers in DCGAN
(1, 4, 4, 512, 256, 4, 2, 1),
(1, 8, 8, 256, 128, 4, 2, 1),
(1, 16, 16, 128, 64, 4, 2, 1),
(1, 32, 32, 64, 3, 4, 2, 1),
],
),
"CAP": (
conv2d_capsule_nhwijc,
[
# all conv2d capsule layers in "Matrix Capsules with EM Routing" (ICLR 2018)
(1, 16, 16, 32, 32, 3, 2, 1),
(1, 8, 8, 32, 32, 3, 1, 1),
(1, 16, 16, 8, 16, 3, 2, 1),
(1, 8, 8, 16, 16, 3, 1, 1),
],
),
"NRM": (
norm_bmn,
[
(1, 256, 256),
(1, 512, 512),
(1, 1024, 1024),
(1, 4096, 1024),
],
),
"SFM": (
softmax_mn,
[
(256, 256),
(512, 512),
(1024, 1024),
(2048, 2048),
],
),
"CBR": (
conv2d_nhwc_bn_relu,
[
(1, 224, 224, 3, 64, 7, 2, 3),
(1, 56, 56, 64, 128, 3, 2, 1),
(1, 28, 28, 128, 256, 1, 2, 0),
(1, 7, 7, 512, 512, 3, 1, 1),
],
),
"TBG": (
transpose_batch_matmul,
[
(1, 128, 12, 64),
(1, 128, 16, 64),
(1, 64, 12, 128),
(1, 128, 12, 128),
],
),
"C2D_WIN_NHWC": (
conv2d_winograd_nhwc,
[
(1, 14, 14, 128, 128, 6),
],
),
"C2D_WIN_NCHW": (
conv2d_winograd_nchw,
[
(1, 56, 56, 64, 64, 6),
],
),
}
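# Usage sketch: `create_te_workload` indexes into CONFIGS, so e.g.
#
#   func = create_te_workload("GMM", 0)  # batch_matmul_nkkm(1, 128, 128, 128)
#   print(func.script())
#
# builds the first batch-matmul configuration as a TIR PrimFunc.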
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/tlcbench.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,import-outside-toplevel
# type: ignore
"""Model loader for TLCBench."""
import logging
import multiprocessing
import os
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
log = logging.getLogger(__name__)
def _convert(args):
onnx_model, shape_dict, json_path, params_path = args
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True)
seq = tvm.transform.Sequential(
[relay.transform.InferType(), relay.transform.FakeQuantizationToInteger(use_qat=True)]
)
mod = seq(mod)
with open(json_path, "w") as fo:
fo.write(tvm.ir.save_json(mod))
with open(params_path, "wb") as fo:
fo.write(relay.save_param_dict(params))
def convert_to_qnn(onnx_path, json_path, params_path, input_info):
"""Run the ONNX frontend and the FQ2I pass. The output is serialized to disk."""
import onnx
onnx_model = onnx.load(onnx_path)
shape_dict = dict(input_info)
log.info("Converting te ONNX model to Relay and running the FQ2I pass, it may take a while...")
with multiprocessing.Pool(processes=1) as pool:
pool.map(_convert, [(onnx_model, shape_dict, json_path, params_path)])
def deserialize_relay(json_path, params_path):
with open(json_path, "r") as fi:
mod = tvm.ir.load_json(fi.read())
with open(params_path, "rb") as fi:
params = relay.load_param_dict(fi.read())
return mod, params
def load_quantized_bert_base(batch_size=1, seq_len=384):
"""
Load the quantized bert-base model from TLCBench, possibly downloading it from github
and caching the converted int8 QNN module to disk.
In addition to returning the Relay module and its parameters, it also returns input name
and shape information, which can be used at deployment time as follows:
```
mod, params, input_info = load_quantized_bert_base()
...
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
for name, shape in input_info:
arr = np.random.uniform(1, 10, size=shape).astype("int64")
runtime.set_input(name, arr)
runtime.run()
```
"""
url = "https://github.com/tlc-pack/TLCBench/raw/main/models/bert-base-qat.onnx"
log.info("Downloading quantized bert-base model.")
onnx_path = download_testdata(url, "bert-base-qat.onnx", module="tlcbench")
data_dir = os.path.dirname(onnx_path)
json_path = os.path.join(data_dir, "bert_base_int8_b%d_s%d.json" % (batch_size, seq_len))
params_path = os.path.join(data_dir, "bert_base_int8_b%d_s%d.params" % (batch_size, seq_len))
# Input names and order encoded in the ONNX model
input_info = [
("input_ids", (batch_size, seq_len)),
("segment_ids", (batch_size, seq_len)),
("input_mask", (batch_size, seq_len)),
]
if not os.path.exists(json_path) or not os.path.exists(params_path):
convert_to_qnn(onnx_path, json_path, params_path, input_info)
def deserialize():
try:
return deserialize_relay(json_path, params_path)
except ValueError:
# A serialized Relay json file may become invalid after TVM bump
# Update the serialized model and try loading again
convert_to_qnn(onnx_path, json_path, params_path, input_info)
return deserialize_relay(json_path, params_path)
mod, params = deserialize()
return mod, params, input_info
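# Usage sketch (the "llvm" target is an assumption; any Relay-supported target works):
#
#   mod, params, input_info = load_quantized_bert_base(batch_size=1, seq_len=384)
#   with tvm.transform.PassContext(opt_level=3):
#       lib = relay.build(mod, target="llvm", params=params)
#
# after which `lib` can be loaded into the graph executor as in the docstring above.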
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/torchbench/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/torchbench/run.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This script is for benchmarking TVM performance on models from TorchBench.
It uses the TorchDynamo as the frontend to ingest models into TVM, and it also
leverages the benchmark util from TorchDynamo.
TorchDynamo (https://github.com/pytorch/torchdynamo) and TorchBench
(https://github.com/pytorch/benchmark) need to be in the parent directory of TVM.
We need a local clone of these repos because torchbench and the benchmark runner
in TorchDynamo aren't designed to be used as Python packages.
To setup the environment, run the following commands in the parent directory of TVM and with
the appropriate Python environment:
```bash
# torchdynamo requires nightly pytorch. If it fails to find the specified version, try
# installing the latest nightly pytorch.
pip3 install --pre \
--extra-index-url https://download.pytorch.org/whl/nightly/cu116 \
torch==1.13.0.dev20220926 \
torchvision==0.14.0.dev20220926 \
torchtext==0.14.0.dev20220926
git clone https://github.com/pytorch/torchdynamo
pushd torchdynamo
git checkout c537639f9712621dc04ca09908796dbbe86c354b
pip install -e .
popd
sudo apt install git-lfs # git lfs is used for TorchBench
git clone https://github.com/pytorch/benchmark
pushd benchmark
python install.py --continue_on_fail # fambench_xlmr might fail to install
popd
```
To run a benchmark, the script can be run under 'tune' mode by
```bash
python python/tvm/meta_schedule/testing/torchbench/run.py \
--mode tune \
--model resnet50 \
--target "nvidia/geforce-rtx-3070" \
--work-dir /path/to/work/dir/ \
--num-trials 20000 \
--rpc-host <rpc tracker host for tuning> \
--rpc-port <rpc tracker port for tuning> \
--rpc-key <rpc key> \
```
All available target tags (like nvidia/geforce-rtx-3070) can be found at
https://github.com/apache/tvm/blob/main/src/target/tag.cc
Then the script can be run under 'eval' mode to actually benchmark the performance,
using the tuning database under the work directory. This can be executed on a different
machine than the one that executed the tuning (the database JSON files need to be inside
the work directory).
```bash
python python/tvm/meta_schedule/testing/torchbench/run.py \
--mode eval \
--model resnet50 \
--target "nvidia/geforce-rtx-3070" \
--work-dir /path/to/work/dir/ \
--num-trials 0
```
Alternatively, both tuning and evaluation can be done in a single run on the same machine,
by
```bash
python python/tvm/meta_schedule/testing/torchbench/run.py \
--mode all \
--model resnet50 \
--target "llvm -num-cores 6" \
--work-dir /path/to/work/dir/ \
--num-trials 0
```
"""
# pylint: disable=logging-format-interpolation
import argparse
import contextlib
import logging
import os
import sys
import warnings
from collections import defaultdict
from enum import Enum
from typing import Callable, Dict, List, Tuple
import numpy as np # type: ignore
import torch # type: ignore
from scipy.stats import ttest_ind # type: ignore
import tvm
import tvm.relay
from tvm import meta_schedule as ms
from tvm._ffi import get_global_func
from tvm.contrib.graph_executor import GraphModule
from tvm.meta_schedule.testing.torchbench.utils import (
load_torchdynamo_benchmark_runner,
same,
timed,
)
from tvm.runtime.vm import VirtualMachine
from tvm.support import describe
# Needs to be imported after the .utils is executed
import torchdynamo # type: ignore # isort: skip, pylint: disable=wrong-import-order
class RunMode(Enum):
"""
The running mode of this script. Available values are:
- tune: Only tune the model and create the tuning database.
- eval: Only benchmark model using pre-existing tuning database.
- all: Run both tuning and benchmark
"""
ALL = "all"
TUNE = "tune"
EVAL = "eval"
@property
def should_tune(self):
"""
Returns whether it should tune the model.
"""
return self != RunMode.EVAL
@property
def should_eval(self):
"""
Returns whether it should actually benchmark the model.
"""
return self != RunMode.TUNE
class ResultComparisonMetric(Enum):
"""
This controls how the results are compared with the expected values during the
accuracy check.
- cosine: Use the cosine similarity. It should be greater than 0.99.
- allclose-1e-4: Use the max elementwise absolute difference. It should be less than 1e-4.
"""
COSINE = "cosine"
ALLCLOSE = "allclose-1e-4"
def parse_args():
"""
Parse arguments
"""
args = argparse.ArgumentParser()
args.add_argument(
"--mode",
type=RunMode,
default=RunMode.ALL,
help=RunMode.__doc__,
)
args.add_argument(
"--batch-size",
type=int,
default=None,
help="The batch size of model input. Use TorchBench's default value if not specified.",
)
args.add_argument(
"--result-metric",
type=ResultComparisonMetric,
default=ResultComparisonMetric.ALLCLOSE,
help=ResultComparisonMetric.__doc__,
)
args.add_argument(
"--benchmark-repeat",
type=int,
default=10,
help="The number of times to repeat the benchmark measurement.",
)
args.add_argument(
"--benchmark-warmup-rounds",
type=int,
default=5,
help="The number of rounds to warmup before starting to measure the performance.",
)
# Model selection
args.add_argument(
"--model",
type=str,
required=True,
help="""
The name of the model to run. It should be a directory name under
https://github.com/pytorch/benchmark/tree/main/torchbenchmark/models.
""",
)
args.add_argument(
"--float32",
action="store_true",
help="""
Cast model and inputs to fp32
""",
)
# Tuning-related config
args.add_argument(
"--target",
type=tvm.target.Target,
required=True,
help="The target to tune and run benchmark for.",
)
args.add_argument(
"--work-dir",
type=str,
required=True,
help="""
The working directory to save intermediate results and store databases for compilation.
""",
)
args.add_argument(
"--strategy",
type=str,
default="evolutionary",
help="The search strategy used by MetaSchdule.",
)
args.add_argument(
"--num-trials",
type=int,
required=True,
help="The max number of trials to run MetaSchedule.",
)
args.add_argument(
"--max-trials-per-task",
type=int,
default=None,
help="""
The max number of trials to run per task extracted in MetaSchedule.
By default it's the same as --num-trials.
""",
)
args.add_argument(
"--backend",
type=str,
choices=["graph", "vm"],
default="graph",
help="The backend to use for relay compilation(graph / vm).",
)
# TODO(@yelite): Add a layout arg to transform the network after
# ingesting into Relay and before feeding into MetaSchedule.
# Evaluator-related config
args.add_argument(
"--number",
type=int,
default=3,
help="The number of times to run the model for taking average in a single measurement.",
)
args.add_argument(
"--repeat",
type=int,
default=1,
help="The number of times to repeat the measurement.",
)
args.add_argument(
"--min-repeat-ms",
type=int,
default=100,
help="""
Minimum repeat time in ms. The number of runs will be increased if the actual
repeat time is lower than this.
""",
)
args.add_argument(
"--adaptive-training",
action="store_true",
help="Whether to use adaptive training for cost model.",
)
args.add_argument(
"--cpu-flush",
action="store_true",
help="Whether to perform CPU cache flush.",
)
# RPC-related args
args.add_argument(
"--rpc-host",
type=str,
help="Host of the RPC Tracker for tuning. Use LocalRunner if not provided",
)
args.add_argument(
"--rpc-port",
type=int,
help="Port of the RPC Tracker for tuning",
)
args.add_argument(
"--rpc-key",
type=str,
help="Key of the RPC Tracker for tuning",
)
parsed = args.parse_args()
# Trim all args, otherwise it confuses the arg parser of timm_efficientdet
sys.argv = sys.argv[:1]
return parsed
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
ARGS = parse_args()
IS_CUDA = ARGS.target.kind.name == "cuda"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
logger.setLevel(logging.INFO)
runner = load_torchdynamo_benchmark_runner( # pylint: disable=invalid-name
IS_CUDA,
cosine_similarity=ARGS.result_metric == ResultComparisonMetric.COSINE,
float32=ARGS.float32,
)
def get_meta_schedule_runner() -> ms.runner.PyRunner:
"""
Get the Runner for MetaSchedule.
It returns RPCRunner if --rpc-host is given, otherwise it returns LocalRunner
"""
if ARGS.rpc_host is not None:
assert ARGS.rpc_port is not None, "Missing rpc_port"
assert ARGS.rpc_key is not None, "Missing rpc_key"
return ms.runner.RPCRunner(
rpc_config=ms.runner.RPCConfig(
tracker_host=ARGS.rpc_host,
tracker_port=ARGS.rpc_port,
tracker_key=ARGS.rpc_key,
session_timeout_sec=600,
),
evaluator_config=ms.runner.EvaluatorConfig(
number=ARGS.number,
repeat=ARGS.repeat,
min_repeat_ms=ARGS.min_repeat_ms,
enable_cpu_cache_flush=ARGS.cpu_flush,
),
alloc_repeat=1,
)
else:
warnings.warn("Falling back to MetaSchedule LocalRunner because --rpc-host isn't provided.")
return ms.runner.LocalRunner()
def get_graph_executor_forward(
graph_executor_factory: tvm.runtime.Module, device: tvm.runtime.Device
) -> Callable:
"""
Get the forward function for graph executor, in order to integrate with TorchDynamo.
"""
# This package has to be imported lazily, so that the C++ PyTorch integration is
# loaded after the transformers package is imported when loading the model.
# Otherwise there will be a segfault caused by the protobuf library.
import tvm.contrib.torch # pylint: disable=import-outside-toplevel, unused-import, redefined-outer-name
save_runtime_mod = get_global_func("tvmtorch.save_runtime_mod", allow_missing=True)
if save_runtime_mod is None:
warnings.warn(
"C++ PyTorch TVM integration is missing. Fallback to Python forward function."
"Build TVM with 'USE_PT_TVMDSOOP' to enable the C++ custom operator"
)
mod = GraphModule(graph_executor_factory["default"](device))
def forward(*args):
if IS_CUDA:
torch.cuda.synchronize()
args = tuple(arg.detach().contiguous() for arg in args)
for idx, arg in enumerate(args, 0):
mod.set_input(
f"inp_{idx}",
tvm.nd.from_dlpack(arg),
)
mod.run()
device.sync()
result = [torch.from_dlpack(mod.get_output(i)) for i in range(mod.get_num_outputs())]
return result
return forward
else:
save_runtime_mod(graph_executor_factory.module)
module = torch.classes.tvm_torch.GraphExecutorFactoryWrapper()
def forward(*args): # type: ignore # isort: skip, pylint: disable=function-redefined
return module.forward(args)
return forward
def get_vm_forward(virtual_machine: VirtualMachine, device: tvm.runtime.Device) -> Callable:
"""
Get the forward function for VM, in order to integrate with TorchDynamo.
"""
def forward(*args):
if IS_CUDA:
torch.cuda.synchronize()
args = tuple(tvm.nd.from_dlpack(arg.detach().contiguous()) for arg in args)
result = virtual_machine.invoke("main", *args)
device.sync()
if isinstance(result, tvm.nd.NDArray):
result = [result]
return [torch.from_dlpack(m) for m in result]
return forward
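# Both forward wrappers above exchange tensors with PyTorch through DLPack, which
# shares storage instead of copying. A minimal round trip (illustrative only):
#
#   x = torch.arange(4, dtype=torch.float32)
#   y = tvm.nd.from_dlpack(x.contiguous())  # zero-copy view as a tvm.nd.NDArray
#   z = torch.from_dlpack(y)                # back to a torch.Tensor, same storage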
def should_skip_subgraph(graph_module: torch.fx.GraphModule) -> bool:
"""
Returns whether it should skip optimizing the input graph module.
The graph could be empty or contain only nodes that call functions
for their side effects.
"""
graph = graph_module.graph
inputs = [n for n in graph.nodes if n.op == "placeholder"]
outputs = [n for n in graph.nodes if n.op == "output"]
return len(inputs) == 0 and all(output.args == ((),) for output in outputs)
def create_tvm_task_collection_backend() -> Tuple[Callable, List[ms.ExtractedTask]]:
"""
This torchdynamo backend only collects the extracted tasks from MetaSchedule.
It doesn't tune the model.
"""
subgraph_idx = 0
subgraphs_dir = os.path.join(ARGS.work_dir, "subgraphs")
os.makedirs(subgraphs_dir, exist_ok=True)
collected_tasks = []
task_index: Dict[int, List[ms.ExtractedTask]] = defaultdict(list)
def collect_task(task):
task_hash = tvm.ir.structural_hash(task.dispatched[0])
for duplicate_task in task_index[task_hash]:
if tvm.ir.structural_equal(duplicate_task.dispatched[0], task.dispatched[0]):
duplicate_task.weight += task.weight
return
task_index[task_hash].append(task)
collected_tasks.append(task)
def backend(graph_module, example_inputs):
nonlocal subgraph_idx
torch.save(graph_module, os.path.join(subgraphs_dir, f"graph_module_{subgraph_idx}"))
torch.save(example_inputs, os.path.join(subgraphs_dir, f"example_inputs_{subgraph_idx}"))
if should_skip_subgraph(graph_module):
return graph_module.forward
jit_mod = torch.jit.trace(graph_module, example_inputs)
shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
ir_mod, params = tvm.relay.frontend.from_pytorch(jit_mod, shape_list)
extracted_tasks = ms.relay_integration.extract_tasks(
mod=ir_mod,
target=ARGS.target,
params=params,
)
old_tasks_count = len(collected_tasks)
for task in extracted_tasks:
collect_task(task)
logger.info(
"Extracted %d tasks from graph %d, with %d new tasks",
len(extracted_tasks),
subgraph_idx,
len(collected_tasks) - old_tasks_count,
)
subgraph_idx += 1
return graph_module.forward
return backend, collected_tasks
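# Note on the dedup in `collect_task`: tasks are bucketed by structural hash and then
# confirmed with `structural_equal` (hashes can collide), so subgraphs that lower to
# the same TIR share a single tuning task whose weight is the sum of its duplicates.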
def create_tvm_compilation_backend(database: ms.database.Database) -> Callable:
"""
This torchdynamo backend compiles the model using history best record from the
MetaSchedule database.
"""
def backend(graph_module, example_inputs):
if should_skip_subgraph(graph_module):
return graph_module.forward
jit_mod = torch.jit.trace(graph_module, example_inputs)
shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
ir_mod, params = tvm.relay.frontend.from_pytorch(jit_mod, shape_list)
lib = ms.relay_integration.compile_relay(
database=database,
mod=ir_mod,
target=ARGS.target,
params=params,
backend=ARGS.backend,
)
device = tvm.cuda(0) if IS_CUDA else tvm.cpu(0)
if ARGS.backend == "graph":
return get_graph_executor_forward(lib, device)
elif ARGS.backend == "vm":
vm = VirtualMachine(lib, device) # pylint: disable=invalid-name
return get_vm_forward(vm, device)
else:
raise RuntimeError(f"Unknown backend {ARGS.backend}")
return backend
def format_time(seconds: float) -> str:
"""
Format elapsed time based on its value.
"""
if seconds > 1:
return f"{seconds:.3g}s"
else:
return f"{seconds * 1000:.3g}ms"
def is_output_correct(output: torch.Tensor, expected: torch.Tensor) -> bool:
"""
Check whether the output is correct.
"""
comparison_metric = ARGS.result_metric
if comparison_metric == ResultComparisonMetric.COSINE:
return same(expected, output, cosine_similarity=True)
elif comparison_metric == ResultComparisonMetric.ALLCLOSE:
return same(expected, output, tol=1e-4)
else:
raise RuntimeError(f"Unknown comparison metric {comparison_metric}")
def inspect_output_error(output, expected):
"""
Inspect the error between the actual output and the expected output.
"""
if not isinstance(output, torch.Tensor):
logger.info(
f"Unsupported type for error inspection: {type(output).__name__}."
f"Please manually check output.pt"
)
return
output = output.cpu().float()
expected = expected.cpu().float()
abs_error = (output - expected).abs()
rel_error = (abs_error / expected).abs()
def format_error_table(error, bins) -> str:
bin_tensor = torch.as_tensor([float(b) for b in bins], dtype=error.dtype)
error_hist = torch.histogram(error, bin_tensor).hist.int()
return "\n".join(f"< {b}\t{e}" for e, b in zip(error_hist, bins[1:]))
abs_error_bins = [
"-1e10",
"0",
"1e-8",
"1e-6",
"1e-5",
"1e-4",
"1e-3",
"1e-2",
"1e-1",
"1",
"1e10",
]
rel_error_bins = [
"-1e10",
"0",
"1e-4",
"1e-3",
"1e-2",
"1e-1",
"1",
"1e1",
"1e2",
"1e3",
"1e100",
]
large_rel_error_idx = rel_error > 1
abs_error_with_large_rel_error = abs_error[large_rel_error_idx]
logger.error(f"Expected (PyTorch eager): {expected}")
logger.error(f"Actual (Optimized): {output}")
logger.error(f"Absolute Error\n{format_error_table(abs_error, abs_error_bins)}")
logger.error(f"Relative Error\n{format_error_table(rel_error, rel_error_bins)}")
logger.error(
f"Max absolute error for position with large relative error (> 1):"
f"{abs_error_with_large_rel_error.max()}"
)
def performance_experiment(
model_iter_fn: Callable,
model: torch.nn.Module,
example_inputs: Tuple[torch.Tensor],
) -> str:
"""
Performs the actual benchmarking
Simplified from https://github.com/pytorch/torchdynamo/blob/c537639f9712621dc04ca09908796dbbe86c354b/benchmarks/common.py#L494 pylint: disable=line-too-long
"""
timings = np.zeros((ARGS.benchmark_repeat, 2), np.float64)
if IS_CUDA:
torch.cuda.empty_cache()
is_correct = True
frozen_model_iter_fn = torchdynamo.run(model_iter_fn)
for _ in range(ARGS.benchmark_warmup_rounds):
frozen_model_iter_fn(model, example_inputs)
model_iter_fn(model, example_inputs)
for rep in range(ARGS.benchmark_repeat):
# interleave the runs to handle frequency scaling and load changes
timings[rep, 0], expected_output = timed(
model, model_iter_fn, example_inputs, return_result=True
)
timings[rep, 1], actual_output = timed(
model, frozen_model_iter_fn, example_inputs, return_result=True
)
is_correct = is_correct and is_output_correct(expected_output, actual_output)
pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
median = np.median(timings, axis=0)
speedup = median[0] / median[1]
logger.info(
f"eager:{format_time(median[0])} "
f"optimized:{format_time(median[1])} "
f"speedup:{speedup:.3f}x p:{pvalue:.3f}"
)
torch.save(actual_output, os.path.join(ARGS.work_dir, "output.pt"))
torch.save(expected_output, os.path.join(ARGS.work_dir, "expected.pt"))
if not is_correct:
logger.error("Result is incorrect.")
inspect_output_error(actual_output, expected_output)
return ""
def get_torch_device_type(target: tvm.target.Target) -> str:
if target.kind.name == "llvm":
return "cpu"
elif target.kind.name == "cuda":
return "cuda"
else:
raise RuntimeError(f"Unsupported target {target}")
def main():
"""
Entry point of the benchmark
"""
describe()
meta_schedule_work_dir = os.path.join(ARGS.work_dir, "meta_schedule")
os.makedirs(meta_schedule_work_dir, exist_ok=True)
database = ms.database.JSONDatabase(work_dir=meta_schedule_work_dir)
if not ARGS.mode.should_tune:
if len(database) == 0:
raise RuntimeError(
"Script is running in eval mode while the tuning database is empty. "
"Please tune the model first."
)
if IS_CUDA and ARGS.cpu_flush:
warnings.warn(
"Benchmark is running on CUDA, while --cpu-flush is turned on. "
"This flag will have no effect on CUDA."
)
ARGS.cpu_flush = False
try:
logger.info(f"Loading model with batch size: {ARGS.batch_size}")
_, name, model, example_inputs, batch_size = runner.load_model(
get_torch_device_type(ARGS.target),
ARGS.model,
batch_size=ARGS.batch_size,
)
model, example_inputs = runner.maybe_cast(model, example_inputs)
logger.info(f"Got model with batch size: {batch_size}")
except NotImplementedError:
logger.exception(f"{ARGS.model} failed to load")
raise
with contextlib.ExitStack() as stack:
profiler = stack.enter_context(ms.Profiler())
stack.enter_context(torch.no_grad())
if ARGS.mode.should_tune:
task_collect_backend, extracted_tasks = create_tvm_task_collection_backend()
task_collect_ctx = torchdynamo.optimize(task_collect_backend)
task_collect_ctx(runner.model_iter_fn)(model, example_inputs)
tasks, task_weights = ms.relay_integration.extracted_tasks_to_tune_contexts(
extracted_tasks=extracted_tasks,
work_dir=ARGS.work_dir,
strategy=ARGS.strategy,
)
database = ms.tune.tune_tasks(
tasks=tasks,
task_weights=task_weights,
work_dir=ARGS.work_dir,
max_trials_global=ARGS.num_trials,
max_trials_per_task=ARGS.max_trials_per_task,
runner=get_meta_schedule_runner(), # type: ignore
database=database,
cost_model=ms.cost_model.XGBModel( # type: ignore
extractor=ms.feature_extractor.PerStoreFeature(),
adaptive_training=ARGS.adaptive_training,
),
)
if ARGS.mode.should_eval:
torchdynamo.reset()
model_compile_ctx = torchdynamo.optimize(create_tvm_compilation_backend(database))
model_compile_ctx(runner.model_iter_fn)(model, example_inputs)
with torch.no_grad():
performance_experiment(runner.model_iter_fn, model, example_inputs)
print(profiler.table())
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/torchbench/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Helper functions for running TorchBench through the benchmark functions
from TorchDynamo.
"""
import os
import sys
from dataclasses import dataclass
import torch # type: ignore
def find_torchdynamo() -> str:
"""
Find the directory of the TorchDynamo repo.
The benchmark runner in TorchDynamo can't be imported directly
because it isn't designed to be used as a Python package.
"""
candidates = [
"torchdynamo",
"../torchdynamo",
"../../torchdynamo",
]
for library_dir in candidates:
if os.path.exists(f"{library_dir}/benchmarks"):
return library_dir
raise RuntimeError(
"""
Cannot find directory for torchdynamo.
You need to clone https://github.com/pytorch/torchdynamo to the parent directory of cwd.
"""
)
DYNAMO_DIR = find_torchdynamo()
sys.path.insert(
0, DYNAMO_DIR
) # opacus_cifar10 depends on opacus, which installs a package called 'benchmarks'
sys.path.append(f"{DYNAMO_DIR}/benchmarks")
# pylint: disable=wrong-import-position, unused-import
from benchmarks.common import same, timed # type: ignore
from torchbench import TorchBenchmarkRunner # type: ignore
# pylint: enable=wrong-import-position, unused-import
def load_torchdynamo_benchmark_runner(
is_cuda: bool, cosine_similarity: bool = False, float32: bool = False
) -> TorchBenchmarkRunner:
"""
Load the benchmark runner from TorchDynamo.
"""
@dataclass
class RunnerArgs:
"""
This class simulates the parsed args required by the benchmark code from TorchDynamo.
"""
ci: bool = False # Whether it runs in CI mode. pylint: disable=invalid-name
training: bool = False # Whether it benchmarks training workload.
use_eval_mode: bool = True # Whether the model should be in eval mode.
dynamic_shapes: bool = False # Whether it runs the model in dynamic shape mode.
float16: bool = False # Whether to cast model and inputs to float16
float32: bool = False # Whether to cast model and inputs to float32
accuracy: bool = False # Whether to perform an accuracy test
performance: bool = True # Whether to perform a performance test
cosine: bool = False # Whether to use cosine similarity to check if the output is correct.
args = RunnerArgs(cosine=cosine_similarity, float32=float32)
runner = TorchBenchmarkRunner()
runner.args = args
runner.model_iter_fn = runner.forward_pass
if is_cuda:
# pylint: disable=import-outside-toplevel
import benchmarks.common # type: ignore
# pylint: enable=import-outside-toplevel
benchmarks.common.synchronize = torch.cuda.synchronize
return runner
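# Usage sketch, mirroring how run.py consumes this helper ("resnet50" is an example):
#
#   runner = load_torchdynamo_benchmark_runner(is_cuda=False)
#   _, name, model, example_inputs, batch_size = runner.load_model("cpu", "resnet50")
#   runner.model_iter_fn(model, example_inputs)  # one eager forward pass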
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/tune_onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import json
import logging
from distutils.util import strtobool
import onnx # type: ignore
import tvm
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.relay.frontend import from_onnx
from tvm.support import describe
from .tune_utils import create_timer, generate_input_data
def _parse_args():
args = argparse.ArgumentParser()
args.add_argument(
"--model-name",
type=str,
required=True,
)
args.add_argument(
"--onnx-path",
type=str,
required=True,
)
args.add_argument(
"--input-shape",
type=str,
required=True,
help='example: `[{"name": "input1", "dtype": "int64", "shape": [1, 1, 8]}]`',
)
args.add_argument(
"--target",
type=str,
required=True,
)
args.add_argument(
"--num-trials",
type=int,
required=True,
)
args.add_argument(
"--rpc-host",
type=str,
required=True,
)
args.add_argument(
"--rpc-port",
type=int,
required=True,
)
args.add_argument(
"--rpc-key",
type=str,
required=True,
)
args.add_argument(
"--work-dir",
type=str,
required=True,
)
args.add_argument(
"--number",
type=int,
default=3,
)
args.add_argument(
"--repeat",
type=int,
default=1,
)
args.add_argument(
"--min-repeat-ms",
type=int,
default=100,
)
args.add_argument(
"--adaptive-training",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
default=True,
)
args.add_argument(
"--cpu-flush",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
required=True,
)
args.add_argument(
"--backend",
type=str,
choices=["graph", "vm"],
help="example: graph / vm",
required=True,
)
parsed = args.parse_args()
parsed.target = tvm.target.Target(parsed.target)
parsed.input_shape = json.loads(parsed.input_shape)
parsed.rpc_config = ms.runner.RPCConfig(
tracker_host=parsed.rpc_host,
tracker_port=parsed.rpc_port,
tracker_key=parsed.rpc_key,
session_timeout_sec=600,
)
return parsed
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
ARGS = _parse_args()
def main():
describe()
print(f"Workload: {ARGS.model_name}")
onnx_model = onnx.load(ARGS.onnx_path)
shape_dict = {}
for item in ARGS.input_shape:
print(f" input_name : {item['name']}")
print(f" input_shape: {item['shape']}")
print(f" input_dtype: {item['dtype']}")
shape_dict[item["name"]] = item["shape"]
mod, params = from_onnx(onnx_model, shape_dict, freeze_params=True)
input_data = {
item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in ARGS.input_shape
}
with ms.Profiler() as profiler:
database = ms.relay_integration.tune_relay(
mod=mod,
target=ARGS.target,
params=params,
work_dir=ARGS.work_dir,
max_trials_global=ARGS.num_trials,
num_trials_per_iter=64,
runner=ms.runner.RPCRunner( # type: ignore
rpc_config=ARGS.rpc_config,
evaluator_config=ms.runner.EvaluatorConfig(
number=ARGS.number,
repeat=ARGS.repeat,
min_repeat_ms=ARGS.min_repeat_ms,
enable_cpu_cache_flush=ARGS.cpu_flush,
),
alloc_repeat=1,
),
cost_model=ms.cost_model.XGBModel( # type: ignore
extractor=ms.feature_extractor.PerStoreFeature(),
adaptive_training=ARGS.adaptive_training,
),
strategy=ms.search_strategy.EvolutionarySearch(),
)
lib = ms.relay_integration.compile_relay(
database=database,
mod=mod,
target=ARGS.target,
params=params,
backend=ARGS.backend,
)
print("Tuning Time:")
print(profiler.table())
run_module_via_rpc(
rpc_config=ARGS.rpc_config,
lib=lib,
dev_type=ARGS.target.kind.name,
args=input_data,
continuation=create_timer(ARGS.backend),
backend=ARGS.backend,
)
if __name__ == "__main__":
main()
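# Example invocation (host/port/key, model, and paths are placeholders); run as a
# module so the relative import of tune_utils resolves:
#
#   python -m tvm.meta_schedule.testing.tune_onnx \
#       --model-name bert-base \
#       --onnx-path ./bert-base.onnx \
#       --input-shape '[{"name": "input_ids", "dtype": "int64", "shape": [1, 384]}]' \
#       --target "llvm -num-cores 8" \
#       --num-trials 2000 \
#       --rpc-host 127.0.0.1 --rpc-port 4445 --rpc-key local \
#       --work-dir ./work_dir --cpu-flush True --backend graph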
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/tune_relay.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import json
import logging
from distutils.util import strtobool
from typing import Dict
import numpy as np # type: ignore
import tvm
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tune_utils import create_timer, generate_input_data
from tvm.support import describe
def _parse_args():
args = argparse.ArgumentParser()
args.add_argument(
"--workload",
type=str,
required=True,
)
args.add_argument(
"--input-shape",
type=str,
required=True,
)
args.add_argument(
"--target",
type=str,
required=True,
)
args.add_argument(
"--num-trials",
type=int,
required=True,
)
args.add_argument(
"--rpc-host",
type=str,
required=True,
)
args.add_argument(
"--rpc-port",
type=int,
required=True,
)
args.add_argument(
"--rpc-key",
type=str,
required=True,
)
args.add_argument(
"--work-dir",
type=str,
required=True,
)
args.add_argument(
"--layout",
type=str,
default=None,
)
args.add_argument(
"--cache-dir",
type=str,
default=None,
)
args.add_argument(
"--number",
type=int,
default=3,
)
args.add_argument(
"--repeat",
type=int,
default=1,
)
args.add_argument(
"--min-repeat-ms",
type=int,
default=100,
)
args.add_argument(
"--adaptive-training",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
default=True,
)
args.add_argument(
"--cpu-flush",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
required=True,
)
args.add_argument(
"--backend",
type=str,
choices=["graph", "vm"],
help="example: graph / vm",
required=True,
)
parsed = args.parse_args()
parsed.target = tvm.target.Target(parsed.target)
parsed.input_shape = json.loads(parsed.input_shape)
parsed.rpc_config = ms.runner.RPCConfig(
tracker_host=parsed.rpc_host,
tracker_port=parsed.rpc_port,
tracker_key=parsed.rpc_key,
session_timeout_sec=600,
)
return parsed
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
ARGS = _parse_args()
def main():
describe()
print(f"Workload: {ARGS.workload}")
mod, params, (input_name, input_shape, input_dtype) = get_network(
ARGS.workload,
ARGS.input_shape,
layout=ARGS.layout,
cache_dir=ARGS.cache_dir,
)
input_info = [
{
"name": input_name,
"shape": input_shape,
"dtype": input_dtype,
},
]
input_data: Dict[str, np.ndarray] = {
item["name"]: generate_input_data( # type: ignore
item["shape"], # type: ignore
item["dtype"], # type: ignore
)
for item in input_info
}
for item in input_info:
print(f" input_name : {item['name']}")
print(f" input_shape: {item['shape']}")
print(f" input_dtype: {item['dtype']}")
with ms.Profiler() as profiler:
database = ms.relay_integration.tune_relay(
mod=mod,
target=ARGS.target,
work_dir=ARGS.work_dir,
max_trials_global=ARGS.num_trials,
num_trials_per_iter=64,
params=params,
runner=ms.runner.RPCRunner( # type: ignore
rpc_config=ARGS.rpc_config,
evaluator_config=ms.runner.EvaluatorConfig(
number=ARGS.number,
repeat=ARGS.repeat,
min_repeat_ms=ARGS.min_repeat_ms,
enable_cpu_cache_flush=ARGS.cpu_flush,
),
alloc_repeat=1,
),
cost_model=ms.cost_model.XGBModel( # type: ignore
extractor=ms.feature_extractor.PerStoreFeature(),
adaptive_training=ARGS.adaptive_training,
),
strategy=ms.search_strategy.EvolutionarySearch(),
)
lib = ms.relay_integration.compile_relay(
database=database,
mod=mod,
target=ARGS.target,
params=params,
backend=ARGS.backend,
)
print("Tuning Time:")
print(profiler.table())
run_module_via_rpc(
rpc_config=ARGS.rpc_config,
lib=lib,
dev_type=ARGS.target.kind.name,
args=input_data,
continuation=create_timer(ARGS.backend),
backend=ARGS.backend,
)
if __name__ == "__main__":
main()
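# Hedged usage sketch (not part of the original script): a hypothetical
# invocation, assuming an RPC tracker is already running and a device is
# registered under the key "local". All host/port/path/workload values below
# are illustrative placeholders.
#
#   python -m tvm.meta_schedule.testing.tune_relay \
#       --workload resnet_18 \
#       --input-shape '[1, 3, 224, 224]' \
#       --target "llvm -num-cores 16" \
#       --num-trials 2000 \
#       --rpc-host 127.0.0.1 --rpc-port 9190 --rpc-key local \
#       --work-dir /tmp/tune_relay \
#       --cpu-flush True \
#       --backend graph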
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/tune_te.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import logging
from distutils.util import strtobool
from typing import Optional
import tvm
from tvm import meta_schedule as ms
from tvm import tir
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.support import describe
def _parse_args():
args = argparse.ArgumentParser()
args.add_argument(
"--workload",
type=str,
required=True,
)
args.add_argument(
"--target",
type=str,
required=True,
)
args.add_argument(
"--num-trials",
type=int,
required=True,
)
args.add_argument(
"--rpc-host",
type=str,
required=True,
)
args.add_argument(
"--rpc-port",
type=int,
required=True,
)
args.add_argument(
"--rpc-key",
type=str,
required=True,
)
args.add_argument(
"--work-dir",
type=str,
required=True,
)
args.add_argument(
"--number",
type=int,
default=3,
)
args.add_argument(
"--repeat",
type=int,
default=1,
)
args.add_argument(
"--min-repeat-ms",
type=int,
default=100,
)
args.add_argument(
"--adaptive-training",
type=lambda x: bool(strtobool(x)),
required=False,
help="example: True / False",
default=True,
)
args.add_argument(
"--cpu-flush",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
required=True,
)
parsed = args.parse_args()
parsed.target = tvm.target.Target(parsed.target)
parsed.rpc_config = ms.runner.RPCConfig(
tracker_host=parsed.rpc_host,
tracker_port=parsed.rpc_port,
tracker_key=parsed.rpc_key,
session_timeout_sec=60,
)
return parsed
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
ARGS = _parse_args()
def main():
describe()
print(f"Workload: {ARGS.workload}")
with ms.Profiler() as profiler:
sch: Optional[tir.Schedule] = ms.tir_integration.tune_tir(
mod=create_te_workload(ARGS.workload, 0),
target=ARGS.target,
work_dir=ARGS.work_dir,
max_trials_global=ARGS.num_trials,
num_trials_per_iter=64,
runner=ms.runner.RPCRunner( # type: ignore
rpc_config=ARGS.rpc_config,
evaluator_config=ms.runner.EvaluatorConfig(
number=ARGS.number,
repeat=ARGS.repeat,
min_repeat_ms=ARGS.min_repeat_ms,
enable_cpu_cache_flush=ARGS.cpu_flush,
),
alloc_repeat=1,
),
cost_model=ms.cost_model.XGBModel( # type: ignore
extractor=ms.feature_extractor.PerStoreFeature(),
adaptive_training=ARGS.adaptive_training,
),
strategy=ms.search_strategy.EvolutionarySearch(),
task_name=ARGS.workload,
)
print("Tuning Time:")
print(profiler.table())
if sch is None:
print("No valid schedule found!")
else:
print(sch.mod.script())
print(sch.trace)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/tune_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utility functions in meta schedule"""
from typing import Callable, Optional, Union, List, Dict
from statistics import median
import json
import warnings
import numpy as np # type: ignore
import tvm
from tvm.runtime import NDArray
def generate_input_data(
input_shape: List[int],
input_dtype: str,
*,
low: Optional[int] = None,
high: Optional[int] = None,
) -> np.ndarray:
"""Generate input date with given shape and data type.
Parameters
----------
input_shape : List[int]
The shape of the input data.
    input_dtype : str
        The data type of the input data.
    low : Optional[int]
        The inclusive lower bound for randomly generated integer data.
    high : Optional[int]
        The exclusive upper bound for randomly generated integer data.
Returns
-------
input_data : np.ndarray
The generated input data with given shape and data type in numpy ndarray.
"""
if input_dtype.startswith("float"):
return np.random.uniform(size=input_shape).astype(input_dtype)
if low is None or high is None:
warnings.warn(
f"Model input value range for shape {input_shape} of {input_dtype} is not set!"
)
range_map = {
"uint8": (0, 255),
"int8": (-128, 127),
"int32": (0, 10000),
"int64": (0, 10000),
}
if input_dtype in range_map:
_low, _high = range_map[input_dtype]
return np.random.randint(
low=_low if low is None else low,
high=_high if high is None else high,
size=input_shape,
dtype=input_dtype,
)
raise ValueError("Unsupported input datatype!")
def create_timer(backend: str) -> Callable:
"""Create a function to run and benchmark the performance of whole given runtime module,
or Executable in relay vm.
Parameters
----------
backend : str
The backend to use, graph / vm.
Returns
-------
func : Callable
The function to benchmark the workload.
"""
def f_timer(
rt_mod: Union[tvm.runtime.Module, tvm.runtime.vm.Executable],
dev: tvm.runtime.Device,
input_data: Dict[str, NDArray],
) -> None:
"""Run and benchmark the given runtime module, print out the result.
Parameters
----------
rt_mod : Union[tvm.runtime.Module, tvm.runtime.vm.Executable]
The runtime module or vm executable.
dev : tvm.runtime.Device
The device type to run workload.
input_data : Dict[str, np.ndarray]
The input data as a dictionary.
"""
from tvm.contrib.graph_executor import GraphModule # pylint:disable=import-outside-toplevel
from tvm.runtime.vm import VirtualMachine # pylint:disable=import-outside-toplevel
try:
if backend == "vm":
vm = VirtualMachine(rt_mod, dev) # pylint: disable=invalid-name
ftimer = vm.benchmark(
dev, min_repeat_ms=500, repeat=5, number=1, end_to_end=False, **input_data
)
elif backend == "graph":
mod = GraphModule(rt_mod["default"](dev))
for input_name, input_value in input_data.items():
mod.set_input(input_name, input_value)
ftimer = mod.module.time_evaluator(
"run", dev, min_repeat_ms=500, repeat=5, number=1
)()
else:
raise ValueError(f"Backend {backend} not supported in f_timer!")
results = list(np.array(ftimer.results) * 1000.0) # type: ignore
print("Running time in time_evaluator: ", results)
print("-------------------------------")
print(f" Min (ms) : {min(results)}")
print(f" Max (ms) : {max(results)}")
print(f" Median (ms) : {median(results)}")
print(f"Average (ms) : {sum(results) / len(results)}")
except Exception as exc: # pylint: disable=broad-except
print(
f"Run module f_timer via RPC failed, exception: {exc}",
)
return f_timer
def create_time_per_layer(graph: str) -> Callable:
"""Create a function to run and benchmark the per-layer performance of given runtime module,
given the graph output of the module from graph compiler.
Parameters
----------
graph : str
The json format graph output of the module from graph compiler.
Returns
-------
func : Callable
The function using the json format graph.
"""
def f_time_per_layer(
rt_mod: tvm.runtime.Module,
dev: tvm.runtime.Device,
input_data: Dict[str, NDArray],
) -> None:
"""Run and benchmark the per-layer performance of given runtime module,
print out the result.
Parameters
----------
rt_mod : tvm.runtime.Module
The runtime module.
dev : tvm.runtime.Device
The device type to run workload.
input_data : Dict[str, np.ndarray]
The input data as a dictionary.
"""
# pylint:disable=import-outside-toplevel
from tvm.contrib.debugger.debug_executor import create
# pylint:enable=import-outside-toplevel
try:
mod = create(graph, rt_mod, dev)
for input_name, input_value in input_data.items():
mod.set_input(input_name, input_value)
graph_nodes = [n["name"] for n in json.loads(graph)["nodes"]]
graph_time = mod.run_individual(number=10, repeat=1, min_repeat_ms=5000)
print("Running time of each layer:")
print("---------------------------")
print("|graph_nodes| = ", len(graph_nodes))
print("|graph_time| = ", len(graph_time))
for k, v in zip(graph_nodes, graph_time):
print(k, float(v) * 1e6, "us")
except Exception as exc: # pylint: disable=broad-except
print(
f"Run module f_time_per_layer via RPC failed, exception: {exc}",
)
return f_time_per_layer
def create_calculator(backend: str) -> Callable:
"""Create a function to fetch the computing result of running the given runtime module.
Parameters
----------
backend : str
The backend to use, only tir is supported for now.
Returns
-------
func : Callable
The function to fetch the computing result.
"""
def f_calculator(
rt_mod: tvm.runtime.Module,
dev: tvm.runtime.Device, # pylint: disable=unused-argument
input_data: Dict[str, NDArray],
    ) -> Optional[List[NDArray]]:
"""Fetch the result of running the given runtime module.
Parameters
----------
rt_mod : Union[tvm.runtime.Module, tvm.runtime.vm.Executable]
The runtime module or vm executable.
dev : tvm.device
The device type to run workload.
input_data : Dict[str, np.ndarray]
The input data as a dictionary.
"""
try:
if backend == "tir":
data = [v for _, v in sorted(input_data.items(), key=lambda x: x[0])]
rt_mod(*data)
return data
else:
raise ValueError(f"Backend {backend} not supported in f_calculator!")
except Exception as exc: # pylint: disable=broad-except
print(
f"Run module f_calculator via RPC failed, exception: {exc}",
)
return None
return f_calculator
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/testing/validate_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""JSON Database validation script"""
from typing import Union, Callable, List
from distutils.util import strtobool
import argparse
import logging
import warnings
import numpy as np # type: ignore
import tvm
from tvm.target import Target
from tvm.ir import IRModule
from tvm.tir import Schedule
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.tune_utils import create_calculator, generate_input_data
from tvm._ffi import get_global_func, register_func
from tvm.support import describe
DELIMITOR = "\n" + "-" * 30 + "\n"
def _parse_args():
args = argparse.ArgumentParser()
args.add_argument(
"--work-dir",
type=str,
required=True,
help="The path to the work directory containing database files.",
)
args.add_argument(
"--target",
type=Target,
required=True,
)
args.add_argument(
"--baseline-target",
type=Target,
default="llvm -num-cores=1",
required=False,
help="The baseline target to compile the original module.",
)
args.add_argument(
"--rpc-host",
type=str,
required=True,
)
args.add_argument(
"--rpc-port",
type=int,
required=True,
)
args.add_argument(
"--rpc-key",
type=str,
required=True,
)
args.add_argument(
"--number",
type=int,
default=3,
)
args.add_argument(
"--repeat",
type=int,
default=1,
)
args.add_argument(
"--min-repeat-ms",
type=int,
default=100,
)
args.add_argument(
"--cpu-flush",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
required=True,
)
parsed = args.parse_args()
parsed.target = tvm.target.Target(parsed.target)
parsed.rpc_config = ms.runner.RPCConfig(
tracker_host=parsed.rpc_host,
tracker_port=parsed.rpc_port,
tracker_key=parsed.rpc_key,
session_timeout_sec=600,
)
if parsed.cpu_flush and parsed.target.kind.name != "llvm":
warnings.warn("cpu_flush is only supported on llvm target")
return parsed
# logging
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
# arg parser
ARGS = _parse_args()
@register_func("tvm.meta_schedule.testing.default_input_generator")
def default_input_generator(mod: IRModule) -> List[tvm.nd.NDArray]:
args_info = ms.arg_info.TensorInfo.from_prim_func(mod["main"])
inputs = [
tvm.nd.array(generate_input_data(input_shape=arg_info.shape, input_dtype=arg_info.dtype))
for arg_info in args_info
]
return inputs
@register_func("tvm.meta_schedule.testing.default_check_metric")
def default_check_metric(a: List[tvm.nd.NDArray], b: List[tvm.nd.NDArray]) -> bool:
assert len(a) == len(b), "Different number of outputs from two modules"
for i, _ in enumerate(a):
if not np.allclose(a[i].numpy(), b[i].numpy(), rtol=1e-3, atol=2e-3):
return False
return True
def validate_correctness(
original_mod: IRModule, # compiled for "baseline_target"
scheduled_mod: IRModule, # compiled for "target"
*,
baseline_target: Target,
target: Target,
dev_type: str,
rpc_config: ms.runner.RPCConfig,
f_input_generator: Union[
str, Callable[[IRModule], List[tvm.nd.NDArray]]
] = default_input_generator,
f_check_metric: Union[
str, Callable[[tvm.nd.NDArray, tvm.nd.NDArray], bool]
] = default_check_metric,
) -> bool:
"""Function to validate the correctness of a scheduled module.
Parameters
----------
original_mod : IRModule
The original module to be compiled.
scheduled_mod : IRModule
The scheduled module to be compiled.
baseline_target : Target
The baseline target to compile the original module.
target : Target
The target to compile the scheduled module.
dev_type : str
The device type to run the module via rpc.
rpc_config : RPCConfig
The RPCConfig to run the scheduled module.
f_input_generator : Union[str, Callable]
The function to generate the input data.
f_check_metric : Union[str, Callable]
The function to check the metric.
Returns
-------
result : bool
The result of the validation.
"""
def to_numpy(a: List[tvm.nd.NDArray]) -> List[np.ndarray]:
"""Convert a list of TVM NDArray to a list of numpy array"""
assert a is not None, "Empty result cannot be converted to numpy"
return [x.numpy() for x in a]
def to_tvm_ndarray(a: List[np.ndarray]) -> List[tvm.nd.NDArray]:
"""Convert a list of numpy array to a list of TVM NDArray"""
assert a is not None, "Empty result cannot be converted to TVM NDArray"
return [tvm.nd.array(x) for x in a]
    def build_and_run(mod: IRModule, target: Target, dev_type: str) -> List[tvm.nd.NDArray]:
"""Build and run the module on the target device."""
rt_mod = tvm.build(mod, target=target)
return run_module_via_rpc(
rpc_config=rpc_config,
lib=rt_mod,
dev_type=dev_type,
args={i: v for i, v in enumerate(inputs)}, # pylint: disable=unnecessary-comprehension
continuation=create_calculator(backend="tir"),
backend="tir",
)
# fetch functions & prepare inputs
if isinstance(f_input_generator, str):
f_input_generator = get_global_func(f_input_generator)
if isinstance(f_check_metric, str):
f_check_metric = get_global_func(f_check_metric)
inputs = to_numpy(f_input_generator(original_mod)) # type: ignore
# build & run original result
original_res = to_numpy(build_and_run(original_mod, target=baseline_target, dev_type="cpu"))
scheduled_res = to_numpy(build_and_run(scheduled_mod, target=target, dev_type=dev_type))
# check metric
if f_check_metric(to_tvm_ndarray(original_res), to_tvm_ndarray(scheduled_res)): # type: ignore
return True
else:
print(
("\n\n").join(
[
"Validation failed!",
"Original Result:" + DELIMITOR + str(original_res),
"Scheduled Result:" + DELIMITOR + str(scheduled_res),
"Input:" + DELIMITOR + str(inputs),
"Original IRModule:" + DELIMITOR + original_mod.script(),
"Scheduled IRModule:" + DELIMITOR + scheduled_mod.script(),
]
)
)
return False
def main():
"""Main function"""
describe()
database = ms.database.create(work_dir=ARGS.work_dir)
target = ARGS.target
if target.kind.name == "llvm":
dev_type = "cpu"
elif target.kind.name == "cuda":
dev_type = "cuda"
else:
raise RuntimeError(f"Unsupported target kind: {target.kind.name}")
records = database.get_all_tuning_records()
with ms.Profiler() as profiler:
for i, record in enumerate(records):
scope_name = f"validate #{i}"
with profiler.timeit(scope_name):
original_mod = record.workload.mod
sch = Schedule(original_mod)
record.trace.apply_to_schedule(sch=sch, remove_postproc=False)
scheduled_mod = sch.mod
is_success = False
try:
is_success = validate_correctness(
original_mod=original_mod,
scheduled_mod=scheduled_mod,
target=target,
baseline_target=ARGS.baseline_target,
dev_type=dev_type,
rpc_config=ARGS.rpc_config,
)
except Exception as e: # pylint: disable=broad-except, invalid-name
print(
("\n\n").join(
[
"Validation failed!",
"Original IRModule:" + DELIMITOR + original_mod.script(),
"Scheduled IRModule:" + DELIMITOR + scheduled_mod.script(),
"Exception" + DELIMITOR + str(e),
]
)
)
if is_success:
print(
f"Progress {i+1: 6d} / {len(records): 6d} checked,"
f" used {float(profiler.get()[scope_name]): 3.3f} sec."
)
else:
return
print("Validation passed!")
print(f"Total time spent: {float(profiler.get()['Total']): 3.3f} sec.")
if __name__ == "__main__":
main()
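# Hedged usage sketch (not part of the original script): a hypothetical
# invocation validating every record in a JSON database against a baseline
# CPU build. Host/port/key/path/target values are illustrative placeholders.
#
#   python -m tvm.meta_schedule.testing.validate_database \
#       --work-dir /tmp/tune_relay \
#       --target "nvidia/geforce-rtx-3070" \
#       --rpc-host 127.0.0.1 --rpc-port 9190 --rpc-key local \
#       --cpu-flush False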
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/tir_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MetaSchedule-TIR integration"""
from typing import Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm import ir, tir
from tvm.target import Target
from .builder import Builder
from .cost_model import CostModel
from .database import Database
from .logging import get_loggers_from_work_dir
from .measure_callback import MeasureCallback
from .runner import Runner
from .search_strategy import SearchStrategy
from .space_generator import SpaceGenerator
from .task_scheduler import TaskScheduler
from .tune import tune_tasks
from .tune_context import TuneContext, _normalize_mod
from .utils import fork_seed
def tune_tir(
mod: Union[ir.IRModule, tir.PrimFunc],
target: Union[str, Target],
work_dir: str,
max_trials_global: int,
*,
num_trials_per_iter: int = 64,
builder: Builder.BuilderType = "local",
runner: Runner.RunnerType = "local",
database: Database.DatabaseType = "json",
cost_model: CostModel.CostModelType = "xgb",
measure_callbacks: MeasureCallback.CallbackListType = "default",
task_scheduler: TaskScheduler.TaskSchedulerType = "round-robin",
space: SpaceGenerator.SpaceGeneratorType = "post-order-apply",
strategy: SearchStrategy.SearchStrategyType = "evolutionary",
task_name: str = "main",
num_threads: Union[Literal["physical", "logical"], int] = "physical",
seed: Optional[int] = None,
) -> Database:
"""Tune a TIR function.
Parameters
----------
mod : Union[ir.IRModule, tir.PrimFunc]
The TIR function to tune.
target : Union[str, Target]
The target to tune for.
work_dir : str
The working directory.
max_trials_global : int
The maximum number of trials to run globally.
num_trials_per_iter : int
The number of trials to run per iteration
builder : Builder.BuilderType
The builder.
runner : Runner.RunnerType
The runner.
database : Database.DatabaseType
The database.
cost_model : CostModel.CostModelType
The cost model.
measure_callbacks : MeasureCallback.CallbackListType
The measure callbacks.
task_scheduler : TaskScheduler.TaskSchedulerType
The task scheduler.
space : SpaceGenerator.SpaceGeneratorType
The space generator.
strategy : SearchStrategy.SearchStrategyType
The search strategy.
task_name : str
The name of the task.
num_threads : Union[Literal["physical", "logical"], int]
The number of threads to use.
seed : Optional[int]
The seed for the random number generator.
Returns
-------
database : Database
The database with all tuning records
"""
(logger,) = get_loggers_from_work_dir(work_dir, [task_name])
(seed,) = fork_seed(seed, n=1)
return tune_tasks(
tasks=[
TuneContext(
mod=mod,
target=target,
space_generator=space,
search_strategy=strategy,
task_name=task_name,
logger=logger,
rand_state=seed,
num_threads=num_threads,
).clone()
],
task_weights=[1.0],
work_dir=work_dir,
max_trials_global=max_trials_global,
max_trials_per_task=max_trials_global,
num_trials_per_iter=num_trials_per_iter,
builder=builder,
runner=runner,
database=database,
cost_model=cost_model,
measure_callbacks=measure_callbacks,
task_scheduler=task_scheduler,
)
def compile_tir(
database: Database,
mod: Union[ir.IRModule, tir.PrimFunc],
target: Union[Target, str],
) -> tir.Schedule:
"""Compile a TIR to tir.Schedule, according to the records in the database.
Parameters
----------
database : Database
The database of tuning records.
    mod : Union[ir.IRModule, tir.PrimFunc]
        The TIR function to compile.
    target : Union[str, Target]
        The target to compile for.
Returns
-------
sch : tir.Schedule
The best schedule found in the database.
"""
mod = _normalize_mod(mod)
if not isinstance(target, Target):
target = Target(target)
return database.query_schedule(mod, target, workload_name="main")
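# Hedged usage sketch (not part of the original module): end-to-end local tuning
# of a single PrimFunc followed by a database lookup. The matmul workload, the
# target string, and the trial budget are illustrative choices only.
def _example_tune_and_compile_tir(work_dir: str = "/tmp/tune_tir_example") -> tir.Schedule:
    from tvm import te  # pylint: disable=import-outside-toplevel

    a = te.placeholder((128, 128), name="A")
    b = te.placeholder((128, 128), name="B")
    k = te.reduce_axis((0, 128), name="k")
    c = te.compute((128, 128), lambda i, j: te.sum(a[i, k] * b[j, k], axis=k), name="C")
    mod = te.create_prim_func([a, b, c])
    database = tune_tir(
        mod=mod,
        target="llvm -num-cores 4",
        work_dir=work_dir,
        max_trials_global=64,
    )
    # Query the best schedule found for this workload from the tuning records.
    return compile_tir(database, mod, "llvm -num-cores 4")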
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/trace_apply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Specialized applications of trace"""
from ..tir.schedule import Schedule, Trace
from ..target import Target
from . import _ffi_api
def schedule_using_anchor_trace(sch: Schedule, anchor_trace: Trace, target: Target) -> None:
"""Apply the trace from a TIR module whose anchor block is the same but fused elemewise op
blocks differ. This function can be used for transferring a trace tuned on a conv2d -> add
subgraph to other subgraphs having the same conv2d workload, for example. We call such trace
an "anchor trace". Those blocks that are not scheduled by the given anchor trace will be either
inlined or parallelized.
Parameters
----------
sch : Schedule
The target schedule
anchor_trace: Trace
The trace generated for other TIR module having the same anchor block
target : tvm.target.Target
The compilation target
"""
_ffi_api.ScheduleUsingAnchorTrace(sch, anchor_trace, target) # type: ignore
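# Hedged usage sketch (not part of the original module): re-playing a trace
# tuned on one subgraph onto another subgraph that shares the same anchor
# block. `tuned_sch` and `new_mod` are assumed to be supplied by the caller.
def _example_apply_anchor_trace(tuned_sch: Schedule, new_mod, target: Target) -> Schedule:
    sch = Schedule(new_mod)
    # Blocks not covered by the anchor trace are inlined or parallelized.
    schedule_using_anchor_trace(sch, tuned_sch.trace, target)
    return sch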
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/tune.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The core tuning API"""
from typing import List, Optional
from .builder import Builder
from .cost_model import CostModel
from .database import Database
from .measure_callback import MeasureCallback
from .runner import Runner
from .task_scheduler import TaskScheduler
from .tune_context import TuneContext
def tune_tasks(
*,
tasks: List[TuneContext],
task_weights: List[float],
work_dir: str,
max_trials_global: int,
max_trials_per_task: Optional[int] = None,
num_trials_per_iter: int = 64,
builder: Builder.BuilderType = "local",
runner: Runner.RunnerType = "local",
database: Database.DatabaseType = "json",
cost_model: CostModel.CostModelType = "xgb",
measure_callbacks: MeasureCallback.CallbackListType = "default",
task_scheduler: TaskScheduler.TaskSchedulerType = "gradient",
module_equality: str = "structural",
) -> Database:
"""Tune a list of tasks. Using a task scheduler.
Parameters
----------
tasks : List[TuneContext]
The list of tasks to tune.
task_weights : List[float]
The weight of each task.
work_dir : str
The working directory.
max_trials_global : int
The maximum number of trials to run globally.
max_trials_per_task : Optional[int]
The maximum number of trials to run per task.
num_trials_per_iter : int
The number of trials to run per iteration
builder : Builder.BuilderType
The builder.
runner : Runner.RunnerType
The runner.
database : Database.DatabaseType
The database.
cost_model : CostModel.CostModelType
The cost model.
measure_callbacks : MeasureCallback.CallbackListType
The measure callbacks.
task_scheduler : TaskScheduler.TaskSchedulerType
The task scheduler.
    module_equality : str
        A string to specify the module equality testing and hashing method.
        It must be one of the following:
          - "structural": Use StructuralEqual/Hash
          - "ignore-ndarray": Same as "structural", but ignore ndarray raw data during
            equality testing and hashing.
          - "anchor-block": Apply equality testing and hashing on the anchor block extracted from a
            given module. The "ignore-ndarray" variant is used for the extracted
            blocks or in case no anchor block is found.
For the definition of the anchor block, see tir/analysis/analysis.py.
Returns
-------
database : Database
The database with all tuning records
"""
if len(tasks) != len(task_weights):
raise ValueError(
f"Length of tasks ({len(tasks)}) and task_weights ({len(task_weights)}) do not match."
)
if max_trials_per_task is None:
max_trials_per_task = max_trials_global
if not isinstance(builder, Builder):
builder = Builder.create(builder)
if not isinstance(runner, Runner):
runner = Runner.create(runner)
if database == "json":
database = Database.create(database, work_dir=work_dir, module_equality=module_equality)
elif not isinstance(database, Database):
database = Database.create(database, module_equality=module_equality)
if not isinstance(cost_model, CostModel):
cost_model = CostModel.create(cost_model)
if isinstance(measure_callbacks, MeasureCallback):
measure_callbacks = [measure_callbacks]
elif measure_callbacks == "default":
measure_callbacks = MeasureCallback.create(measure_callbacks)
if not isinstance(task_scheduler, TaskScheduler):
task_scheduler = TaskScheduler.create(task_scheduler)
task_scheduler.tune(
tasks=tasks,
task_weights=task_weights,
max_trials_global=max_trials_global,
max_trials_per_task=max_trials_per_task,
num_trials_per_iter=num_trials_per_iter,
builder=builder,
runner=runner,
measure_callbacks=measure_callbacks,
database=database,
cost_model=cost_model,
)
return database
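# Hedged usage sketch (not part of the original module): tuning two tasks with
# equal weights on the local machine. `mod_a` and `mod_b` are assumed to be
# IRModules prepared by the caller; the trial budget is illustrative.
def _example_tune_two_tasks(mod_a, mod_b, work_dir: str) -> Database:
    tasks = [
        TuneContext(mod=mod_a, target="llvm -num-cores 4", task_name="task_a",
                    space_generator="post-order-apply", search_strategy="evolutionary"),
        TuneContext(mod=mod_b, target="llvm -num-cores 4", task_name="task_b",
                    space_generator="post-order-apply", search_strategy="evolutionary"),
    ]
    return tune_tasks(
        tasks=tasks,
        task_weights=[1.0, 1.0],
        work_dir=work_dir,
        max_trials_global=128,
    )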
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/tune_context.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule tuning context."""
from typing import TYPE_CHECKING, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm import IRModule
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.target import Target
from tvm.tir import PrimFunc, Schedule
from . import _ffi_api
from .logging import Logger, get_logger, get_logging_func
from .utils import cpu_count
if TYPE_CHECKING:
from .cost_model import CostModel
from .database import Database
from .runner import RunnerResult
from .search_strategy import MeasureCandidate, SearchStrategy
from .space_generator import SpaceGenerator
def _normalize_mod(mod: Union[PrimFunc, IRModule]) -> IRModule:
"""Normalize the input to an IRModule"""
if isinstance(mod, PrimFunc):
mod = mod.with_attr("global_symbol", "main")
mod = mod.with_attr("tir.noalias", True)
mod = IRModule({"main": mod})
if not isinstance(mod, IRModule):
raise TypeError(f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
func_names = mod.get_global_vars()
    # Rename the function to "main" when the module has exactly one function
    # with a different name; multi-function modules are returned unchanged.
    if len(func_names) == 1:
        (func_name,) = func_names
        if func_name.name_hint != "main":
            mod = IRModule({"main": mod[func_name]})
return mod
@register_object("meta_schedule.TuneContext")
class TuneContext(Object):
"""The tune context class is designed to contain all resources for a tuning task.
Parameters
----------
mod : Optional[IRModule] = None
The workload to be optimized.
target : Optional[Target] = None
The target to be optimized for.
space_generator : Union[None, ScheduleFnType, SpaceGenerator] = None
The design space generator.
search_strategy : Union[None, SearchStrategy] = None
The search strategy.
        If None, the strategy is left blank.
task_name : Optional[str] = None
The name of the tuning task.
logger : logging.Logger
The logger for the tuning task.
rand_state : int = -1
The random state.
        Needs to be an integer in [1, 2^31-1]; -1 means using a random number.
num_threads : int = None
The number of threads to be used, None means using the logical cpu count.
"""
mod: Optional[IRModule]
target: Optional[Target]
space_generator: Optional["SpaceGenerator"]
search_strategy: Optional["SearchStrategy"]
task_name: str
logger: Optional[Logger]
rand_state: int
num_threads: int
def __init__(
self,
mod: Optional[IRModule] = None,
*,
target: Union[Target, str, None] = None,
space_generator: Union["SpaceGenerator.SpaceGeneratorType", None] = None,
search_strategy: Union["SearchStrategy.SearchStrategyType", None] = None,
task_name: str = "main",
rand_state: int = -1,
num_threads: Union[int, Literal["physical", "logical"]] = "physical",
logger: Optional[Logger] = None,
):
# pylint: disable=import-outside-toplevel
import tvm.tir.tensor_intrin # pylint: disable=unused-import
from .search_strategy import SearchStrategy
from .space_generator import SpaceGenerator
# pylint: enable=import-outside-toplevel
if isinstance(mod, PrimFunc):
mod = _normalize_mod(mod)
if target is not None:
if not isinstance(target, Target):
target = Target(target)
if space_generator is not None:
if not isinstance(space_generator, SpaceGenerator):
space_generator = SpaceGenerator.create(space_generator)
if search_strategy is not None:
if not isinstance(search_strategy, SearchStrategy):
search_strategy = SearchStrategy.create(search_strategy)
if logger is None:
logger = get_logger(__name__)
if not isinstance(num_threads, int):
if num_threads == "physical":
num_threads = cpu_count(logical=False)
elif num_threads == "logical":
num_threads = cpu_count(logical=True)
else:
raise ValueError(
f"Invalid num_threads: {num_threads}, "
"should be either an integer, 'physical', or 'logical'"
)
self.__init_handle_by_constructor__(
_ffi_api.TuneContext, # type: ignore # pylint: disable=no-member
mod,
target,
space_generator,
search_strategy,
task_name,
num_threads,
rand_state,
get_logging_func(logger),
)
_ffi_api.TuneContextInitialize(self) # type: ignore # pylint: disable=no-member
def generate_design_space(self) -> List[Schedule]:
"""Generate design spaces given a module.
Delegated to self.space_generator.generate_design_space with self.mod
Returns
-------
design_spaces : List[Schedule]
The generated design spaces, i.e., schedules.
"""
if self.mod is None:
raise ValueError("`mod` is not provided. Please construct TuneContext with `mod`")
if self.space_generator is None:
raise ValueError(
"space_generator is not provided."
"Please construct TuneContext with space_generator"
)
return self.space_generator.generate_design_space(self.mod)
def pre_tuning(
self,
max_trials: int,
num_trials_per_iter: int = 64,
design_spaces: Optional[List[Schedule]] = None,
database: Optional["Database"] = None,
cost_model: Optional["CostModel"] = None,
) -> None:
"""A method to be called for SearchStrategy to do necessary preparation before tuning.
Delegated to self.search_strategy.pre_tuning.
Parameters
----------
max_trials : int
The maximum number of trials to be executed.
num_trials_per_iter : int = 64
The number of trials to be executed per iteration.
design_spaces : Optional[List[Schedule]]
The design spaces used during tuning process.
If None, use the outcome of `self.generate_design_space()`.
database : Optional[Database] = None
The database used during tuning process.
If None, and the search strategy is `EvolutionarySearch`,
then use `tvm.meta_schedule.database.MemoryDatabase`.
cost_model : Optional[CostModel] = None
The cost model used during tuning process.
If None, and the search strategy is `EvolutionarySearch`,
then use `tvm.meta_schedule.cost_model.RandomModel`.
"""
# pylint: disable=import-outside-toplevel
from .cost_model import RandomModel
from .database import MemoryDatabase
from .search_strategy import EvolutionarySearch
# pylint: enable=import-outside-toplevel
if self.search_strategy is None:
raise ValueError(
"search_strategy is not provided."
"Please construct TuneContext with search_strategy"
)
if design_spaces is None:
design_spaces = self.generate_design_space()
if database is None:
if isinstance(self.search_strategy, EvolutionarySearch):
database = MemoryDatabase() # type: ignore
if cost_model is None:
if isinstance(self.search_strategy, EvolutionarySearch):
cost_model = RandomModel() # type: ignore
return self.search_strategy.pre_tuning(
max_trials,
num_trials_per_iter,
design_spaces,
database,
cost_model,
)
def post_tuning(self) -> None:
"""A method to be called for SearchStrategy to do necessary cleanup after tuning.
Delegated to self.search_strategy.post_tuning.
"""
if self.search_strategy is None:
raise ValueError(
"search_strategy is not provided."
"Please construct TuneContext with search_strategy"
)
return self.search_strategy.post_tuning()
def generate_measure_candidates(self) -> Optional[List["MeasureCandidate"]]:
"""Generate a batch of measure candidates from design spaces for measurement.
Delegated to self.search_strategy.generate_measure_candidates.
Returns
-------
        measure_candidates : Optional[List[MeasureCandidate]]
The measure candidates generated, None if search is finished.
"""
if self.search_strategy is None:
raise ValueError(
"search_strategy is not provided."
"Please construct TuneContext with search_strategy"
)
return self.search_strategy.generate_measure_candidates()
def notify_runner_results(
self,
measure_candidates: List["MeasureCandidate"],
results: List["RunnerResult"],
) -> None:
"""Update the state in SearchStrategy with profiling results.
Delegated to self.search_strategy.notify_runner_results.
Parameters
----------
measure_candidates : List[MeasureCandidate]
The measure candidates for update.
results : List[RunnerResult]
The profiling results from the runner.
"""
if self.search_strategy is None:
raise ValueError(
"search_strategy is not provided."
"Please construct TuneContext with search_strategy"
)
return self.search_strategy.notify_runner_results(measure_candidates, results)
def clone(self) -> "TuneContext":
"""Clone the TuneContext.
Returns
-------
cloned_context : TuneContext
The cloned TuneContext.
"""
return _ffi_api.TuneContextClone(self) # type: ignore # pylint: disable=no-member
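# Hedged usage sketch (not part of the original module): constructing a
# TuneContext directly and generating design spaces for manual inspection.
# The module `mod` is assumed to be supplied by the caller.
def _example_generate_design_space(mod: IRModule) -> List[Schedule]:
    ctx = TuneContext(
        mod=mod,
        target=Target("llvm -num-cores 4"),
        space_generator="post-order-apply",
        task_name="inspect",
    )
    return ctx.generate_design_space()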
| https://github.com/zk-ml/tachikoma |
python/tvm/meta_schedule/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for meta schedule"""
import ctypes
import os
import shutil
from typing import Any, Callable, List, Optional, Union
import numpy as np # type: ignore
import psutil # type: ignore
from tvm._ffi import get_global_func, register_func
from tvm.error import TVMError
from tvm.ir import Array, IRModule, Map
from tvm.rpc import RPCSession
from tvm.runtime import PackedFunc, String
from tvm.tir import FloatImm, IntImm
def derived_object(cls: type) -> type:
"""A decorator to register derived subclasses for TVM objects.
Parameters
----------
cls : type
The derived class to be registered.
Returns
-------
cls : type
The decorated TVM object.
Example
-------
.. code-block:: python
@register_object("meta_schedule.PyRunner")
class _PyRunner(meta_schedule.Runner):
def __init__(self, f_run: Callable = None):
self.__init_handle_by_constructor__(_ffi_api.RunnerPyRunner, f_run)
class PyRunner:
_tvm_metadata = {
"cls": _PyRunner,
"methods": ["run"]
}
def run(self, runner_inputs):
raise NotImplementedError
@derived_object
class LocalRunner(PyRunner):
def run(self, runner_inputs):
...
"""
import functools # pylint: disable=import-outside-toplevel
import weakref # pylint: disable=import-outside-toplevel
def _extract(inst: type, name: str):
"""Extract function from intrinsic class."""
def method(*args, **kwargs):
return getattr(inst, name)(*args, **kwargs)
if getattr(base, name) is getattr(cls, name) and name != "__str__":
# for task scheduler return None means calling default function
# otherwise it will trigger a TVMError of method not implemented
# on the c++ side when you call the method, __str__ not required
return None
return method
assert isinstance(cls.__base__, type)
assert hasattr(
cls, "_tvm_metadata"
), "Please use the user-facing method overriding class, i.e., PyRunner."
base = cls.__base__
metadata = getattr(base, "_tvm_metadata")
fields = metadata.get("fields", [])
methods = metadata.get("methods", [])
class TVMDerivedObject(metadata["cls"]): # type: ignore
"""The derived object to avoid cyclic dependency."""
def __init__(self, *args, **kwargs):
"""Constructor."""
self.handle = None
self._inst = cls(*args, **kwargs)
super().__init__(
# the constructor's parameters, builder, runner, etc.
*[getattr(self._inst, name) for name in fields],
# the function methods, init_with_tune_context, build, run, etc.
*[_extract(self._inst, name) for name in methods],
)
# for task scheduler hybrid funcs in c++ & python side
# using weakref to avoid cyclic dependency
self._inst._outer = weakref.ref(self)
def __getattr__(self, name: str):
"""Bridge the attribute function."""
try:
return self._inst.__getattribute__(name)
except AttributeError:
return super(TVMDerivedObject, self).__getattr__(name)
def __setattr__(self, name, value):
if name not in ["_inst", "key", "handle"]:
self._inst.__setattr__(name, value)
else:
super(TVMDerivedObject, self).__setattr__(name, value)
functools.update_wrapper(TVMDerivedObject.__init__, cls.__init__) # type: ignore
TVMDerivedObject.__name__ = cls.__name__
TVMDerivedObject.__doc__ = cls.__doc__
TVMDerivedObject.__module__ = cls.__module__
return TVMDerivedObject
@register_func("meta_schedule.cpu_count")
def _cpu_count_impl(logical: bool = True) -> int:
"""Return the number of logical or physical CPUs in the system
Parameters
----------
logical : bool = True
If True, return the number of logical CPUs, otherwise return the number of physical CPUs
Returns
-------
cpu_count : int
The number of logical or physical CPUs in the system
Note
----
The meta schedule search infra intentionally does not adopt the following convention in TVM:
- C++ API `tvm::runtime::threading::MaxConcurrency()`
- Environment variable `TVM_NUM_THREADS` or
- Environment variable `OMP_NUM_THREADS`
This is because these variables are dedicated to controlling
the runtime behavior of generated kernels, instead of the host-side search.
    Setting these variables may cause the host-side search to interfere with the profiling
    of generated kernels when measuring locally.
"""
return psutil.cpu_count(logical=logical) or 1
def cpu_count(logical: bool = True) -> int:
"""Return the number of logical or physical CPUs in the system
Parameters
----------
logical : bool = True
If True, return the number of logical CPUs, otherwise return the number of physical CPUs
Returns
-------
cpu_count : int
The number of logical or physical CPUs in the system
Note
----
The meta schedule search infra intentionally does not adopt the following convention in TVM:
- C++ API `tvm::runtime::threading::MaxConcurrency()`
- Environment variable `TVM_NUM_THREADS` or
- Environment variable `OMP_NUM_THREADS`
This is because these variables are dedicated to controlling
the runtime behavior of generated kernels, instead of the host-side search.
    Setting these variables may cause the host-side search to interfere with the profiling
    of generated kernels when measuring locally.
"""
return _cpu_count_impl(logical)
@register_func("meta_schedule.using_ipython")
def _using_ipython() -> bool:
"""Return whether the current process is running in an IPython shell.
Returns
-------
result : bool
Whether the current process is running in an IPython shell.
"""
try:
return get_ipython().__class__.__name__ == "ZMQInteractiveShell" # type: ignore
except NameError:
return False
@register_func("meta_schedule.print_interactive_table")
def print_interactive_table(data: str) -> None:
"""Print the dataframe interactive table in notebook.
Parameters
----------
data : str
The serialized performance table from MetaSchedule table printer.
"""
import pandas as pd # type: ignore # pylint: disable=import-outside-toplevel
from IPython.display import display # type: ignore # pylint: disable=import-outside-toplevel
pd.set_option("display.max_rows", None)
pd.set_option("display.max_colwidth", None)
parsed = [
x.split("|")[1:] for x in list(filter(lambda x: set(x) != {"-"}, data.strip().split("\n")))
]
display(
pd.DataFrame(
parsed[1:],
columns=parsed[0],
)
)
def get_global_func_with_default_on_worker(
name: Union[None, str, Callable],
default: Callable,
) -> Callable:
"""Get the registered global function on the worker process.
Parameters
----------
name : Union[None, str, Callable]
If given a string, retrieve the function in TVM's global registry;
If given a python function, return it as it is;
Otherwise, return `default`.
default : Callable
The function to be returned if `name` is None.
Returns
-------
result : Callable
The retrieved global function or `default` if `name` is None
"""
if name is None:
return default
if callable(name):
return name
try:
return get_global_func(name)
except TVMError as error:
raise ValueError(
"Function '{name}' is not registered on the worker process. "
"The build function and export function should be registered in the worker process. "
"Note that the worker process is only aware of functions registered in TVM package, "
"if there are extra functions to be registered, "
"please send the registration logic via initializer."
) from error
def get_global_func_on_rpc_session(
session: RPCSession,
name: str,
extra_error_msg: Optional[str] = None,
) -> PackedFunc:
"""Get a PackedFunc from the global registry from an RPCSession.
Parameters
----------
session : RPCSession
The RPCSession to be retrieved from
name : str
The name of the PackedFunc
extra_error_msg : Optional[str]
Extra information to provide in the error message
Returns
-------
result : PackedFunc
The result
"""
try:
result = session.get_function(name)
except AttributeError as error:
error_msg = f'Unable to find function "{name}" on the remote RPC server.'
if extra_error_msg:
error_msg = f"{error_msg} {extra_error_msg}"
raise AttributeError(error_msg) from error
return result
@register_func("meta_schedule.remove_build_dir")
def remove_build_dir(artifact_path: str) -> None:
"""Clean up the build directory"""
shutil.rmtree(os.path.dirname(artifact_path))
def _json_de_tvm(obj: Any) -> Any:
"""Unpack a TVM nested container to a JSON object in python.
Parameters
----------
obj : Any
The TVM nested container to be unpacked.
Returns
-------
result : Any
The unpacked json object.
"""
if obj is None:
return None
if isinstance(obj, (int, float)):
return obj
if isinstance(obj, (IntImm, FloatImm)):
return obj.value
if isinstance(obj, (str, String)):
return str(obj)
if isinstance(obj, Array):
return [_json_de_tvm(i) for i in obj]
if isinstance(obj, Map):
return {_json_de_tvm(k): _json_de_tvm(v) for k, v in obj.items()}
raise TypeError("Not supported type: " + str(type(obj)))
def shash2hex(mod: IRModule) -> str:
"""Get the structural hash of a module.
Parameters
----------
mod : IRModule
The module to be hashed.
Returns
-------
result : str
The structural hash of the module.
"""
func = get_global_func("meta_schedule._SHash2Hex")
return str(func(mod))
def _get_default_str(obj: Any) -> str:
return (
# pylint: disable=protected-access
f"meta_schedule.{obj.__class__.__name__}"
+ f"({_to_hex_address(obj._outer().handle)})" # type: ignore
# pylint: enable=protected-access
)
def _to_hex_address(handle: ctypes.c_void_p) -> str:
"""Get the hexadecimal address of a handle.
Parameters
----------
handle : ctypes.c_void_p
The handle to be converted.
Returns
-------
result : str
The hexadecimal address of the handle.
"""
return hex(ctypes.cast(handle, ctypes.c_void_p).value)
def fork_seed(seed: Optional[int], n: int) -> List[int]:
# fmt: off
return np.random.RandomState(seed=seed).randint(1, 2 ** 30, size=n).tolist()
# fmt: on
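# Hedged usage sketch (not part of the original module): forking one seed into
# deterministic per-task seeds; the same seed always yields the same list.
def _example_fork_seed() -> None:
    seeds = fork_seed(42, n=3)
    assert seeds == fork_seed(42, n=3)
    assert len(seeds) == 3 and all(1 <= s < 2 ** 30 for s in seeds)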
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MicroTVM module for bare-metal backends"""
from .build import autotvm_build_func
from .build import AutoTvmModuleLoader
from .build import get_standalone_crt_dir
from .build import get_microtvm_template_projects
from .model_library_format import (
export_model_library_format,
UnsupportedInModelLibraryFormatError,
)
from .project import generate_project, GeneratedProject, TemplateProject
from .session import (
create_local_graph_executor,
create_local_debug_executor,
Session,
SessionTerminatedError,
)
from .transport import TransportLogger
| https://github.com/zk-ml/tachikoma |