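// Record allocations by memory space; shared and local buffers are
// classified by whether their size is a compile-time constant.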
void handle(Allocate* allocate) final {
switch (allocate->memoryType()) {
case MemoryType::Global:
summary_.global_allocations.push_back(allocate);
break;
case MemoryType::Shared:
if (ExpressionEvaluator::isConst(allocate->size())) {
summary_.static_smem_allocations.push_back(allocate);
} else {
summary_.dynamic_smem_allocations.push_back(allocate);
}
break;
case MemoryType::Local:
if (!ExpressionEvaluator::isConst(allocate->size())) {
summary_.has_dynamic_local_memory_allocations = true;
summary_.dynamic_lmem_allocations.emplace_back(allocate);
}
break;
}
}
void handle(UnaryOp* unary_op) final {
if (unary_op->getUnaryOpType() == UnaryOpType::RandLike) {
// This kernel is using random numbers
summary_.is_stochastic = true;
}
}
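// Track block reductions and the widest data type that will need
// shared-memory staging.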
void handle(TensorIndex* tensor_index) final {
const auto tv = tensor_index->view();
const auto domain = tv->domain();
// Do we have any reductions?
summary_.has_block_reductions =
summary_.has_block_reductions || domain->hasBlockReduction();
// Update the largest smem data type
if (domain->hasBlockReduction() || domain->hasGridReduction() ||
tv->getMemoryType() == MemoryType::Shared) {
const auto data_type = tv->dtype();
const size_t type_size = dataTypeSize(data_type);
if (type_size > max_smem_type_size_) {
max_smem_type_size_ = type_size;
summary_.largest_smem_data_type = data_type;
}
}
}
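// Welford ops may reduce within a thread block; record both facts.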
void handle(WelfordOp* welford_op) final {
summary_.has_welford = true;
TORCH_INTERNAL_ASSERT(welford_op->outAvg()->isA<TensorIndex>());
auto out_dom = welford_op->outAvg()->as<TensorIndex>()->view()->domain();
summary_.has_block_welford =
summary_.has_block_welford || out_dom->hasBlockReduction();
}
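// A grid Welford also counts as a grid reduction; delegate the
// bookkeeping to updateGridReductionInLoop().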
void handle(GridWelford* grid_welford) final {
summary_.has_welford = true;
summary_.has_grid_welford = true;
const auto dom =
grid_welford->welford_op()->out()->as<TensorIndex>()->view()->domain();
updateGridReductionInLoop(dom);
}
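// Plain grid reductions go through the same in-loop bookkeeping.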
void handle(GridReduction* grid_reduction) final {
summary_.has_grid_reductions = true;
const auto dom = grid_reduction->reduction_op()
->out()
->as<TensorIndex>()
->view()
->domain();
updateGridReductionInLoop(dom);
}
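// Grid broadcasts rely on cooperative grid reductions; also scan the
// wrapped broadcast op.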
void handle(GridBroadcast* grid_broadcast) final {
summary_.has_cooperative_grid_reduction = true;
handle(grid_broadcast->broadcast_op());
}
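// Record which parallel dimensions each broadcast spans, based on the
// thread predicate map.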
void handle(BroadcastOp* bop) final {
const ParallelTypeBitmap parallel_types =
GpuLower::current()->threadPredMap().getParallelBroadcastDomains(
bop->out()->as<TensorIndex>()->view());
summary_.broadcast_parallel_types.emplace(bop, parallel_types);
// Do we have block broadcasts?
summary_.has_block_broadcasts =
summary_.has_block_broadcasts || parallel_types.hasTID();
// Do we have grid broadcasts?
summary_.has_grid_broadcasts =
summary_.has_grid_broadcasts || parallel_types.hasBID();
}
private:
size_t max_smem_type_size_ = 0;
KernelSummary summary_;
private:
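// Shared bookkeeping for grid reductions (including grid Welford).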
void updateGridReductionInLoop(TensorDomain* dom) {
summary_.has_grid_reductions = true;